1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright 2018 Mellanox Technologies, Ltd 3 */ 4 5 #include <sys/queue.h> 6 #include <stdalign.h> 7 #include <stdint.h> 8 #include <string.h> 9 #include <unistd.h> 10 11 #include <rte_common.h> 12 #include <rte_ether.h> 13 #include <ethdev_driver.h> 14 #include <rte_flow.h> 15 #include <rte_flow_driver.h> 16 #include <rte_malloc.h> 17 #include <rte_cycles.h> 18 #include <rte_ip.h> 19 #include <rte_gre.h> 20 #include <rte_vxlan.h> 21 #include <rte_gtp.h> 22 #include <rte_eal_paging.h> 23 #include <rte_mpls.h> 24 25 #include <mlx5_glue.h> 26 #include <mlx5_devx_cmds.h> 27 #include <mlx5_prm.h> 28 #include <mlx5_malloc.h> 29 30 #include "mlx5_defs.h" 31 #include "mlx5.h" 32 #include "mlx5_common_os.h" 33 #include "mlx5_flow.h" 34 #include "mlx5_flow_os.h" 35 #include "mlx5_rx.h" 36 #include "mlx5_tx.h" 37 #include "rte_pmd_mlx5.h" 38 39 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H) 40 41 #ifndef HAVE_IBV_FLOW_DEVX_COUNTERS 42 #define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0 43 #endif 44 45 #ifndef HAVE_MLX5DV_DR_ESWITCH 46 #ifndef MLX5DV_FLOW_TABLE_TYPE_FDB 47 #define MLX5DV_FLOW_TABLE_TYPE_FDB 0 48 #endif 49 #endif 50 51 #ifndef HAVE_MLX5DV_DR 52 #define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1 53 #endif 54 55 /* VLAN header definitions */ 56 #define MLX5DV_FLOW_VLAN_PCP_SHIFT 13 57 #define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT) 58 #define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff 59 #define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK) 60 #define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK) 61 62 union flow_dv_attr { 63 struct { 64 uint32_t valid:1; 65 uint32_t ipv4:1; 66 uint32_t ipv6:1; 67 uint32_t tcp:1; 68 uint32_t udp:1; 69 uint32_t reserved:27; 70 }; 71 uint32_t attr; 72 }; 73 74 static int 75 flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh, 76 struct mlx5_flow_tbl_resource *tbl); 77 78 static int 79 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev, 80 uint32_t encap_decap_idx); 81 82 static int 83 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev, 84 uint32_t port_id); 85 static void 86 flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss); 87 88 static int 89 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev, 90 uint32_t rix_jump); 91 92 /** 93 * Initialize flow attributes structure according to flow items' types. 94 * 95 * flow_dv_validate() avoids multiple L3/L4 layers cases other than tunnel 96 * mode. For tunnel mode, the items to be modified are the outermost ones. 97 * 98 * @param[in] item 99 * Pointer to item specification. 100 * @param[out] attr 101 * Pointer to flow attributes structure. 102 * @param[in] dev_flow 103 * Pointer to the sub flow. 104 * @param[in] tunnel_decap 105 * Whether action is after tunnel decapsulation. 106 */ 107 static void 108 flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr, 109 struct mlx5_flow *dev_flow, bool tunnel_decap) 110 { 111 uint64_t layers = dev_flow->handle->layers; 112 113 /* 114 * If layers is already initialized, it means this dev_flow is the 115 * suffix flow, the layers flags is set by the prefix flow. Need to 116 * use the layer flags from prefix flow as the suffix flow may not 117 * have the user defined items as the flow is split. 
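	 * (For example, when a flow is split into prefix and suffix parts
	 * for metering, the suffix sub-flow no longer carries the user's
	 * L3/L4 items, so the layers detected on the prefix part are
	 * reused here.)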
118 */ 119 if (layers) { 120 if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4) 121 attr->ipv4 = 1; 122 else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6) 123 attr->ipv6 = 1; 124 if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP) 125 attr->tcp = 1; 126 else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP) 127 attr->udp = 1; 128 attr->valid = 1; 129 return; 130 } 131 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { 132 uint8_t next_protocol = 0xff; 133 switch (item->type) { 134 case RTE_FLOW_ITEM_TYPE_GRE: 135 case RTE_FLOW_ITEM_TYPE_NVGRE: 136 case RTE_FLOW_ITEM_TYPE_VXLAN: 137 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: 138 case RTE_FLOW_ITEM_TYPE_GENEVE: 139 case RTE_FLOW_ITEM_TYPE_MPLS: 140 if (tunnel_decap) 141 attr->attr = 0; 142 break; 143 case RTE_FLOW_ITEM_TYPE_IPV4: 144 if (!attr->ipv6) 145 attr->ipv4 = 1; 146 if (item->mask != NULL && 147 ((const struct rte_flow_item_ipv4 *) 148 item->mask)->hdr.next_proto_id) 149 next_protocol = 150 ((const struct rte_flow_item_ipv4 *) 151 (item->spec))->hdr.next_proto_id & 152 ((const struct rte_flow_item_ipv4 *) 153 (item->mask))->hdr.next_proto_id; 154 if ((next_protocol == IPPROTO_IPIP || 155 next_protocol == IPPROTO_IPV6) && tunnel_decap) 156 attr->attr = 0; 157 break; 158 case RTE_FLOW_ITEM_TYPE_IPV6: 159 if (!attr->ipv4) 160 attr->ipv6 = 1; 161 if (item->mask != NULL && 162 ((const struct rte_flow_item_ipv6 *) 163 item->mask)->hdr.proto) 164 next_protocol = 165 ((const struct rte_flow_item_ipv6 *) 166 (item->spec))->hdr.proto & 167 ((const struct rte_flow_item_ipv6 *) 168 (item->mask))->hdr.proto; 169 if ((next_protocol == IPPROTO_IPIP || 170 next_protocol == IPPROTO_IPV6) && tunnel_decap) 171 attr->attr = 0; 172 break; 173 case RTE_FLOW_ITEM_TYPE_UDP: 174 if (!attr->tcp) 175 attr->udp = 1; 176 break; 177 case RTE_FLOW_ITEM_TYPE_TCP: 178 if (!attr->udp) 179 attr->tcp = 1; 180 break; 181 default: 182 break; 183 } 184 } 185 attr->valid = 1; 186 } 187 188 /** 189 * Convert rte_mtr_color to mlx5 color. 190 * 191 * @param[in] rcol 192 * rte_mtr_color. 193 * 194 * @return 195 * mlx5 color. 196 */ 197 static int 198 rte_col_2_mlx5_col(enum rte_color rcol) 199 { 200 switch (rcol) { 201 case RTE_COLOR_GREEN: 202 return MLX5_FLOW_COLOR_GREEN; 203 case RTE_COLOR_YELLOW: 204 return MLX5_FLOW_COLOR_YELLOW; 205 case RTE_COLOR_RED: 206 return MLX5_FLOW_COLOR_RED; 207 default: 208 break; 209 } 210 return MLX5_FLOW_COLOR_UNDEFINED; 211 } 212 213 struct field_modify_info { 214 uint32_t size; /* Size of field in protocol header, in bytes. */ 215 uint32_t offset; /* Offset of field in protocol header, in bytes. */ 216 enum mlx5_modification_field id; 217 }; 218 219 struct field_modify_info modify_eth[] = { 220 {4, 0, MLX5_MODI_OUT_DMAC_47_16}, 221 {2, 4, MLX5_MODI_OUT_DMAC_15_0}, 222 {4, 6, MLX5_MODI_OUT_SMAC_47_16}, 223 {2, 10, MLX5_MODI_OUT_SMAC_15_0}, 224 {0, 0, 0}, 225 }; 226 227 struct field_modify_info modify_vlan_out_first_vid[] = { 228 /* Size in bits !!! 
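	 * Unlike the other modify_* tables, which describe field sizes in
	 * bytes, the first_vid entry below is 12 bits wide, matching the
	 * VID portion of the VLAN TCI.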
*/ 229 {12, 0, MLX5_MODI_OUT_FIRST_VID}, 230 {0, 0, 0}, 231 }; 232 233 struct field_modify_info modify_ipv4[] = { 234 {1, 1, MLX5_MODI_OUT_IP_DSCP}, 235 {1, 8, MLX5_MODI_OUT_IPV4_TTL}, 236 {4, 12, MLX5_MODI_OUT_SIPV4}, 237 {4, 16, MLX5_MODI_OUT_DIPV4}, 238 {0, 0, 0}, 239 }; 240 241 struct field_modify_info modify_ipv6[] = { 242 {1, 0, MLX5_MODI_OUT_IP_DSCP}, 243 {1, 7, MLX5_MODI_OUT_IPV6_HOPLIMIT}, 244 {4, 8, MLX5_MODI_OUT_SIPV6_127_96}, 245 {4, 12, MLX5_MODI_OUT_SIPV6_95_64}, 246 {4, 16, MLX5_MODI_OUT_SIPV6_63_32}, 247 {4, 20, MLX5_MODI_OUT_SIPV6_31_0}, 248 {4, 24, MLX5_MODI_OUT_DIPV6_127_96}, 249 {4, 28, MLX5_MODI_OUT_DIPV6_95_64}, 250 {4, 32, MLX5_MODI_OUT_DIPV6_63_32}, 251 {4, 36, MLX5_MODI_OUT_DIPV6_31_0}, 252 {0, 0, 0}, 253 }; 254 255 struct field_modify_info modify_udp[] = { 256 {2, 0, MLX5_MODI_OUT_UDP_SPORT}, 257 {2, 2, MLX5_MODI_OUT_UDP_DPORT}, 258 {0, 0, 0}, 259 }; 260 261 struct field_modify_info modify_tcp[] = { 262 {2, 0, MLX5_MODI_OUT_TCP_SPORT}, 263 {2, 2, MLX5_MODI_OUT_TCP_DPORT}, 264 {4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM}, 265 {4, 8, MLX5_MODI_OUT_TCP_ACK_NUM}, 266 {0, 0, 0}, 267 }; 268 269 static void 270 mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused, 271 uint8_t next_protocol, uint64_t *item_flags, 272 int *tunnel) 273 { 274 MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 || 275 item->type == RTE_FLOW_ITEM_TYPE_IPV6); 276 if (next_protocol == IPPROTO_IPIP) { 277 *item_flags |= MLX5_FLOW_LAYER_IPIP; 278 *tunnel = 1; 279 } 280 if (next_protocol == IPPROTO_IPV6) { 281 *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP; 282 *tunnel = 1; 283 } 284 } 285 286 /* Update VLAN's VID/PCP based on input rte_flow_action. 287 * 288 * @param[in] action 289 * Pointer to struct rte_flow_action. 290 * @param[out] vlan 291 * Pointer to struct rte_vlan_hdr. 292 */ 293 static void 294 mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action, 295 struct rte_vlan_hdr *vlan) 296 { 297 uint16_t vlan_tci; 298 if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) { 299 vlan_tci = 300 ((const struct rte_flow_action_of_set_vlan_pcp *) 301 action->conf)->vlan_pcp; 302 vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT; 303 vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK; 304 vlan->vlan_tci |= vlan_tci; 305 } else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) { 306 vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK; 307 vlan->vlan_tci |= rte_be_to_cpu_16 308 (((const struct rte_flow_action_of_set_vlan_vid *) 309 action->conf)->vlan_vid); 310 } 311 } 312 313 /** 314 * Fetch 1, 2, 3 or 4 byte field from the byte array 315 * and return as unsigned integer in host-endian format. 316 * 317 * @param[in] data 318 * Pointer to data array. 319 * @param[in] size 320 * Size of field to extract. 321 * 322 * @return 323 * converted field in host endian format. 324 */ 325 static inline uint32_t 326 flow_dv_fetch_field(const uint8_t *data, uint32_t size) 327 { 328 uint32_t ret; 329 330 switch (size) { 331 case 1: 332 ret = *data; 333 break; 334 case 2: 335 ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data); 336 break; 337 case 3: 338 ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data); 339 ret = (ret << 8) | *(data + sizeof(uint16_t)); 340 break; 341 case 4: 342 ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data); 343 break; 344 default: 345 MLX5_ASSERT(false); 346 ret = 0; 347 break; 348 } 349 return ret; 350 } 351 352 /** 353 * Convert modify-header action to DV specification. 
354 * 355 * Data length of each action is determined by provided field description 356 * and the item mask. Data bit offset and width of each action is determined 357 * by provided item mask. 358 * 359 * @param[in] item 360 * Pointer to item specification. 361 * @param[in] field 362 * Pointer to field modification information. 363 * For MLX5_MODIFICATION_TYPE_SET specifies destination field. 364 * For MLX5_MODIFICATION_TYPE_ADD specifies destination field. 365 * For MLX5_MODIFICATION_TYPE_COPY specifies source field. 366 * @param[in] dcopy 367 * Destination field info for MLX5_MODIFICATION_TYPE_COPY in @type. 368 * Negative offset value sets the same offset as source offset. 369 * size field is ignored, value is taken from source field. 370 * @param[in,out] resource 371 * Pointer to the modify-header resource. 372 * @param[in] type 373 * Type of modification. 374 * @param[out] error 375 * Pointer to the error structure. 376 * 377 * @return 378 * 0 on success, a negative errno value otherwise and rte_errno is set. 379 */ 380 static int 381 flow_dv_convert_modify_action(struct rte_flow_item *item, 382 struct field_modify_info *field, 383 struct field_modify_info *dcopy, 384 struct mlx5_flow_dv_modify_hdr_resource *resource, 385 uint32_t type, struct rte_flow_error *error) 386 { 387 uint32_t i = resource->actions_num; 388 struct mlx5_modification_cmd *actions = resource->actions; 389 390 /* 391 * The item and mask are provided in big-endian format. 392 * The fields should be presented as in big-endian format either. 393 * Mask must be always present, it defines the actual field width. 394 */ 395 MLX5_ASSERT(item->mask); 396 MLX5_ASSERT(field->size); 397 do { 398 unsigned int size_b; 399 unsigned int off_b; 400 uint32_t mask; 401 uint32_t data; 402 403 if (i >= MLX5_MAX_MODIFY_NUM) 404 return rte_flow_error_set(error, EINVAL, 405 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 406 "too many items to modify"); 407 /* Fetch variable byte size mask from the array. */ 408 mask = flow_dv_fetch_field((const uint8_t *)item->mask + 409 field->offset, field->size); 410 if (!mask) { 411 ++field; 412 continue; 413 } 414 /* Deduce actual data width in bits from mask value. */ 415 off_b = rte_bsf32(mask); 416 size_b = sizeof(uint32_t) * CHAR_BIT - 417 off_b - __builtin_clz(mask); 418 MLX5_ASSERT(size_b); 419 size_b = size_b == sizeof(uint32_t) * CHAR_BIT ? 0 : size_b; 420 actions[i] = (struct mlx5_modification_cmd) { 421 .action_type = type, 422 .field = field->id, 423 .offset = off_b, 424 .length = size_b, 425 }; 426 /* Convert entire record to expected big-endian format. */ 427 actions[i].data0 = rte_cpu_to_be_32(actions[i].data0); 428 if (type == MLX5_MODIFICATION_TYPE_COPY) { 429 MLX5_ASSERT(dcopy); 430 actions[i].dst_field = dcopy->id; 431 actions[i].dst_offset = 432 (int)dcopy->offset < 0 ? off_b : dcopy->offset; 433 /* Convert entire record to big-endian format. */ 434 actions[i].data1 = rte_cpu_to_be_32(actions[i].data1); 435 ++dcopy; 436 } else { 437 MLX5_ASSERT(item->spec); 438 data = flow_dv_fetch_field((const uint8_t *)item->spec + 439 field->offset, field->size); 440 /* Shift out the trailing masked bits from data. 
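			 * As a worked example (values are illustrative only):
			 * an item mask of 0x00fff000 yields off_b = 12 and
			 * size_b = 12, and a spec of 0x00abc000 is stored in
			 * data1 as 0xabc.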
*/ 441 data = (data & mask) >> off_b; 442 actions[i].data1 = rte_cpu_to_be_32(data); 443 } 444 ++i; 445 ++field; 446 } while (field->size); 447 if (resource->actions_num == i) 448 return rte_flow_error_set(error, EINVAL, 449 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 450 "invalid modification flow item"); 451 resource->actions_num = i; 452 return 0; 453 } 454 455 /** 456 * Convert modify-header set IPv4 address action to DV specification. 457 * 458 * @param[in,out] resource 459 * Pointer to the modify-header resource. 460 * @param[in] action 461 * Pointer to action specification. 462 * @param[out] error 463 * Pointer to the error structure. 464 * 465 * @return 466 * 0 on success, a negative errno value otherwise and rte_errno is set. 467 */ 468 static int 469 flow_dv_convert_action_modify_ipv4 470 (struct mlx5_flow_dv_modify_hdr_resource *resource, 471 const struct rte_flow_action *action, 472 struct rte_flow_error *error) 473 { 474 const struct rte_flow_action_set_ipv4 *conf = 475 (const struct rte_flow_action_set_ipv4 *)(action->conf); 476 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 }; 477 struct rte_flow_item_ipv4 ipv4; 478 struct rte_flow_item_ipv4 ipv4_mask; 479 480 memset(&ipv4, 0, sizeof(ipv4)); 481 memset(&ipv4_mask, 0, sizeof(ipv4_mask)); 482 if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) { 483 ipv4.hdr.src_addr = conf->ipv4_addr; 484 ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr; 485 } else { 486 ipv4.hdr.dst_addr = conf->ipv4_addr; 487 ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr; 488 } 489 item.spec = &ipv4; 490 item.mask = &ipv4_mask; 491 return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource, 492 MLX5_MODIFICATION_TYPE_SET, error); 493 } 494 495 /** 496 * Convert modify-header set IPv6 address action to DV specification. 497 * 498 * @param[in,out] resource 499 * Pointer to the modify-header resource. 500 * @param[in] action 501 * Pointer to action specification. 502 * @param[out] error 503 * Pointer to the error structure. 504 * 505 * @return 506 * 0 on success, a negative errno value otherwise and rte_errno is set. 507 */ 508 static int 509 flow_dv_convert_action_modify_ipv6 510 (struct mlx5_flow_dv_modify_hdr_resource *resource, 511 const struct rte_flow_action *action, 512 struct rte_flow_error *error) 513 { 514 const struct rte_flow_action_set_ipv6 *conf = 515 (const struct rte_flow_action_set_ipv6 *)(action->conf); 516 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 }; 517 struct rte_flow_item_ipv6 ipv6; 518 struct rte_flow_item_ipv6 ipv6_mask; 519 520 memset(&ipv6, 0, sizeof(ipv6)); 521 memset(&ipv6_mask, 0, sizeof(ipv6_mask)); 522 if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) { 523 memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr, 524 sizeof(ipv6.hdr.src_addr)); 525 memcpy(&ipv6_mask.hdr.src_addr, 526 &rte_flow_item_ipv6_mask.hdr.src_addr, 527 sizeof(ipv6.hdr.src_addr)); 528 } else { 529 memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr, 530 sizeof(ipv6.hdr.dst_addr)); 531 memcpy(&ipv6_mask.hdr.dst_addr, 532 &rte_flow_item_ipv6_mask.hdr.dst_addr, 533 sizeof(ipv6.hdr.dst_addr)); 534 } 535 item.spec = &ipv6; 536 item.mask = &ipv6_mask; 537 return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource, 538 MLX5_MODIFICATION_TYPE_SET, error); 539 } 540 541 /** 542 * Convert modify-header set MAC address action to DV specification. 543 * 544 * @param[in,out] resource 545 * Pointer to the modify-header resource. 546 * @param[in] action 547 * Pointer to action specification. 
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_mac
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_mac *conf =
		(const struct rte_flow_action_set_mac *)(action->conf);
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
	struct rte_flow_item_eth eth;
	struct rte_flow_item_eth eth_mask;

	memset(&eth, 0, sizeof(eth));
	memset(&eth_mask, 0, sizeof(eth_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
		memcpy(&eth.src.addr_bytes, &conf->mac_addr,
		       sizeof(eth.src.addr_bytes));
		memcpy(&eth_mask.src.addr_bytes,
		       &rte_flow_item_eth_mask.src.addr_bytes,
		       sizeof(eth_mask.src.addr_bytes));
	} else {
		memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
		       sizeof(eth.dst.addr_bytes));
		memcpy(&eth_mask.dst.addr_bytes,
		       &rte_flow_item_eth_mask.dst.addr_bytes,
		       sizeof(eth_mask.dst.addr_bytes));
	}
	item.spec = &eth;
	item.mask = &eth_mask;
	return flow_dv_convert_modify_action(&item, modify_eth, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set VLAN VID action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_vlan_vid
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_of_set_vlan_vid *conf =
		(const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
	int i = resource->actions_num;
	struct mlx5_modification_cmd *actions = resource->actions;
	struct field_modify_info *field = modify_vlan_out_first_vid;

	if (i >= MLX5_MAX_MODIFY_NUM)
		return rte_flow_error_set(error, EINVAL,
			 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
			 "too many items to modify");
	actions[i] = (struct mlx5_modification_cmd) {
		.action_type = MLX5_MODIFICATION_TYPE_SET,
		.field = field->id,
		.length = field->size,
		.offset = field->offset,
	};
	actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
	actions[i].data1 = conf->vlan_vid;
	actions[i].data1 = actions[i].data1 << 16;
	resource->actions_num = ++i;
	return 0;
}

/**
 * Convert modify-header set TP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
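 *
 * When the pattern carries no explicit L4 item (e.g. after tunnel
 * decapsulation), flow_dv_attr_init() deduces whether UDP or TCP is being
 * modified from the layers of the prefix sub-flow.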
649 */ 650 static int 651 flow_dv_convert_action_modify_tp 652 (struct mlx5_flow_dv_modify_hdr_resource *resource, 653 const struct rte_flow_action *action, 654 const struct rte_flow_item *items, 655 union flow_dv_attr *attr, struct mlx5_flow *dev_flow, 656 bool tunnel_decap, struct rte_flow_error *error) 657 { 658 const struct rte_flow_action_set_tp *conf = 659 (const struct rte_flow_action_set_tp *)(action->conf); 660 struct rte_flow_item item; 661 struct rte_flow_item_udp udp; 662 struct rte_flow_item_udp udp_mask; 663 struct rte_flow_item_tcp tcp; 664 struct rte_flow_item_tcp tcp_mask; 665 struct field_modify_info *field; 666 667 if (!attr->valid) 668 flow_dv_attr_init(items, attr, dev_flow, tunnel_decap); 669 if (attr->udp) { 670 memset(&udp, 0, sizeof(udp)); 671 memset(&udp_mask, 0, sizeof(udp_mask)); 672 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) { 673 udp.hdr.src_port = conf->port; 674 udp_mask.hdr.src_port = 675 rte_flow_item_udp_mask.hdr.src_port; 676 } else { 677 udp.hdr.dst_port = conf->port; 678 udp_mask.hdr.dst_port = 679 rte_flow_item_udp_mask.hdr.dst_port; 680 } 681 item.type = RTE_FLOW_ITEM_TYPE_UDP; 682 item.spec = &udp; 683 item.mask = &udp_mask; 684 field = modify_udp; 685 } else { 686 MLX5_ASSERT(attr->tcp); 687 memset(&tcp, 0, sizeof(tcp)); 688 memset(&tcp_mask, 0, sizeof(tcp_mask)); 689 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) { 690 tcp.hdr.src_port = conf->port; 691 tcp_mask.hdr.src_port = 692 rte_flow_item_tcp_mask.hdr.src_port; 693 } else { 694 tcp.hdr.dst_port = conf->port; 695 tcp_mask.hdr.dst_port = 696 rte_flow_item_tcp_mask.hdr.dst_port; 697 } 698 item.type = RTE_FLOW_ITEM_TYPE_TCP; 699 item.spec = &tcp; 700 item.mask = &tcp_mask; 701 field = modify_tcp; 702 } 703 return flow_dv_convert_modify_action(&item, field, NULL, resource, 704 MLX5_MODIFICATION_TYPE_SET, error); 705 } 706 707 /** 708 * Convert modify-header set TTL action to DV specification. 709 * 710 * @param[in,out] resource 711 * Pointer to the modify-header resource. 712 * @param[in] action 713 * Pointer to action specification. 714 * @param[in] items 715 * Pointer to rte_flow_item objects list. 716 * @param[in] attr 717 * Pointer to flow attributes structure. 718 * @param[in] dev_flow 719 * Pointer to the sub flow. 720 * @param[in] tunnel_decap 721 * Whether action is after tunnel decapsulation. 722 * @param[out] error 723 * Pointer to the error structure. 724 * 725 * @return 726 * 0 on success, a negative errno value otherwise and rte_errno is set. 
727 */ 728 static int 729 flow_dv_convert_action_modify_ttl 730 (struct mlx5_flow_dv_modify_hdr_resource *resource, 731 const struct rte_flow_action *action, 732 const struct rte_flow_item *items, 733 union flow_dv_attr *attr, struct mlx5_flow *dev_flow, 734 bool tunnel_decap, struct rte_flow_error *error) 735 { 736 const struct rte_flow_action_set_ttl *conf = 737 (const struct rte_flow_action_set_ttl *)(action->conf); 738 struct rte_flow_item item; 739 struct rte_flow_item_ipv4 ipv4; 740 struct rte_flow_item_ipv4 ipv4_mask; 741 struct rte_flow_item_ipv6 ipv6; 742 struct rte_flow_item_ipv6 ipv6_mask; 743 struct field_modify_info *field; 744 745 if (!attr->valid) 746 flow_dv_attr_init(items, attr, dev_flow, tunnel_decap); 747 if (attr->ipv4) { 748 memset(&ipv4, 0, sizeof(ipv4)); 749 memset(&ipv4_mask, 0, sizeof(ipv4_mask)); 750 ipv4.hdr.time_to_live = conf->ttl_value; 751 ipv4_mask.hdr.time_to_live = 0xFF; 752 item.type = RTE_FLOW_ITEM_TYPE_IPV4; 753 item.spec = &ipv4; 754 item.mask = &ipv4_mask; 755 field = modify_ipv4; 756 } else { 757 MLX5_ASSERT(attr->ipv6); 758 memset(&ipv6, 0, sizeof(ipv6)); 759 memset(&ipv6_mask, 0, sizeof(ipv6_mask)); 760 ipv6.hdr.hop_limits = conf->ttl_value; 761 ipv6_mask.hdr.hop_limits = 0xFF; 762 item.type = RTE_FLOW_ITEM_TYPE_IPV6; 763 item.spec = &ipv6; 764 item.mask = &ipv6_mask; 765 field = modify_ipv6; 766 } 767 return flow_dv_convert_modify_action(&item, field, NULL, resource, 768 MLX5_MODIFICATION_TYPE_SET, error); 769 } 770 771 /** 772 * Convert modify-header decrement TTL action to DV specification. 773 * 774 * @param[in,out] resource 775 * Pointer to the modify-header resource. 776 * @param[in] action 777 * Pointer to action specification. 778 * @param[in] items 779 * Pointer to rte_flow_item objects list. 780 * @param[in] attr 781 * Pointer to flow attributes structure. 782 * @param[in] dev_flow 783 * Pointer to the sub flow. 784 * @param[in] tunnel_decap 785 * Whether action is after tunnel decapsulation. 786 * @param[out] error 787 * Pointer to the error structure. 788 * 789 * @return 790 * 0 on success, a negative errno value otherwise and rte_errno is set. 791 */ 792 static int 793 flow_dv_convert_action_modify_dec_ttl 794 (struct mlx5_flow_dv_modify_hdr_resource *resource, 795 const struct rte_flow_item *items, 796 union flow_dv_attr *attr, struct mlx5_flow *dev_flow, 797 bool tunnel_decap, struct rte_flow_error *error) 798 { 799 struct rte_flow_item item; 800 struct rte_flow_item_ipv4 ipv4; 801 struct rte_flow_item_ipv4 ipv4_mask; 802 struct rte_flow_item_ipv6 ipv6; 803 struct rte_flow_item_ipv6 ipv6_mask; 804 struct field_modify_info *field; 805 806 if (!attr->valid) 807 flow_dv_attr_init(items, attr, dev_flow, tunnel_decap); 808 if (attr->ipv4) { 809 memset(&ipv4, 0, sizeof(ipv4)); 810 memset(&ipv4_mask, 0, sizeof(ipv4_mask)); 811 ipv4.hdr.time_to_live = 0xFF; 812 ipv4_mask.hdr.time_to_live = 0xFF; 813 item.type = RTE_FLOW_ITEM_TYPE_IPV4; 814 item.spec = &ipv4; 815 item.mask = &ipv4_mask; 816 field = modify_ipv4; 817 } else { 818 MLX5_ASSERT(attr->ipv6); 819 memset(&ipv6, 0, sizeof(ipv6)); 820 memset(&ipv6_mask, 0, sizeof(ipv6_mask)); 821 ipv6.hdr.hop_limits = 0xFF; 822 ipv6_mask.hdr.hop_limits = 0xFF; 823 item.type = RTE_FLOW_ITEM_TYPE_IPV6; 824 item.spec = &ipv6; 825 item.mask = &ipv6_mask; 826 field = modify_ipv6; 827 } 828 return flow_dv_convert_modify_action(&item, field, NULL, resource, 829 MLX5_MODIFICATION_TYPE_ADD, error); 830 } 831 832 /** 833 * Convert modify-header increment/decrement TCP Sequence number 834 * to DV specification. 
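 *
 * The hardware has no decrement operation, so a decrement is emulated by an
 * addition that wraps around the field width;
 * flow_dv_convert_action_modify_dec_ttl() above relies on the same trick,
 * adding 0xff to the 8-bit TTL field to decrement it by one.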
835 * 836 * @param[in,out] resource 837 * Pointer to the modify-header resource. 838 * @param[in] action 839 * Pointer to action specification. 840 * @param[out] error 841 * Pointer to the error structure. 842 * 843 * @return 844 * 0 on success, a negative errno value otherwise and rte_errno is set. 845 */ 846 static int 847 flow_dv_convert_action_modify_tcp_seq 848 (struct mlx5_flow_dv_modify_hdr_resource *resource, 849 const struct rte_flow_action *action, 850 struct rte_flow_error *error) 851 { 852 const rte_be32_t *conf = (const rte_be32_t *)(action->conf); 853 uint64_t value = rte_be_to_cpu_32(*conf); 854 struct rte_flow_item item; 855 struct rte_flow_item_tcp tcp; 856 struct rte_flow_item_tcp tcp_mask; 857 858 memset(&tcp, 0, sizeof(tcp)); 859 memset(&tcp_mask, 0, sizeof(tcp_mask)); 860 if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ) 861 /* 862 * The HW has no decrement operation, only increment operation. 863 * To simulate decrement X from Y using increment operation 864 * we need to add UINT32_MAX X times to Y. 865 * Each adding of UINT32_MAX decrements Y by 1. 866 */ 867 value *= UINT32_MAX; 868 tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value); 869 tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX); 870 item.type = RTE_FLOW_ITEM_TYPE_TCP; 871 item.spec = &tcp; 872 item.mask = &tcp_mask; 873 return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource, 874 MLX5_MODIFICATION_TYPE_ADD, error); 875 } 876 877 /** 878 * Convert modify-header increment/decrement TCP Acknowledgment number 879 * to DV specification. 880 * 881 * @param[in,out] resource 882 * Pointer to the modify-header resource. 883 * @param[in] action 884 * Pointer to action specification. 885 * @param[out] error 886 * Pointer to the error structure. 887 * 888 * @return 889 * 0 on success, a negative errno value otherwise and rte_errno is set. 890 */ 891 static int 892 flow_dv_convert_action_modify_tcp_ack 893 (struct mlx5_flow_dv_modify_hdr_resource *resource, 894 const struct rte_flow_action *action, 895 struct rte_flow_error *error) 896 { 897 const rte_be32_t *conf = (const rte_be32_t *)(action->conf); 898 uint64_t value = rte_be_to_cpu_32(*conf); 899 struct rte_flow_item item; 900 struct rte_flow_item_tcp tcp; 901 struct rte_flow_item_tcp tcp_mask; 902 903 memset(&tcp, 0, sizeof(tcp)); 904 memset(&tcp_mask, 0, sizeof(tcp_mask)); 905 if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK) 906 /* 907 * The HW has no decrement operation, only increment operation. 908 * To simulate decrement X from Y using increment operation 909 * we need to add UINT32_MAX X times to Y. 910 * Each adding of UINT32_MAX decrements Y by 1. 
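		 * For instance, a requested decrement of 3 becomes
		 * 3 * UINT32_MAX = 0x2fffffffd, which truncates to
		 * 0xfffffffd in the 32-bit field, i.e. -3 modulo 2^32.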
911 */ 912 value *= UINT32_MAX; 913 tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value); 914 tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX); 915 item.type = RTE_FLOW_ITEM_TYPE_TCP; 916 item.spec = &tcp; 917 item.mask = &tcp_mask; 918 return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource, 919 MLX5_MODIFICATION_TYPE_ADD, error); 920 } 921 922 static enum mlx5_modification_field reg_to_field[] = { 923 [REG_NON] = MLX5_MODI_OUT_NONE, 924 [REG_A] = MLX5_MODI_META_DATA_REG_A, 925 [REG_B] = MLX5_MODI_META_DATA_REG_B, 926 [REG_C_0] = MLX5_MODI_META_REG_C_0, 927 [REG_C_1] = MLX5_MODI_META_REG_C_1, 928 [REG_C_2] = MLX5_MODI_META_REG_C_2, 929 [REG_C_3] = MLX5_MODI_META_REG_C_3, 930 [REG_C_4] = MLX5_MODI_META_REG_C_4, 931 [REG_C_5] = MLX5_MODI_META_REG_C_5, 932 [REG_C_6] = MLX5_MODI_META_REG_C_6, 933 [REG_C_7] = MLX5_MODI_META_REG_C_7, 934 }; 935 936 /** 937 * Convert register set to DV specification. 938 * 939 * @param[in,out] resource 940 * Pointer to the modify-header resource. 941 * @param[in] action 942 * Pointer to action specification. 943 * @param[out] error 944 * Pointer to the error structure. 945 * 946 * @return 947 * 0 on success, a negative errno value otherwise and rte_errno is set. 948 */ 949 static int 950 flow_dv_convert_action_set_reg 951 (struct mlx5_flow_dv_modify_hdr_resource *resource, 952 const struct rte_flow_action *action, 953 struct rte_flow_error *error) 954 { 955 const struct mlx5_rte_flow_action_set_tag *conf = action->conf; 956 struct mlx5_modification_cmd *actions = resource->actions; 957 uint32_t i = resource->actions_num; 958 959 if (i >= MLX5_MAX_MODIFY_NUM) 960 return rte_flow_error_set(error, EINVAL, 961 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 962 "too many items to modify"); 963 MLX5_ASSERT(conf->id != REG_NON); 964 MLX5_ASSERT(conf->id < (enum modify_reg)RTE_DIM(reg_to_field)); 965 actions[i] = (struct mlx5_modification_cmd) { 966 .action_type = MLX5_MODIFICATION_TYPE_SET, 967 .field = reg_to_field[conf->id], 968 }; 969 actions[i].data0 = rte_cpu_to_be_32(actions[i].data0); 970 actions[i].data1 = rte_cpu_to_be_32(conf->data); 971 ++i; 972 resource->actions_num = i; 973 return 0; 974 } 975 976 /** 977 * Convert SET_TAG action to DV specification. 978 * 979 * @param[in] dev 980 * Pointer to the rte_eth_dev structure. 981 * @param[in,out] resource 982 * Pointer to the modify-header resource. 983 * @param[in] conf 984 * Pointer to action specification. 985 * @param[out] error 986 * Pointer to the error structure. 987 * 988 * @return 989 * 0 on success, a negative errno value otherwise and rte_errno is set. 
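 *
 * The @p conf->index selects one of the REG_C_x metadata registers via
 * mlx5_flow_get_reg_id(MLX5_APP_TAG, index); the register is then written
 * through a generic MLX5_MODIFICATION_TYPE_SET command.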
 */
static int
flow_dv_convert_action_set_tag
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action_set_tag *conf,
			 struct rte_flow_error *error)
{
	rte_be32_t data = rte_cpu_to_be_32(conf->data);
	rte_be32_t mask = rte_cpu_to_be_32(conf->mask);
	struct rte_flow_item item = {
		.spec = &data,
		.mask = &mask,
	};
	struct field_modify_info reg_c_x[] = {
		[1] = {0, 0, 0},
	};
	enum mlx5_modification_field reg_type;
	int ret;

	ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
	if (ret < 0)
		return ret;
	MLX5_ASSERT(ret != REG_NON);
	MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field));
	reg_type = reg_to_field[ret];
	MLX5_ASSERT(reg_type > 0);
	reg_c_x[0] = (struct field_modify_info){4, 0, reg_type};
	return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert internal COPY_REG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] res
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev,
				 struct mlx5_flow_dv_modify_hdr_resource *res,
				 const struct rte_flow_action *action,
				 struct rte_flow_error *error)
{
	const struct mlx5_flow_action_copy_mreg *conf = action->conf;
	rte_be32_t mask = RTE_BE32(UINT32_MAX);
	struct rte_flow_item item = {
		.spec = NULL,
		.mask = &mask,
	};
	struct field_modify_info reg_src[] = {
		{4, 0, reg_to_field[conf->src]},
		{0, 0, 0},
	};
	struct field_modify_info reg_dst = {
		.offset = 0,
		.id = reg_to_field[conf->dst],
	};
	/* Adjust reg_c[0] usage according to reported mask. */
	if (conf->dst == REG_C_0 || conf->src == REG_C_0) {
		struct mlx5_priv *priv = dev->data->dev_private;
		uint32_t reg_c0 = priv->sh->dv_regc0_mask;

		MLX5_ASSERT(reg_c0);
		MLX5_ASSERT(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
		if (conf->dst == REG_C_0) {
			/* Copy to reg_c[0], within mask only. */
			reg_dst.offset = rte_bsf32(reg_c0);
			/*
			 * Mask is ignoring the endianness, because
			 * there is no conversion in datapath.
			 */
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
			/* Copy from destination lower bits to reg_c[0]. */
			mask = reg_c0 >> reg_dst.offset;
#else
			/* Copy from destination upper bits to reg_c[0]. */
			mask = reg_c0 << (sizeof(reg_c0) * CHAR_BIT -
					  rte_fls_u32(reg_c0));
#endif
		} else {
			mask = rte_cpu_to_be_32(reg_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
			/* Copy from reg_c[0] to destination lower bits. */
			reg_dst.offset = 0;
#else
			/* Copy from reg_c[0] to destination upper bits. */
			reg_dst.offset = sizeof(reg_c0) * CHAR_BIT -
					 (rte_fls_u32(reg_c0) -
					  rte_bsf32(reg_c0));
#endif
		}
	}
	return flow_dv_convert_modify_action(&item,
					     reg_src, &reg_dst, res,
					     MLX5_MODIFICATION_TYPE_COPY,
					     error);
}

/**
 * Convert MARK action to DV specification.
This routine is used 1100 * in extensive metadata only and requires metadata register to be 1101 * handled. In legacy mode hardware tag resource is engaged. 1102 * 1103 * @param[in] dev 1104 * Pointer to the rte_eth_dev structure. 1105 * @param[in] conf 1106 * Pointer to MARK action specification. 1107 * @param[in,out] resource 1108 * Pointer to the modify-header resource. 1109 * @param[out] error 1110 * Pointer to the error structure. 1111 * 1112 * @return 1113 * 0 on success, a negative errno value otherwise and rte_errno is set. 1114 */ 1115 static int 1116 flow_dv_convert_action_mark(struct rte_eth_dev *dev, 1117 const struct rte_flow_action_mark *conf, 1118 struct mlx5_flow_dv_modify_hdr_resource *resource, 1119 struct rte_flow_error *error) 1120 { 1121 struct mlx5_priv *priv = dev->data->dev_private; 1122 rte_be32_t mask = rte_cpu_to_be_32(MLX5_FLOW_MARK_MASK & 1123 priv->sh->dv_mark_mask); 1124 rte_be32_t data = rte_cpu_to_be_32(conf->id) & mask; 1125 struct rte_flow_item item = { 1126 .spec = &data, 1127 .mask = &mask, 1128 }; 1129 struct field_modify_info reg_c_x[] = { 1130 [1] = {0, 0, 0}, 1131 }; 1132 int reg; 1133 1134 if (!mask) 1135 return rte_flow_error_set(error, EINVAL, 1136 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1137 NULL, "zero mark action mask"); 1138 reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error); 1139 if (reg < 0) 1140 return reg; 1141 MLX5_ASSERT(reg > 0); 1142 if (reg == REG_C_0) { 1143 uint32_t msk_c0 = priv->sh->dv_regc0_mask; 1144 uint32_t shl_c0 = rte_bsf32(msk_c0); 1145 1146 data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0); 1147 mask = rte_cpu_to_be_32(mask) & msk_c0; 1148 mask = rte_cpu_to_be_32(mask << shl_c0); 1149 } 1150 reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]}; 1151 return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource, 1152 MLX5_MODIFICATION_TYPE_SET, error); 1153 } 1154 1155 /** 1156 * Get metadata register index for specified steering domain. 1157 * 1158 * @param[in] dev 1159 * Pointer to the rte_eth_dev structure. 1160 * @param[in] attr 1161 * Attributes of flow to determine steering domain. 1162 * @param[out] error 1163 * Pointer to the error structure. 1164 * 1165 * @return 1166 * positive index on success, a negative errno value otherwise 1167 * and rte_errno is set. 1168 */ 1169 static enum modify_reg 1170 flow_dv_get_metadata_reg(struct rte_eth_dev *dev, 1171 const struct rte_flow_attr *attr, 1172 struct rte_flow_error *error) 1173 { 1174 int reg = 1175 mlx5_flow_get_reg_id(dev, attr->transfer ? 1176 MLX5_METADATA_FDB : 1177 attr->egress ? 1178 MLX5_METADATA_TX : 1179 MLX5_METADATA_RX, 0, error); 1180 if (reg < 0) 1181 return rte_flow_error_set(error, 1182 ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, 1183 NULL, "unavailable " 1184 "metadata register"); 1185 return reg; 1186 } 1187 1188 /** 1189 * Convert SET_META action to DV specification. 1190 * 1191 * @param[in] dev 1192 * Pointer to the rte_eth_dev structure. 1193 * @param[in,out] resource 1194 * Pointer to the modify-header resource. 1195 * @param[in] attr 1196 * Attributes of flow that includes this item. 1197 * @param[in] conf 1198 * Pointer to action specification. 1199 * @param[out] error 1200 * Pointer to the error structure. 1201 * 1202 * @return 1203 * 0 on success, a negative errno value otherwise and rte_errno is set. 
 */
static int
flow_dv_convert_action_set_meta
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_attr *attr,
			 const struct rte_flow_action_set_meta *conf,
			 struct rte_flow_error *error)
{
	uint32_t data = conf->data;
	uint32_t mask = conf->mask;
	struct rte_flow_item item = {
		.spec = &data,
		.mask = &mask,
	};
	struct field_modify_info reg_c_x[] = {
		[1] = {0, 0, 0},
	};
	int reg = flow_dv_get_metadata_reg(dev, attr, error);

	if (reg < 0)
		return reg;
	MLX5_ASSERT(reg != REG_NON);
	/*
	 * In datapath code there are no endianness
	 * conversions for performance reasons, all
	 * pattern conversions are done in rte_flow.
	 */
	if (reg == REG_C_0) {
		struct mlx5_priv *priv = dev->data->dev_private;
		uint32_t msk_c0 = priv->sh->dv_regc0_mask;
		uint32_t shl_c0;

		MLX5_ASSERT(msk_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
		shl_c0 = rte_bsf32(msk_c0);
#else
		shl_c0 = sizeof(msk_c0) * CHAR_BIT - rte_fls_u32(msk_c0);
#endif
		mask <<= shl_c0;
		data <<= shl_c0;
		MLX5_ASSERT(!(~msk_c0 & rte_cpu_to_be_32(mask)));
	}
	reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
	/* The routine expects parameters in memory as big-endian ones. */
	return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv4 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4_dscp
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_dscp *conf =
		(const struct rte_flow_action_set_dscp *)(action->conf);
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
	struct rte_flow_item_ipv4 ipv4;
	struct rte_flow_item_ipv4 ipv4_mask;

	memset(&ipv4, 0, sizeof(ipv4));
	memset(&ipv4_mask, 0, sizeof(ipv4_mask));
	ipv4.hdr.type_of_service = conf->dscp;
	ipv4_mask.hdr.type_of_service = RTE_IPV4_HDR_DSCP_MASK >> 2;
	item.spec = &ipv4;
	item.mask = &ipv4_mask;
	return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
1300 */ 1301 static int 1302 flow_dv_convert_action_modify_ipv6_dscp 1303 (struct mlx5_flow_dv_modify_hdr_resource *resource, 1304 const struct rte_flow_action *action, 1305 struct rte_flow_error *error) 1306 { 1307 const struct rte_flow_action_set_dscp *conf = 1308 (const struct rte_flow_action_set_dscp *)(action->conf); 1309 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 }; 1310 struct rte_flow_item_ipv6 ipv6; 1311 struct rte_flow_item_ipv6 ipv6_mask; 1312 1313 memset(&ipv6, 0, sizeof(ipv6)); 1314 memset(&ipv6_mask, 0, sizeof(ipv6_mask)); 1315 /* 1316 * Even though the DSCP bits offset of IPv6 is not byte aligned, 1317 * rdma-core only accept the DSCP bits byte aligned start from 1318 * bit 0 to 5 as to be compatible with IPv4. No need to shift the 1319 * bits in IPv6 case as rdma-core requires byte aligned value. 1320 */ 1321 ipv6.hdr.vtc_flow = conf->dscp; 1322 ipv6_mask.hdr.vtc_flow = RTE_IPV6_HDR_DSCP_MASK >> 22; 1323 item.spec = &ipv6; 1324 item.mask = &ipv6_mask; 1325 return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource, 1326 MLX5_MODIFICATION_TYPE_SET, error); 1327 } 1328 1329 static int 1330 mlx5_flow_item_field_width(enum rte_flow_field_id field) 1331 { 1332 switch (field) { 1333 case RTE_FLOW_FIELD_START: 1334 return 32; 1335 case RTE_FLOW_FIELD_MAC_DST: 1336 case RTE_FLOW_FIELD_MAC_SRC: 1337 return 48; 1338 case RTE_FLOW_FIELD_VLAN_TYPE: 1339 return 16; 1340 case RTE_FLOW_FIELD_VLAN_ID: 1341 return 12; 1342 case RTE_FLOW_FIELD_MAC_TYPE: 1343 return 16; 1344 case RTE_FLOW_FIELD_IPV4_DSCP: 1345 return 6; 1346 case RTE_FLOW_FIELD_IPV4_TTL: 1347 return 8; 1348 case RTE_FLOW_FIELD_IPV4_SRC: 1349 case RTE_FLOW_FIELD_IPV4_DST: 1350 return 32; 1351 case RTE_FLOW_FIELD_IPV6_DSCP: 1352 return 6; 1353 case RTE_FLOW_FIELD_IPV6_HOPLIMIT: 1354 return 8; 1355 case RTE_FLOW_FIELD_IPV6_SRC: 1356 case RTE_FLOW_FIELD_IPV6_DST: 1357 return 128; 1358 case RTE_FLOW_FIELD_TCP_PORT_SRC: 1359 case RTE_FLOW_FIELD_TCP_PORT_DST: 1360 return 16; 1361 case RTE_FLOW_FIELD_TCP_SEQ_NUM: 1362 case RTE_FLOW_FIELD_TCP_ACK_NUM: 1363 return 32; 1364 case RTE_FLOW_FIELD_TCP_FLAGS: 1365 return 6; 1366 case RTE_FLOW_FIELD_UDP_PORT_SRC: 1367 case RTE_FLOW_FIELD_UDP_PORT_DST: 1368 return 16; 1369 case RTE_FLOW_FIELD_VXLAN_VNI: 1370 case RTE_FLOW_FIELD_GENEVE_VNI: 1371 return 24; 1372 case RTE_FLOW_FIELD_GTP_TEID: 1373 case RTE_FLOW_FIELD_TAG: 1374 return 32; 1375 case RTE_FLOW_FIELD_MARK: 1376 return 24; 1377 case RTE_FLOW_FIELD_META: 1378 return 32; 1379 case RTE_FLOW_FIELD_POINTER: 1380 case RTE_FLOW_FIELD_VALUE: 1381 return 64; 1382 default: 1383 MLX5_ASSERT(false); 1384 } 1385 return 0; 1386 } 1387 1388 static void 1389 mlx5_flow_field_id_to_modify_info 1390 (const struct rte_flow_action_modify_data *data, 1391 struct field_modify_info *info, 1392 uint32_t *mask, uint32_t *value, 1393 uint32_t width, uint32_t dst_width, 1394 struct rte_eth_dev *dev, 1395 const struct rte_flow_attr *attr, 1396 struct rte_flow_error *error) 1397 { 1398 uint32_t idx = 0; 1399 uint64_t val = 0; 1400 switch (data->field) { 1401 case RTE_FLOW_FIELD_START: 1402 /* not supported yet */ 1403 MLX5_ASSERT(false); 1404 break; 1405 case RTE_FLOW_FIELD_MAC_DST: 1406 if (mask) { 1407 if (data->offset < 32) { 1408 info[idx] = (struct field_modify_info){4, 0, 1409 MLX5_MODI_OUT_DMAC_47_16}; 1410 if (width < 32) { 1411 mask[idx] = 1412 rte_cpu_to_be_32(0xffffffff >> 1413 (32 - width)); 1414 width = 0; 1415 } else { 1416 mask[idx] = RTE_BE32(0xffffffff); 1417 width -= 32; 1418 } 1419 if (!width) 1420 break; 1421 
++idx; 1422 } 1423 info[idx] = (struct field_modify_info){2, 4 * idx, 1424 MLX5_MODI_OUT_DMAC_15_0}; 1425 mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width)); 1426 } else { 1427 if (data->offset < 32) 1428 info[idx++] = (struct field_modify_info){4, 0, 1429 MLX5_MODI_OUT_DMAC_47_16}; 1430 info[idx] = (struct field_modify_info){2, 0, 1431 MLX5_MODI_OUT_DMAC_15_0}; 1432 } 1433 break; 1434 case RTE_FLOW_FIELD_MAC_SRC: 1435 if (mask) { 1436 if (data->offset < 32) { 1437 info[idx] = (struct field_modify_info){4, 0, 1438 MLX5_MODI_OUT_SMAC_47_16}; 1439 if (width < 32) { 1440 mask[idx] = 1441 rte_cpu_to_be_32(0xffffffff >> 1442 (32 - width)); 1443 width = 0; 1444 } else { 1445 mask[idx] = RTE_BE32(0xffffffff); 1446 width -= 32; 1447 } 1448 if (!width) 1449 break; 1450 ++idx; 1451 } 1452 info[idx] = (struct field_modify_info){2, 4 * idx, 1453 MLX5_MODI_OUT_SMAC_15_0}; 1454 mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width)); 1455 } else { 1456 if (data->offset < 32) 1457 info[idx++] = (struct field_modify_info){4, 0, 1458 MLX5_MODI_OUT_SMAC_47_16}; 1459 info[idx] = (struct field_modify_info){2, 0, 1460 MLX5_MODI_OUT_SMAC_15_0}; 1461 } 1462 break; 1463 case RTE_FLOW_FIELD_VLAN_TYPE: 1464 /* not supported yet */ 1465 break; 1466 case RTE_FLOW_FIELD_VLAN_ID: 1467 info[idx] = (struct field_modify_info){2, 0, 1468 MLX5_MODI_OUT_FIRST_VID}; 1469 if (mask) 1470 mask[idx] = rte_cpu_to_be_16(0x0fff >> (12 - width)); 1471 break; 1472 case RTE_FLOW_FIELD_MAC_TYPE: 1473 info[idx] = (struct field_modify_info){2, 0, 1474 MLX5_MODI_OUT_ETHERTYPE}; 1475 if (mask) 1476 mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width)); 1477 break; 1478 case RTE_FLOW_FIELD_IPV4_DSCP: 1479 info[idx] = (struct field_modify_info){1, 0, 1480 MLX5_MODI_OUT_IP_DSCP}; 1481 if (mask) 1482 mask[idx] = 0x3f >> (6 - width); 1483 break; 1484 case RTE_FLOW_FIELD_IPV4_TTL: 1485 info[idx] = (struct field_modify_info){1, 0, 1486 MLX5_MODI_OUT_IPV4_TTL}; 1487 if (mask) 1488 mask[idx] = 0xff >> (8 - width); 1489 break; 1490 case RTE_FLOW_FIELD_IPV4_SRC: 1491 info[idx] = (struct field_modify_info){4, 0, 1492 MLX5_MODI_OUT_SIPV4}; 1493 if (mask) 1494 mask[idx] = rte_cpu_to_be_32(0xffffffff >> 1495 (32 - width)); 1496 break; 1497 case RTE_FLOW_FIELD_IPV4_DST: 1498 info[idx] = (struct field_modify_info){4, 0, 1499 MLX5_MODI_OUT_DIPV4}; 1500 if (mask) 1501 mask[idx] = rte_cpu_to_be_32(0xffffffff >> 1502 (32 - width)); 1503 break; 1504 case RTE_FLOW_FIELD_IPV6_DSCP: 1505 info[idx] = (struct field_modify_info){1, 0, 1506 MLX5_MODI_OUT_IP_DSCP}; 1507 if (mask) 1508 mask[idx] = 0x3f >> (6 - width); 1509 break; 1510 case RTE_FLOW_FIELD_IPV6_HOPLIMIT: 1511 info[idx] = (struct field_modify_info){1, 0, 1512 MLX5_MODI_OUT_IPV6_HOPLIMIT}; 1513 if (mask) 1514 mask[idx] = 0xff >> (8 - width); 1515 break; 1516 case RTE_FLOW_FIELD_IPV6_SRC: 1517 if (mask) { 1518 if (data->offset < 32) { 1519 info[idx] = (struct field_modify_info){4, 1520 4 * idx, 1521 MLX5_MODI_OUT_SIPV6_31_0}; 1522 if (width < 32) { 1523 mask[idx] = 1524 rte_cpu_to_be_32(0xffffffff >> 1525 (32 - width)); 1526 width = 0; 1527 } else { 1528 mask[idx] = RTE_BE32(0xffffffff); 1529 width -= 32; 1530 } 1531 if (!width) 1532 break; 1533 ++idx; 1534 } 1535 if (data->offset < 64) { 1536 info[idx] = (struct field_modify_info){4, 1537 4 * idx, 1538 MLX5_MODI_OUT_SIPV6_63_32}; 1539 if (width < 32) { 1540 mask[idx] = 1541 rte_cpu_to_be_32(0xffffffff >> 1542 (32 - width)); 1543 width = 0; 1544 } else { 1545 mask[idx] = RTE_BE32(0xffffffff); 1546 width -= 32; 1547 } 1548 if (!width) 1549 break; 1550 ++idx; 
1551 } 1552 if (data->offset < 96) { 1553 info[idx] = (struct field_modify_info){4, 1554 4 * idx, 1555 MLX5_MODI_OUT_SIPV6_95_64}; 1556 if (width < 32) { 1557 mask[idx] = 1558 rte_cpu_to_be_32(0xffffffff >> 1559 (32 - width)); 1560 width = 0; 1561 } else { 1562 mask[idx] = RTE_BE32(0xffffffff); 1563 width -= 32; 1564 } 1565 if (!width) 1566 break; 1567 ++idx; 1568 } 1569 info[idx] = (struct field_modify_info){4, 4 * idx, 1570 MLX5_MODI_OUT_SIPV6_127_96}; 1571 mask[idx] = rte_cpu_to_be_32(0xffffffff >> 1572 (32 - width)); 1573 } else { 1574 if (data->offset < 32) 1575 info[idx++] = (struct field_modify_info){4, 0, 1576 MLX5_MODI_OUT_SIPV6_31_0}; 1577 if (data->offset < 64) 1578 info[idx++] = (struct field_modify_info){4, 0, 1579 MLX5_MODI_OUT_SIPV6_63_32}; 1580 if (data->offset < 96) 1581 info[idx++] = (struct field_modify_info){4, 0, 1582 MLX5_MODI_OUT_SIPV6_95_64}; 1583 if (data->offset < 128) 1584 info[idx++] = (struct field_modify_info){4, 0, 1585 MLX5_MODI_OUT_SIPV6_127_96}; 1586 } 1587 break; 1588 case RTE_FLOW_FIELD_IPV6_DST: 1589 if (mask) { 1590 if (data->offset < 32) { 1591 info[idx] = (struct field_modify_info){4, 1592 4 * idx, 1593 MLX5_MODI_OUT_DIPV6_31_0}; 1594 if (width < 32) { 1595 mask[idx] = 1596 rte_cpu_to_be_32(0xffffffff >> 1597 (32 - width)); 1598 width = 0; 1599 } else { 1600 mask[idx] = RTE_BE32(0xffffffff); 1601 width -= 32; 1602 } 1603 if (!width) 1604 break; 1605 ++idx; 1606 } 1607 if (data->offset < 64) { 1608 info[idx] = (struct field_modify_info){4, 1609 4 * idx, 1610 MLX5_MODI_OUT_DIPV6_63_32}; 1611 if (width < 32) { 1612 mask[idx] = 1613 rte_cpu_to_be_32(0xffffffff >> 1614 (32 - width)); 1615 width = 0; 1616 } else { 1617 mask[idx] = RTE_BE32(0xffffffff); 1618 width -= 32; 1619 } 1620 if (!width) 1621 break; 1622 ++idx; 1623 } 1624 if (data->offset < 96) { 1625 info[idx] = (struct field_modify_info){4, 1626 4 * idx, 1627 MLX5_MODI_OUT_DIPV6_95_64}; 1628 if (width < 32) { 1629 mask[idx] = 1630 rte_cpu_to_be_32(0xffffffff >> 1631 (32 - width)); 1632 width = 0; 1633 } else { 1634 mask[idx] = RTE_BE32(0xffffffff); 1635 width -= 32; 1636 } 1637 if (!width) 1638 break; 1639 ++idx; 1640 } 1641 info[idx] = (struct field_modify_info){4, 4 * idx, 1642 MLX5_MODI_OUT_DIPV6_127_96}; 1643 mask[idx] = rte_cpu_to_be_32(0xffffffff >> 1644 (32 - width)); 1645 } else { 1646 if (data->offset < 32) 1647 info[idx++] = (struct field_modify_info){4, 0, 1648 MLX5_MODI_OUT_DIPV6_31_0}; 1649 if (data->offset < 64) 1650 info[idx++] = (struct field_modify_info){4, 0, 1651 MLX5_MODI_OUT_DIPV6_63_32}; 1652 if (data->offset < 96) 1653 info[idx++] = (struct field_modify_info){4, 0, 1654 MLX5_MODI_OUT_DIPV6_95_64}; 1655 if (data->offset < 128) 1656 info[idx++] = (struct field_modify_info){4, 0, 1657 MLX5_MODI_OUT_DIPV6_127_96}; 1658 } 1659 break; 1660 case RTE_FLOW_FIELD_TCP_PORT_SRC: 1661 info[idx] = (struct field_modify_info){2, 0, 1662 MLX5_MODI_OUT_TCP_SPORT}; 1663 if (mask) 1664 mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width)); 1665 break; 1666 case RTE_FLOW_FIELD_TCP_PORT_DST: 1667 info[idx] = (struct field_modify_info){2, 0, 1668 MLX5_MODI_OUT_TCP_DPORT}; 1669 if (mask) 1670 mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width)); 1671 break; 1672 case RTE_FLOW_FIELD_TCP_SEQ_NUM: 1673 info[idx] = (struct field_modify_info){4, 0, 1674 MLX5_MODI_OUT_TCP_SEQ_NUM}; 1675 if (mask) 1676 mask[idx] = rte_cpu_to_be_32(0xffffffff >> 1677 (32 - width)); 1678 break; 1679 case RTE_FLOW_FIELD_TCP_ACK_NUM: 1680 info[idx] = (struct field_modify_info){4, 0, 1681 MLX5_MODI_OUT_TCP_ACK_NUM}; 1682 if 
(mask) 1683 mask[idx] = rte_cpu_to_be_32(0xffffffff >> 1684 (32 - width)); 1685 break; 1686 case RTE_FLOW_FIELD_TCP_FLAGS: 1687 info[idx] = (struct field_modify_info){1, 0, 1688 MLX5_MODI_OUT_TCP_FLAGS}; 1689 if (mask) 1690 mask[idx] = 0x3f >> (6 - width); 1691 break; 1692 case RTE_FLOW_FIELD_UDP_PORT_SRC: 1693 info[idx] = (struct field_modify_info){2, 0, 1694 MLX5_MODI_OUT_UDP_SPORT}; 1695 if (mask) 1696 mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width)); 1697 break; 1698 case RTE_FLOW_FIELD_UDP_PORT_DST: 1699 info[idx] = (struct field_modify_info){2, 0, 1700 MLX5_MODI_OUT_UDP_DPORT}; 1701 if (mask) 1702 mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width)); 1703 break; 1704 case RTE_FLOW_FIELD_VXLAN_VNI: 1705 /* not supported yet */ 1706 break; 1707 case RTE_FLOW_FIELD_GENEVE_VNI: 1708 /* not supported yet*/ 1709 break; 1710 case RTE_FLOW_FIELD_GTP_TEID: 1711 info[idx] = (struct field_modify_info){4, 0, 1712 MLX5_MODI_GTP_TEID}; 1713 if (mask) 1714 mask[idx] = rte_cpu_to_be_32(0xffffffff >> 1715 (32 - width)); 1716 break; 1717 case RTE_FLOW_FIELD_TAG: 1718 { 1719 int reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, 1720 data->level, error); 1721 if (reg < 0) 1722 return; 1723 MLX5_ASSERT(reg != REG_NON); 1724 MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field)); 1725 info[idx] = (struct field_modify_info){4, 0, 1726 reg_to_field[reg]}; 1727 if (mask) 1728 mask[idx] = 1729 rte_cpu_to_be_32(0xffffffff >> 1730 (32 - width)); 1731 } 1732 break; 1733 case RTE_FLOW_FIELD_MARK: 1734 { 1735 int reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 1736 0, error); 1737 if (reg < 0) 1738 return; 1739 MLX5_ASSERT(reg != REG_NON); 1740 MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field)); 1741 info[idx] = (struct field_modify_info){4, 0, 1742 reg_to_field[reg]}; 1743 if (mask) 1744 mask[idx] = 1745 rte_cpu_to_be_32(0xffffffff >> 1746 (32 - width)); 1747 } 1748 break; 1749 case RTE_FLOW_FIELD_META: 1750 { 1751 int reg = flow_dv_get_metadata_reg(dev, attr, error); 1752 if (reg < 0) 1753 return; 1754 MLX5_ASSERT(reg != REG_NON); 1755 MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field)); 1756 info[idx] = (struct field_modify_info){4, 0, 1757 reg_to_field[reg]}; 1758 if (mask) 1759 mask[idx] = 1760 rte_cpu_to_be_32(0xffffffff >> 1761 (32 - width)); 1762 } 1763 break; 1764 case RTE_FLOW_FIELD_POINTER: 1765 case RTE_FLOW_FIELD_VALUE: 1766 if (data->field == RTE_FLOW_FIELD_POINTER) 1767 memcpy(&val, (void *)(uintptr_t)data->value, 1768 sizeof(uint64_t)); 1769 else 1770 val = data->value; 1771 for (idx = 0; idx < MLX5_ACT_MAX_MOD_FIELDS; idx++) { 1772 if (mask[idx]) { 1773 if (dst_width > 16) { 1774 value[idx] = rte_cpu_to_be_32(val); 1775 val >>= 32; 1776 } else if (dst_width > 8) { 1777 value[idx] = rte_cpu_to_be_16(val); 1778 val >>= 16; 1779 } else { 1780 value[idx] = (uint8_t)val; 1781 val >>= 8; 1782 } 1783 if (!val) 1784 break; 1785 } 1786 } 1787 break; 1788 default: 1789 MLX5_ASSERT(false); 1790 break; 1791 } 1792 } 1793 1794 /** 1795 * Convert modify_field action to DV specification. 1796 * 1797 * @param[in] dev 1798 * Pointer to the rte_eth_dev structure. 1799 * @param[in,out] resource 1800 * Pointer to the modify-header resource. 1801 * @param[in] action 1802 * Pointer to action specification. 1803 * @param[in] attr 1804 * Attributes of flow that includes this item. 1805 * @param[out] error 1806 * Pointer to the error structure. 1807 * 1808 * @return 1809 * 0 on success, a negative errno value otherwise and rte_errno is set. 
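 *
 * For illustration only (the exact structure layout follows the rte_flow
 * API of this DPDK revision), a SET action handled by this routine could
 * be built as:
 *
 *   struct rte_flow_action_modify_field conf = {
 *       .operation = RTE_FLOW_MODIFY_SET,
 *       .dst = { .field = RTE_FLOW_FIELD_IPV4_TTL },
 *       .src = { .field = RTE_FLOW_FIELD_VALUE, .value = 64 },
 *       .width = 8,
 *   };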
1810 */ 1811 static int 1812 flow_dv_convert_action_modify_field 1813 (struct rte_eth_dev *dev, 1814 struct mlx5_flow_dv_modify_hdr_resource *resource, 1815 const struct rte_flow_action *action, 1816 const struct rte_flow_attr *attr, 1817 struct rte_flow_error *error) 1818 { 1819 const struct rte_flow_action_modify_field *conf = 1820 (const struct rte_flow_action_modify_field *)(action->conf); 1821 struct rte_flow_item item; 1822 struct field_modify_info field[MLX5_ACT_MAX_MOD_FIELDS] = { 1823 {0, 0, 0} }; 1824 struct field_modify_info dcopy[MLX5_ACT_MAX_MOD_FIELDS] = { 1825 {0, 0, 0} }; 1826 uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0}; 1827 uint32_t value[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0}; 1828 uint32_t type; 1829 uint32_t dst_width = mlx5_flow_item_field_width(conf->dst.field); 1830 1831 if (conf->src.field == RTE_FLOW_FIELD_POINTER || 1832 conf->src.field == RTE_FLOW_FIELD_VALUE) { 1833 type = MLX5_MODIFICATION_TYPE_SET; 1834 /** For SET fill the destination field (field) first. */ 1835 mlx5_flow_field_id_to_modify_info(&conf->dst, field, mask, 1836 value, conf->width, dst_width, dev, attr, error); 1837 /** Then copy immediate value from source as per mask. */ 1838 mlx5_flow_field_id_to_modify_info(&conf->src, dcopy, mask, 1839 value, conf->width, dst_width, dev, attr, error); 1840 item.spec = &value; 1841 } else { 1842 type = MLX5_MODIFICATION_TYPE_COPY; 1843 /** For COPY fill the destination field (dcopy) without mask. */ 1844 mlx5_flow_field_id_to_modify_info(&conf->dst, dcopy, NULL, 1845 value, conf->width, dst_width, dev, attr, error); 1846 /** Then construct the source field (field) with mask. */ 1847 mlx5_flow_field_id_to_modify_info(&conf->src, field, mask, 1848 value, conf->width, dst_width, dev, attr, error); 1849 } 1850 item.mask = &mask; 1851 return flow_dv_convert_modify_action(&item, 1852 field, dcopy, resource, type, error); 1853 } 1854 1855 /** 1856 * Validate MARK item. 1857 * 1858 * @param[in] dev 1859 * Pointer to the rte_eth_dev structure. 1860 * @param[in] item 1861 * Item specification. 1862 * @param[in] attr 1863 * Attributes of flow that includes this item. 1864 * @param[out] error 1865 * Pointer to error structure. 1866 * 1867 * @return 1868 * 0 on success, a negative errno value otherwise and rte_errno is set. 
1869 */ 1870 static int 1871 flow_dv_validate_item_mark(struct rte_eth_dev *dev, 1872 const struct rte_flow_item *item, 1873 const struct rte_flow_attr *attr __rte_unused, 1874 struct rte_flow_error *error) 1875 { 1876 struct mlx5_priv *priv = dev->data->dev_private; 1877 struct mlx5_dev_config *config = &priv->config; 1878 const struct rte_flow_item_mark *spec = item->spec; 1879 const struct rte_flow_item_mark *mask = item->mask; 1880 const struct rte_flow_item_mark nic_mask = { 1881 .id = priv->sh->dv_mark_mask, 1882 }; 1883 int ret; 1884 1885 if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY) 1886 return rte_flow_error_set(error, ENOTSUP, 1887 RTE_FLOW_ERROR_TYPE_ITEM, item, 1888 "extended metadata feature" 1889 " isn't enabled"); 1890 if (!mlx5_flow_ext_mreg_supported(dev)) 1891 return rte_flow_error_set(error, ENOTSUP, 1892 RTE_FLOW_ERROR_TYPE_ITEM, item, 1893 "extended metadata register" 1894 " isn't supported"); 1895 if (!nic_mask.id) 1896 return rte_flow_error_set(error, ENOTSUP, 1897 RTE_FLOW_ERROR_TYPE_ITEM, item, 1898 "extended metadata register" 1899 " isn't available"); 1900 ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error); 1901 if (ret < 0) 1902 return ret; 1903 if (!spec) 1904 return rte_flow_error_set(error, EINVAL, 1905 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, 1906 item->spec, 1907 "data cannot be empty"); 1908 if (spec->id >= (MLX5_FLOW_MARK_MAX & nic_mask.id)) 1909 return rte_flow_error_set(error, EINVAL, 1910 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1911 &spec->id, 1912 "mark id exceeds the limit"); 1913 if (!mask) 1914 mask = &nic_mask; 1915 if (!mask->id) 1916 return rte_flow_error_set(error, EINVAL, 1917 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL, 1918 "mask cannot be zero"); 1919 1920 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, 1921 (const uint8_t *)&nic_mask, 1922 sizeof(struct rte_flow_item_mark), 1923 MLX5_ITEM_RANGE_NOT_ACCEPTED, error); 1924 if (ret < 0) 1925 return ret; 1926 return 0; 1927 } 1928 1929 /** 1930 * Validate META item. 1931 * 1932 * @param[in] dev 1933 * Pointer to the rte_eth_dev structure. 1934 * @param[in] item 1935 * Item specification. 1936 * @param[in] attr 1937 * Attributes of flow that includes this item. 1938 * @param[out] error 1939 * Pointer to error structure. 1940 * 1941 * @return 1942 * 0 on success, a negative errno value otherwise and rte_errno is set. 
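 *
 * In legacy metadata mode (dv_xmeta_en == MLX5_XMETA_MODE_LEGACY) the item is
 * rejected for transfer and ingress flows; otherwise a metadata register is
 * resolved from the flow attributes and matching on REG_B is refused. A
 * missing mask defaults to rte_flow_item_meta_mask and must not be zero.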
1943 */ 1944 static int 1945 flow_dv_validate_item_meta(struct rte_eth_dev *dev, 1946 const struct rte_flow_item *item, 1947 const struct rte_flow_attr *attr, 1948 struct rte_flow_error *error) 1949 { 1950 struct mlx5_priv *priv = dev->data->dev_private; 1951 struct mlx5_dev_config *config = &priv->config; 1952 const struct rte_flow_item_meta *spec = item->spec; 1953 const struct rte_flow_item_meta *mask = item->mask; 1954 struct rte_flow_item_meta nic_mask = { 1955 .data = UINT32_MAX 1956 }; 1957 int reg; 1958 int ret; 1959 1960 if (!spec) 1961 return rte_flow_error_set(error, EINVAL, 1962 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, 1963 item->spec, 1964 "data cannot be empty"); 1965 if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) { 1966 if (!mlx5_flow_ext_mreg_supported(dev)) 1967 return rte_flow_error_set(error, ENOTSUP, 1968 RTE_FLOW_ERROR_TYPE_ITEM, item, 1969 "extended metadata register" 1970 " isn't supported"); 1971 reg = flow_dv_get_metadata_reg(dev, attr, error); 1972 if (reg < 0) 1973 return reg; 1974 if (reg == REG_NON) 1975 return rte_flow_error_set(error, ENOTSUP, 1976 RTE_FLOW_ERROR_TYPE_ITEM, item, 1977 "unavailable extended metadata register"); 1978 if (reg == REG_B) 1979 return rte_flow_error_set(error, ENOTSUP, 1980 RTE_FLOW_ERROR_TYPE_ITEM, item, 1981 "match on reg_b " 1982 "isn't supported"); 1983 if (reg != REG_A) 1984 nic_mask.data = priv->sh->dv_meta_mask; 1985 } else { 1986 if (attr->transfer) 1987 return rte_flow_error_set(error, ENOTSUP, 1988 RTE_FLOW_ERROR_TYPE_ITEM, item, 1989 "extended metadata feature " 1990 "should be enabled when " 1991 "meta item is requested " 1992 "with e-switch mode"); 1993 if (attr->ingress) 1994 return rte_flow_error_set(error, ENOTSUP, 1995 RTE_FLOW_ERROR_TYPE_ITEM, item, 1996 "match on metadata for ingress " 1997 "is not supported in legacy " 1998 "metadata mode"); 1999 } 2000 if (!mask) 2001 mask = &rte_flow_item_meta_mask; 2002 if (!mask->data) 2003 return rte_flow_error_set(error, EINVAL, 2004 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL, 2005 "mask cannot be zero"); 2006 2007 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, 2008 (const uint8_t *)&nic_mask, 2009 sizeof(struct rte_flow_item_meta), 2010 MLX5_ITEM_RANGE_NOT_ACCEPTED, error); 2011 return ret; 2012 } 2013 2014 /** 2015 * Validate TAG item. 2016 * 2017 * @param[in] dev 2018 * Pointer to the rte_eth_dev structure. 2019 * @param[in] item 2020 * Item specification. 2021 * @param[in] attr 2022 * Attributes of flow that includes this item. 2023 * @param[out] error 2024 * Pointer to error structure. 2025 * 2026 * @return 2027 * 0 on success, a negative errno value otherwise and rte_errno is set.
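 *
 * The tag index must be matched with a full mask (0xff) and is translated to
 * a hardware register through mlx5_flow_get_reg_id(MLX5_APP_TAG, index); a
 * zero data mask is rejected.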
2028 */ 2029 static int 2030 flow_dv_validate_item_tag(struct rte_eth_dev *dev, 2031 const struct rte_flow_item *item, 2032 const struct rte_flow_attr *attr __rte_unused, 2033 struct rte_flow_error *error) 2034 { 2035 const struct rte_flow_item_tag *spec = item->spec; 2036 const struct rte_flow_item_tag *mask = item->mask; 2037 const struct rte_flow_item_tag nic_mask = { 2038 .data = RTE_BE32(UINT32_MAX), 2039 .index = 0xff, 2040 }; 2041 int ret; 2042 2043 if (!mlx5_flow_ext_mreg_supported(dev)) 2044 return rte_flow_error_set(error, ENOTSUP, 2045 RTE_FLOW_ERROR_TYPE_ITEM, item, 2046 "extensive metadata register" 2047 " isn't supported"); 2048 if (!spec) 2049 return rte_flow_error_set(error, EINVAL, 2050 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, 2051 item->spec, 2052 "data cannot be empty"); 2053 if (!mask) 2054 mask = &rte_flow_item_tag_mask; 2055 if (!mask->data) 2056 return rte_flow_error_set(error, EINVAL, 2057 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL, 2058 "mask cannot be zero"); 2059 2060 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, 2061 (const uint8_t *)&nic_mask, 2062 sizeof(struct rte_flow_item_tag), 2063 MLX5_ITEM_RANGE_NOT_ACCEPTED, error); 2064 if (ret < 0) 2065 return ret; 2066 if (mask->index != 0xff) 2067 return rte_flow_error_set(error, EINVAL, 2068 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL, 2069 "partial mask for tag index" 2070 " is not supported"); 2071 ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error); 2072 if (ret < 0) 2073 return ret; 2074 MLX5_ASSERT(ret != REG_NON); 2075 return 0; 2076 } 2077 2078 /** 2079 * Validate vport item. 2080 * 2081 * @param[in] dev 2082 * Pointer to the rte_eth_dev structure. 2083 * @param[in] item 2084 * Item specification. 2085 * @param[in] attr 2086 * Attributes of flow that includes this item. 2087 * @param[in] item_flags 2088 * Bit-fields that holds the items detected until now. 2089 * @param[out] error 2090 * Pointer to error structure. 2091 * 2092 * @return 2093 * 0 on success, a negative errno value otherwise and rte_errno is set. 
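 *
 * The item is valid only for transfer flows, may appear at most once,
 * requires a full mask on the "id" field, and the given port must belong to
 * the same E-Switch domain as the device the flow is created on.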
2094 */ 2095 static int 2096 flow_dv_validate_item_port_id(struct rte_eth_dev *dev, 2097 const struct rte_flow_item *item, 2098 const struct rte_flow_attr *attr, 2099 uint64_t item_flags, 2100 struct rte_flow_error *error) 2101 { 2102 const struct rte_flow_item_port_id *spec = item->spec; 2103 const struct rte_flow_item_port_id *mask = item->mask; 2104 const struct rte_flow_item_port_id switch_mask = { 2105 .id = 0xffffffff, 2106 }; 2107 struct mlx5_priv *esw_priv; 2108 struct mlx5_priv *dev_priv; 2109 int ret; 2110 2111 if (!attr->transfer) 2112 return rte_flow_error_set(error, EINVAL, 2113 RTE_FLOW_ERROR_TYPE_ITEM, 2114 NULL, 2115 "match on port id is valid only" 2116 " when transfer flag is enabled"); 2117 if (item_flags & MLX5_FLOW_ITEM_PORT_ID) 2118 return rte_flow_error_set(error, ENOTSUP, 2119 RTE_FLOW_ERROR_TYPE_ITEM, item, 2120 "multiple source ports are not" 2121 " supported"); 2122 if (!mask) 2123 mask = &switch_mask; 2124 if (mask->id != 0xffffffff) 2125 return rte_flow_error_set(error, ENOTSUP, 2126 RTE_FLOW_ERROR_TYPE_ITEM_MASK, 2127 mask, 2128 "no support for partial mask on" 2129 " \"id\" field"); 2130 ret = mlx5_flow_item_acceptable 2131 (item, (const uint8_t *)mask, 2132 (const uint8_t *)&rte_flow_item_port_id_mask, 2133 sizeof(struct rte_flow_item_port_id), 2134 MLX5_ITEM_RANGE_NOT_ACCEPTED, error); 2135 if (ret) 2136 return ret; 2137 if (!spec) 2138 return 0; 2139 esw_priv = mlx5_port_to_eswitch_info(spec->id, false); 2140 if (!esw_priv) 2141 return rte_flow_error_set(error, rte_errno, 2142 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec, 2143 "failed to obtain E-Switch info for" 2144 " port"); 2145 dev_priv = mlx5_dev_to_eswitch_info(dev); 2146 if (!dev_priv) 2147 return rte_flow_error_set(error, rte_errno, 2148 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 2149 NULL, 2150 "failed to obtain E-Switch info"); 2151 if (esw_priv->domain_id != dev_priv->domain_id) 2152 return rte_flow_error_set(error, EINVAL, 2153 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec, 2154 "cannot match on a port from a" 2155 " different E-Switch"); 2156 return 0; 2157 } 2158 2159 /** 2160 * Validate VLAN item. 2161 * 2162 * @param[in] item 2163 * Item specification. 2164 * @param[in] item_flags 2165 * Bit-fields that holds the items detected until now. 2166 * @param[in] dev 2167 * Ethernet device flow is being created on. 2168 * @param[out] error 2169 * Pointer to error structure. 2170 * 2171 * @return 2172 * 0 on success, a negative errno value otherwise and rte_errno is set. 2173 */ 2174 static int 2175 flow_dv_validate_item_vlan(const struct rte_flow_item *item, 2176 uint64_t item_flags, 2177 struct rte_eth_dev *dev, 2178 struct rte_flow_error *error) 2179 { 2180 const struct rte_flow_item_vlan *mask = item->mask; 2181 const struct rte_flow_item_vlan nic_mask = { 2182 .tci = RTE_BE16(UINT16_MAX), 2183 .inner_type = RTE_BE16(UINT16_MAX), 2184 .has_more_vlan = 1, 2185 }; 2186 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 2187 int ret; 2188 const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 | 2189 MLX5_FLOW_LAYER_INNER_L4) : 2190 (MLX5_FLOW_LAYER_OUTER_L3 | 2191 MLX5_FLOW_LAYER_OUTER_L4); 2192 const uint64_t vlanm = tunnel ? 
MLX5_FLOW_LAYER_INNER_VLAN : 2193 MLX5_FLOW_LAYER_OUTER_VLAN; 2194 2195 if (item_flags & vlanm) 2196 return rte_flow_error_set(error, EINVAL, 2197 RTE_FLOW_ERROR_TYPE_ITEM, item, 2198 "multiple VLAN layers not supported"); 2199 else if ((item_flags & l34m) != 0) 2200 return rte_flow_error_set(error, EINVAL, 2201 RTE_FLOW_ERROR_TYPE_ITEM, item, 2202 "VLAN cannot follow L3/L4 layer"); 2203 if (!mask) 2204 mask = &rte_flow_item_vlan_mask; 2205 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, 2206 (const uint8_t *)&nic_mask, 2207 sizeof(struct rte_flow_item_vlan), 2208 MLX5_ITEM_RANGE_NOT_ACCEPTED, error); 2209 if (ret) 2210 return ret; 2211 if (!tunnel && mask->tci != RTE_BE16(0x0fff)) { 2212 struct mlx5_priv *priv = dev->data->dev_private; 2213 2214 if (priv->vmwa_context) { 2215 /* 2216 * Non-NULL context means we have a virtual machine 2217 * and SR-IOV enabled, we have to create VLAN interface 2218 * to make hypervisor to setup E-Switch vport 2219 * context correctly. We avoid creating the multiple 2220 * VLAN interfaces, so we cannot support VLAN tag mask. 2221 */ 2222 return rte_flow_error_set(error, EINVAL, 2223 RTE_FLOW_ERROR_TYPE_ITEM, 2224 item, 2225 "VLAN tag mask is not" 2226 " supported in virtual" 2227 " environment"); 2228 } 2229 } 2230 return 0; 2231 } 2232 2233 /* 2234 * GTP flags are contained in 1 byte of the format: 2235 * ------------------------------------------- 2236 * | bit | 0 - 2 | 3 | 4 | 5 | 6 | 7 | 2237 * |-----------------------------------------| 2238 * | value | Version | PT | Res | E | S | PN | 2239 * ------------------------------------------- 2240 * 2241 * Matching is supported only for GTP flags E, S, PN. 2242 */ 2243 #define MLX5_GTP_FLAGS_MASK 0x07 2244 2245 /** 2246 * Validate GTP item. 2247 * 2248 * @param[in] dev 2249 * Pointer to the rte_eth_dev structure. 2250 * @param[in] item 2251 * Item specification. 2252 * @param[in] item_flags 2253 * Bit-fields that holds the items detected until now. 2254 * @param[out] error 2255 * Pointer to error structure. 2256 * 2257 * @return 2258 * 0 on success, a negative errno value otherwise and rte_errno is set. 
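 *
 * GTP matching requires the tunnel_stateless_gtp HCA capability, an outer
 * UDP layer in the pattern and no preceding tunnel layer; only the E, S and
 * PN bits of v_pt_rsv_flags may be matched (MLX5_GTP_FLAGS_MASK).
 * An illustrative spec/mask pair (values are arbitrary):
 *   spec.teid = RTE_BE32(0x1234), spec.msg_type = 0xff;
 *   mask.teid = RTE_BE32(0xffffffff), mask.msg_type = 0xff,
 *   mask.v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK;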
2259 */ 2260 static int 2261 flow_dv_validate_item_gtp(struct rte_eth_dev *dev, 2262 const struct rte_flow_item *item, 2263 uint64_t item_flags, 2264 struct rte_flow_error *error) 2265 { 2266 struct mlx5_priv *priv = dev->data->dev_private; 2267 const struct rte_flow_item_gtp *spec = item->spec; 2268 const struct rte_flow_item_gtp *mask = item->mask; 2269 const struct rte_flow_item_gtp nic_mask = { 2270 .v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK, 2271 .msg_type = 0xff, 2272 .teid = RTE_BE32(0xffffffff), 2273 }; 2274 2275 if (!priv->config.hca_attr.tunnel_stateless_gtp) 2276 return rte_flow_error_set(error, ENOTSUP, 2277 RTE_FLOW_ERROR_TYPE_ITEM, item, 2278 "GTP support is not enabled"); 2279 if (item_flags & MLX5_FLOW_LAYER_TUNNEL) 2280 return rte_flow_error_set(error, ENOTSUP, 2281 RTE_FLOW_ERROR_TYPE_ITEM, item, 2282 "multiple tunnel layers not" 2283 " supported"); 2284 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP)) 2285 return rte_flow_error_set(error, EINVAL, 2286 RTE_FLOW_ERROR_TYPE_ITEM, item, 2287 "no outer UDP layer found"); 2288 if (!mask) 2289 mask = &rte_flow_item_gtp_mask; 2290 if (spec && spec->v_pt_rsv_flags & ~MLX5_GTP_FLAGS_MASK) 2291 return rte_flow_error_set(error, ENOTSUP, 2292 RTE_FLOW_ERROR_TYPE_ITEM, item, 2293 "Match is supported for GTP" 2294 " flags only"); 2295 return mlx5_flow_item_acceptable(item, (const uint8_t *)mask, 2296 (const uint8_t *)&nic_mask, 2297 sizeof(struct rte_flow_item_gtp), 2298 MLX5_ITEM_RANGE_NOT_ACCEPTED, error); 2299 } 2300 2301 /** 2302 * Validate GTP PSC item. 2303 * 2304 * @param[in] item 2305 * Item specification. 2306 * @param[in] last_item 2307 * Previous validated item in the pattern items. 2308 * @param[in] gtp_item 2309 * Previous GTP item specification. 2310 * @param[in] attr 2311 * Pointer to flow attributes. 2312 * @param[out] error 2313 * Pointer to error structure. 2314 * 2315 * @return 2316 * 0 on success, a negative errno value otherwise and rte_errno is set. 2317 */ 2318 static int 2319 flow_dv_validate_item_gtp_psc(const struct rte_flow_item *item, 2320 uint64_t last_item, 2321 const struct rte_flow_item *gtp_item, 2322 const struct rte_flow_attr *attr, 2323 struct rte_flow_error *error) 2324 { 2325 const struct rte_flow_item_gtp *gtp_spec; 2326 const struct rte_flow_item_gtp *gtp_mask; 2327 const struct rte_flow_item_gtp_psc *spec; 2328 const struct rte_flow_item_gtp_psc *mask; 2329 const struct rte_flow_item_gtp_psc nic_mask = { 2330 .pdu_type = 0xFF, 2331 .qfi = 0xFF, 2332 }; 2333 2334 if (!gtp_item || !(last_item & MLX5_FLOW_LAYER_GTP)) 2335 return rte_flow_error_set 2336 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, 2337 "GTP PSC item must be preceded with GTP item"); 2338 gtp_spec = gtp_item->spec; 2339 gtp_mask = gtp_item->mask ? gtp_item->mask : &rte_flow_item_gtp_mask; 2340 /* GTP spec and E flag is requested to match zero. */ 2341 if (gtp_spec && 2342 (gtp_mask->v_pt_rsv_flags & 2343 ~gtp_spec->v_pt_rsv_flags & MLX5_GTP_EXT_HEADER_FLAG)) 2344 return rte_flow_error_set 2345 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, 2346 "GTP E flag must be 1 to match GTP PSC"); 2347 /* Check the flow is not created in group zero. */ 2348 if (!attr->transfer && !attr->group) 2349 return rte_flow_error_set 2350 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 2351 "GTP PSC is not supported for group 0"); 2352 /* GTP spec is here and E flag is requested to match zero. */ 2353 if (!item->spec) 2354 return 0; 2355 spec = item->spec; 2356 mask = item->mask ? 
item->mask : &rte_flow_item_gtp_psc_mask; 2357 if (spec->pdu_type > MLX5_GTP_EXT_MAX_PDU_TYPE) 2358 return rte_flow_error_set 2359 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, 2360 "PDU type should be smaller than 16"); 2361 return mlx5_flow_item_acceptable(item, (const uint8_t *)mask, 2362 (const uint8_t *)&nic_mask, 2363 sizeof(struct rte_flow_item_gtp_psc), 2364 MLX5_ITEM_RANGE_NOT_ACCEPTED, error); 2365 } 2366 2367 /** 2368 * Validate IPV4 item. 2369 * Use existing validation function mlx5_flow_validate_item_ipv4(), and 2370 * add specific validation of fragment_offset field, 2371 * 2372 * @param[in] item 2373 * Item specification. 2374 * @param[in] item_flags 2375 * Bit-fields that holds the items detected until now. 2376 * @param[out] error 2377 * Pointer to error structure. 2378 * 2379 * @return 2380 * 0 on success, a negative errno value otherwise and rte_errno is set. 2381 */ 2382 static int 2383 flow_dv_validate_item_ipv4(const struct rte_flow_item *item, 2384 uint64_t item_flags, 2385 uint64_t last_item, 2386 uint16_t ether_type, 2387 struct rte_flow_error *error) 2388 { 2389 int ret; 2390 const struct rte_flow_item_ipv4 *spec = item->spec; 2391 const struct rte_flow_item_ipv4 *last = item->last; 2392 const struct rte_flow_item_ipv4 *mask = item->mask; 2393 rte_be16_t fragment_offset_spec = 0; 2394 rte_be16_t fragment_offset_last = 0; 2395 const struct rte_flow_item_ipv4 nic_ipv4_mask = { 2396 .hdr = { 2397 .src_addr = RTE_BE32(0xffffffff), 2398 .dst_addr = RTE_BE32(0xffffffff), 2399 .type_of_service = 0xff, 2400 .fragment_offset = RTE_BE16(0xffff), 2401 .next_proto_id = 0xff, 2402 .time_to_live = 0xff, 2403 }, 2404 }; 2405 2406 ret = mlx5_flow_validate_item_ipv4(item, item_flags, last_item, 2407 ether_type, &nic_ipv4_mask, 2408 MLX5_ITEM_RANGE_ACCEPTED, error); 2409 if (ret < 0) 2410 return ret; 2411 if (spec && mask) 2412 fragment_offset_spec = spec->hdr.fragment_offset & 2413 mask->hdr.fragment_offset; 2414 if (!fragment_offset_spec) 2415 return 0; 2416 /* 2417 * spec and mask are valid, enforce using full mask to make sure the 2418 * complete value is used correctly. 2419 */ 2420 if ((mask->hdr.fragment_offset & RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK)) 2421 != RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK)) 2422 return rte_flow_error_set(error, EINVAL, 2423 RTE_FLOW_ERROR_TYPE_ITEM_MASK, 2424 item, "must use full mask for" 2425 " fragment_offset"); 2426 /* 2427 * Match on fragment_offset 0x2000 means MF is 1 and frag-offset is 0, 2428 * indicating this is 1st fragment of fragmented packet. 2429 * This is not yet supported in MLX5, return appropriate error message. 2430 */ 2431 if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG)) 2432 return rte_flow_error_set(error, ENOTSUP, 2433 RTE_FLOW_ERROR_TYPE_ITEM, item, 2434 "match on first fragment not " 2435 "supported"); 2436 if (fragment_offset_spec && !last) 2437 return rte_flow_error_set(error, ENOTSUP, 2438 RTE_FLOW_ERROR_TYPE_ITEM, item, 2439 "specified value not supported"); 2440 /* spec and last are valid, validate the specified range. */ 2441 fragment_offset_last = last->hdr.fragment_offset & 2442 mask->hdr.fragment_offset; 2443 /* 2444 * Match on fragment_offset spec 0x2001 and last 0x3fff 2445 * means MF is 1 and frag-offset is > 0. 2446 * This packet is fragment 2nd and onward, excluding last. 2447 * This is not yet supported in MLX5, return appropriate 2448 * error message. 
2449 */ 2450 if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG + 1) && 2451 fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK)) 2452 return rte_flow_error_set(error, ENOTSUP, 2453 RTE_FLOW_ERROR_TYPE_ITEM_LAST, 2454 last, "match on following " 2455 "fragments not supported"); 2456 /* 2457 * Match on fragment_offset spec 0x0001 and last 0x1fff 2458 * means MF is 0 and frag-offset is > 0. 2459 * This packet is last fragment of fragmented packet. 2460 * This is not yet supported in MLX5, return appropriate 2461 * error message. 2462 */ 2463 if (fragment_offset_spec == RTE_BE16(1) && 2464 fragment_offset_last == RTE_BE16(RTE_IPV4_HDR_OFFSET_MASK)) 2465 return rte_flow_error_set(error, ENOTSUP, 2466 RTE_FLOW_ERROR_TYPE_ITEM_LAST, 2467 last, "match on last " 2468 "fragment not supported"); 2469 /* 2470 * Match on fragment_offset spec 0x0001 and last 0x3fff 2471 * means MF and/or frag-offset is not 0. 2472 * This is a fragmented packet. 2473 * Other range values are invalid and rejected. 2474 */ 2475 if (!(fragment_offset_spec == RTE_BE16(1) && 2476 fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))) 2477 return rte_flow_error_set(error, ENOTSUP, 2478 RTE_FLOW_ERROR_TYPE_ITEM_LAST, last, 2479 "specified range not supported"); 2480 return 0; 2481 } 2482 2483 /** 2484 * Validate IPV6 fragment extension item. 2485 * 2486 * @param[in] item 2487 * Item specification. 2488 * @param[in] item_flags 2489 * Bit-fields that holds the items detected until now. 2490 * @param[out] error 2491 * Pointer to error structure. 2492 * 2493 * @return 2494 * 0 on success, a negative errno value otherwise and rte_errno is set. 2495 */ 2496 static int 2497 flow_dv_validate_item_ipv6_frag_ext(const struct rte_flow_item *item, 2498 uint64_t item_flags, 2499 struct rte_flow_error *error) 2500 { 2501 const struct rte_flow_item_ipv6_frag_ext *spec = item->spec; 2502 const struct rte_flow_item_ipv6_frag_ext *last = item->last; 2503 const struct rte_flow_item_ipv6_frag_ext *mask = item->mask; 2504 rte_be16_t frag_data_spec = 0; 2505 rte_be16_t frag_data_last = 0; 2506 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 2507 const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : 2508 MLX5_FLOW_LAYER_OUTER_L4; 2509 int ret = 0; 2510 struct rte_flow_item_ipv6_frag_ext nic_mask = { 2511 .hdr = { 2512 .next_header = 0xff, 2513 .frag_data = RTE_BE16(0xffff), 2514 }, 2515 }; 2516 2517 if (item_flags & l4m) 2518 return rte_flow_error_set(error, EINVAL, 2519 RTE_FLOW_ERROR_TYPE_ITEM, item, 2520 "ipv6 fragment extension item cannot " 2521 "follow L4 item."); 2522 if ((tunnel && !(item_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)) || 2523 (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) 2524 return rte_flow_error_set(error, EINVAL, 2525 RTE_FLOW_ERROR_TYPE_ITEM, item, 2526 "ipv6 fragment extension item must " 2527 "follow ipv6 item"); 2528 if (spec && mask) 2529 frag_data_spec = spec->hdr.frag_data & mask->hdr.frag_data; 2530 if (!frag_data_spec) 2531 return 0; 2532 /* 2533 * spec and mask are valid, enforce using full mask to make sure the 2534 * complete value is used correctly. 2535 */ 2536 if ((mask->hdr.frag_data & RTE_BE16(RTE_IPV6_FRAG_USED_MASK)) != 2537 RTE_BE16(RTE_IPV6_FRAG_USED_MASK)) 2538 return rte_flow_error_set(error, EINVAL, 2539 RTE_FLOW_ERROR_TYPE_ITEM_MASK, 2540 item, "must use full mask for" 2541 " frag_data"); 2542 /* 2543 * Match on frag_data 0x00001 means M is 1 and frag-offset is 0. 2544 * This is 1st fragment of fragmented packet. 
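 * Matching only the first fragment is not supported, so such a spec is
 * rejected below with ENOTSUP.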
2545 */ 2546 if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_MF_MASK)) 2547 return rte_flow_error_set(error, ENOTSUP, 2548 RTE_FLOW_ERROR_TYPE_ITEM, item, 2549 "match on first fragment not " 2550 "supported"); 2551 if (frag_data_spec && !last) 2552 return rte_flow_error_set(error, EINVAL, 2553 RTE_FLOW_ERROR_TYPE_ITEM, item, 2554 "specified value not supported"); 2555 ret = mlx5_flow_item_acceptable 2556 (item, (const uint8_t *)mask, 2557 (const uint8_t *)&nic_mask, 2558 sizeof(struct rte_flow_item_ipv6_frag_ext), 2559 MLX5_ITEM_RANGE_ACCEPTED, error); 2560 if (ret) 2561 return ret; 2562 /* spec and last are valid, validate the specified range. */ 2563 frag_data_last = last->hdr.frag_data & mask->hdr.frag_data; 2564 /* 2565 * Match on frag_data spec 0x0009 and last 0xfff9 2566 * means M is 1 and frag-offset is > 0. 2567 * This packet is fragment 2nd and onward, excluding last. 2568 * This is not yet supported in MLX5, return appropriate 2569 * error message. 2570 */ 2571 if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN | 2572 RTE_IPV6_EHDR_MF_MASK) && 2573 frag_data_last == RTE_BE16(RTE_IPV6_FRAG_USED_MASK)) 2574 return rte_flow_error_set(error, ENOTSUP, 2575 RTE_FLOW_ERROR_TYPE_ITEM_LAST, 2576 last, "match on following " 2577 "fragments not supported"); 2578 /* 2579 * Match on frag_data spec 0x0008 and last 0xfff8 2580 * means M is 0 and frag-offset is > 0. 2581 * This packet is last fragment of fragmented packet. 2582 * This is not yet supported in MLX5, return appropriate 2583 * error message. 2584 */ 2585 if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN) && 2586 frag_data_last == RTE_BE16(RTE_IPV6_EHDR_FO_MASK)) 2587 return rte_flow_error_set(error, ENOTSUP, 2588 RTE_FLOW_ERROR_TYPE_ITEM_LAST, 2589 last, "match on last " 2590 "fragment not supported"); 2591 /* Other range values are invalid and rejected. */ 2592 return rte_flow_error_set(error, EINVAL, 2593 RTE_FLOW_ERROR_TYPE_ITEM_LAST, last, 2594 "specified range not supported"); 2595 } 2596 2597 /** 2598 * Validate the pop VLAN action. 2599 * 2600 * @param[in] dev 2601 * Pointer to the rte_eth_dev structure. 2602 * @param[in] action_flags 2603 * Holds the actions detected until now. 2604 * @param[in] action 2605 * Pointer to the pop vlan action. 2606 * @param[in] item_flags 2607 * The items found in this flow rule. 2608 * @param[in] attr 2609 * Pointer to flow attributes. 2610 * @param[out] error 2611 * Pointer to error structure. 2612 * 2613 * @return 2614 * 0 on success, a negative errno value otherwise and rte_errno is set. 2615 */ 2616 static int 2617 flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev, 2618 uint64_t action_flags, 2619 const struct rte_flow_action *action, 2620 uint64_t item_flags, 2621 const struct rte_flow_attr *attr, 2622 struct rte_flow_error *error) 2623 { 2624 const struct mlx5_priv *priv = dev->data->dev_private; 2625 2626 (void)action; 2627 (void)attr; 2628 if (!priv->sh->pop_vlan_action) 2629 return rte_flow_error_set(error, ENOTSUP, 2630 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 2631 NULL, 2632 "pop vlan action is not supported"); 2633 if (attr->egress) 2634 return rte_flow_error_set(error, ENOTSUP, 2635 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, 2636 NULL, 2637 "pop vlan action not supported for " 2638 "egress"); 2639 if (action_flags & MLX5_FLOW_VLAN_ACTIONS) 2640 return rte_flow_error_set(error, ENOTSUP, 2641 RTE_FLOW_ERROR_TYPE_ACTION, action, 2642 "no support for multiple VLAN " 2643 "actions"); 2644 /* Pop VLAN with preceding Decap requires inner header with VLAN. 
*/ 2645 if ((action_flags & MLX5_FLOW_ACTION_DECAP) && 2646 !(item_flags & MLX5_FLOW_LAYER_INNER_VLAN)) 2647 return rte_flow_error_set(error, ENOTSUP, 2648 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 2649 NULL, 2650 "cannot pop vlan after decap without " 2651 "match on inner vlan in the flow"); 2652 /* Pop VLAN without preceding Decap requires outer header with VLAN. */ 2653 if (!(action_flags & MLX5_FLOW_ACTION_DECAP) && 2654 !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN)) 2655 return rte_flow_error_set(error, ENOTSUP, 2656 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 2657 NULL, 2658 "cannot pop vlan without a " 2659 "match on (outer) vlan in the flow"); 2660 if (action_flags & MLX5_FLOW_ACTION_PORT_ID) 2661 return rte_flow_error_set(error, EINVAL, 2662 RTE_FLOW_ERROR_TYPE_ACTION, action, 2663 "wrong action order, port_id should " 2664 "be after pop VLAN action"); 2665 if (!attr->transfer && priv->representor) 2666 return rte_flow_error_set(error, ENOTSUP, 2667 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 2668 "pop vlan action for VF representor " 2669 "not supported on NIC table"); 2670 return 0; 2671 } 2672 2673 /** 2674 * Get VLAN default info from vlan match info. 2675 * 2676 * @param[in] items 2677 * the list of item specifications. 2678 * @param[out] vlan 2679 * pointer VLAN info to fill to. 2680 * 2681 * @return 2682 * 0 on success, a negative errno value otherwise and rte_errno is set. 2683 */ 2684 static void 2685 flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items, 2686 struct rte_vlan_hdr *vlan) 2687 { 2688 const struct rte_flow_item_vlan nic_mask = { 2689 .tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK | 2690 MLX5DV_FLOW_VLAN_VID_MASK), 2691 .inner_type = RTE_BE16(0xffff), 2692 }; 2693 2694 if (items == NULL) 2695 return; 2696 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { 2697 int type = items->type; 2698 2699 if (type == RTE_FLOW_ITEM_TYPE_VLAN || 2700 type == MLX5_RTE_FLOW_ITEM_TYPE_VLAN) 2701 break; 2702 } 2703 if (items->type != RTE_FLOW_ITEM_TYPE_END) { 2704 const struct rte_flow_item_vlan *vlan_m = items->mask; 2705 const struct rte_flow_item_vlan *vlan_v = items->spec; 2706 2707 /* If VLAN item in pattern doesn't contain data, return here. */ 2708 if (!vlan_v) 2709 return; 2710 if (!vlan_m) 2711 vlan_m = &nic_mask; 2712 /* Only full match values are accepted */ 2713 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) == 2714 MLX5DV_FLOW_VLAN_PCP_MASK_BE) { 2715 vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK; 2716 vlan->vlan_tci |= 2717 rte_be_to_cpu_16(vlan_v->tci & 2718 MLX5DV_FLOW_VLAN_PCP_MASK_BE); 2719 } 2720 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) == 2721 MLX5DV_FLOW_VLAN_VID_MASK_BE) { 2722 vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK; 2723 vlan->vlan_tci |= 2724 rte_be_to_cpu_16(vlan_v->tci & 2725 MLX5DV_FLOW_VLAN_VID_MASK_BE); 2726 } 2727 if (vlan_m->inner_type == nic_mask.inner_type) 2728 vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type & 2729 vlan_m->inner_type); 2730 } 2731 } 2732 2733 /** 2734 * Validate the push VLAN action. 2735 * 2736 * @param[in] dev 2737 * Pointer to the rte_eth_dev structure. 2738 * @param[in] action_flags 2739 * Holds the actions detected until now. 2740 * @param[in] item_flags 2741 * The items found in this flow rule. 2742 * @param[in] action 2743 * Pointer to the action structure. 2744 * @param[in] attr 2745 * Pointer to flow attributes 2746 * @param[out] error 2747 * Pointer to error structure. 2748 * 2749 * @return 2750 * 0 on success, a negative errno value otherwise and rte_errno is set. 
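 *
 * The pushed ethertype must be either RTE_ETHER_TYPE_VLAN (0x8100) or
 * RTE_ETHER_TYPE_QINQ (0x88A8). When the VLAN item in the pattern does not
 * fully mask PCP or VID, a corresponding OF_SET_VLAN_PCP / OF_SET_VLAN_VID
 * action must be present so the pushed header values can be determined.
 * An illustrative action configuration (value is arbitrary):
 *   struct rte_flow_action_of_push_vlan conf = {
 *       .ethertype = RTE_BE16(RTE_ETHER_TYPE_VLAN),
 *   };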
2751 */ 2752 static int 2753 flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev, 2754 uint64_t action_flags, 2755 const struct rte_flow_item_vlan *vlan_m, 2756 const struct rte_flow_action *action, 2757 const struct rte_flow_attr *attr, 2758 struct rte_flow_error *error) 2759 { 2760 const struct rte_flow_action_of_push_vlan *push_vlan = action->conf; 2761 const struct mlx5_priv *priv = dev->data->dev_private; 2762 2763 if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) && 2764 push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ)) 2765 return rte_flow_error_set(error, EINVAL, 2766 RTE_FLOW_ERROR_TYPE_ACTION, action, 2767 "invalid vlan ethertype"); 2768 if (action_flags & MLX5_FLOW_ACTION_PORT_ID) 2769 return rte_flow_error_set(error, EINVAL, 2770 RTE_FLOW_ERROR_TYPE_ACTION, action, 2771 "wrong action order, port_id should " 2772 "be after push VLAN"); 2773 if (!attr->transfer && priv->representor) 2774 return rte_flow_error_set(error, ENOTSUP, 2775 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 2776 "push vlan action for VF representor " 2777 "not supported on NIC table"); 2778 if (vlan_m && 2779 (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) && 2780 (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) != 2781 MLX5DV_FLOW_VLAN_PCP_MASK_BE && 2782 !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP) && 2783 !(mlx5_flow_find_action 2784 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP))) 2785 return rte_flow_error_set(error, EINVAL, 2786 RTE_FLOW_ERROR_TYPE_ACTION, action, 2787 "not full match mask on VLAN PCP and " 2788 "there is no of_set_vlan_pcp action, " 2789 "push VLAN action cannot figure out " 2790 "PCP value"); 2791 if (vlan_m && 2792 (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) && 2793 (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) != 2794 MLX5DV_FLOW_VLAN_VID_MASK_BE && 2795 !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID) && 2796 !(mlx5_flow_find_action 2797 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID))) 2798 return rte_flow_error_set(error, EINVAL, 2799 RTE_FLOW_ERROR_TYPE_ACTION, action, 2800 "not full match mask on VLAN VID and " 2801 "there is no of_set_vlan_vid action, " 2802 "push VLAN action cannot figure out " 2803 "VID value"); 2804 (void)attr; 2805 return 0; 2806 } 2807 2808 /** 2809 * Validate the set VLAN PCP. 2810 * 2811 * @param[in] action_flags 2812 * Holds the actions detected until now. 2813 * @param[in] actions 2814 * Pointer to the list of actions remaining in the flow rule. 2815 * @param[out] error 2816 * Pointer to error structure. 2817 * 2818 * @return 2819 * 0 on success, a negative errno value otherwise and rte_errno is set. 
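 *
 * The PCP value must be in the range 0..7, the action must follow an
 * OF_PUSH_VLAN action, and only one PCP modification per flow is allowed.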
2820 */ 2821 static int 2822 flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags, 2823 const struct rte_flow_action actions[], 2824 struct rte_flow_error *error) 2825 { 2826 const struct rte_flow_action *action = actions; 2827 const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf; 2828 2829 if (conf->vlan_pcp > 7) 2830 return rte_flow_error_set(error, EINVAL, 2831 RTE_FLOW_ERROR_TYPE_ACTION, action, 2832 "VLAN PCP value is too big"); 2833 if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)) 2834 return rte_flow_error_set(error, ENOTSUP, 2835 RTE_FLOW_ERROR_TYPE_ACTION, action, 2836 "set VLAN PCP action must follow " 2837 "the push VLAN action"); 2838 if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP) 2839 return rte_flow_error_set(error, ENOTSUP, 2840 RTE_FLOW_ERROR_TYPE_ACTION, action, 2841 "Multiple VLAN PCP modification are " 2842 "not supported"); 2843 if (action_flags & MLX5_FLOW_ACTION_PORT_ID) 2844 return rte_flow_error_set(error, EINVAL, 2845 RTE_FLOW_ERROR_TYPE_ACTION, action, 2846 "wrong action order, port_id should " 2847 "be after set VLAN PCP"); 2848 return 0; 2849 } 2850 2851 /** 2852 * Validate the set VLAN VID. 2853 * 2854 * @param[in] item_flags 2855 * Holds the items detected in this rule. 2856 * @param[in] action_flags 2857 * Holds the actions detected until now. 2858 * @param[in] actions 2859 * Pointer to the list of actions remaining in the flow rule. 2860 * @param[out] error 2861 * Pointer to error structure. 2862 * 2863 * @return 2864 * 0 on success, a negative errno value otherwise and rte_errno is set. 2865 */ 2866 static int 2867 flow_dv_validate_action_set_vlan_vid(uint64_t item_flags, 2868 uint64_t action_flags, 2869 const struct rte_flow_action actions[], 2870 struct rte_flow_error *error) 2871 { 2872 const struct rte_flow_action *action = actions; 2873 const struct rte_flow_action_of_set_vlan_vid *conf = action->conf; 2874 2875 if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE) 2876 return rte_flow_error_set(error, EINVAL, 2877 RTE_FLOW_ERROR_TYPE_ACTION, action, 2878 "VLAN VID value is too big"); 2879 if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) && 2880 !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN)) 2881 return rte_flow_error_set(error, ENOTSUP, 2882 RTE_FLOW_ERROR_TYPE_ACTION, action, 2883 "set VLAN VID action must follow push" 2884 " VLAN action or match on VLAN item"); 2885 if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID) 2886 return rte_flow_error_set(error, ENOTSUP, 2887 RTE_FLOW_ERROR_TYPE_ACTION, action, 2888 "Multiple VLAN VID modifications are " 2889 "not supported"); 2890 if (action_flags & MLX5_FLOW_ACTION_PORT_ID) 2891 return rte_flow_error_set(error, EINVAL, 2892 RTE_FLOW_ERROR_TYPE_ACTION, action, 2893 "wrong action order, port_id should " 2894 "be after set VLAN VID"); 2895 return 0; 2896 } 2897 2898 /* 2899 * Validate the FLAG action. 2900 * 2901 * @param[in] dev 2902 * Pointer to the rte_eth_dev structure. 2903 * @param[in] action_flags 2904 * Holds the actions detected until now. 2905 * @param[in] attr 2906 * Pointer to flow attributes 2907 * @param[out] error 2908 * Pointer to error structure. 2909 * 2910 * @return 2911 * 0 on success, a negative errno value otherwise and rte_errno is set. 
2912 */ 2913 static int 2914 flow_dv_validate_action_flag(struct rte_eth_dev *dev, 2915 uint64_t action_flags, 2916 const struct rte_flow_attr *attr, 2917 struct rte_flow_error *error) 2918 { 2919 struct mlx5_priv *priv = dev->data->dev_private; 2920 struct mlx5_dev_config *config = &priv->config; 2921 int ret; 2922 2923 /* Fall back if no extended metadata register support. */ 2924 if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY) 2925 return mlx5_flow_validate_action_flag(action_flags, attr, 2926 error); 2927 /* Extensive metadata mode requires registers. */ 2928 if (!mlx5_flow_ext_mreg_supported(dev)) 2929 return rte_flow_error_set(error, ENOTSUP, 2930 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 2931 "no metadata registers " 2932 "to support flag action"); 2933 if (!(priv->sh->dv_mark_mask & MLX5_FLOW_MARK_DEFAULT)) 2934 return rte_flow_error_set(error, ENOTSUP, 2935 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 2936 "extended metadata register" 2937 " isn't available"); 2938 ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error); 2939 if (ret < 0) 2940 return ret; 2941 MLX5_ASSERT(ret > 0); 2942 if (action_flags & MLX5_FLOW_ACTION_MARK) 2943 return rte_flow_error_set(error, EINVAL, 2944 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 2945 "can't mark and flag in same flow"); 2946 if (action_flags & MLX5_FLOW_ACTION_FLAG) 2947 return rte_flow_error_set(error, EINVAL, 2948 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 2949 "can't have 2 flag" 2950 " actions in same flow"); 2951 return 0; 2952 } 2953 2954 /** 2955 * Validate MARK action. 2956 * 2957 * @param[in] dev 2958 * Pointer to the rte_eth_dev structure. 2959 * @param[in] action 2960 * Pointer to action. 2961 * @param[in] action_flags 2962 * Holds the actions detected until now. 2963 * @param[in] attr 2964 * Pointer to flow attributes 2965 * @param[out] error 2966 * Pointer to error structure. 2967 * 2968 * @return 2969 * 0 on success, a negative errno value otherwise and rte_errno is set. 2970 */ 2971 static int 2972 flow_dv_validate_action_mark(struct rte_eth_dev *dev, 2973 const struct rte_flow_action *action, 2974 uint64_t action_flags, 2975 const struct rte_flow_attr *attr, 2976 struct rte_flow_error *error) 2977 { 2978 struct mlx5_priv *priv = dev->data->dev_private; 2979 struct mlx5_dev_config *config = &priv->config; 2980 const struct rte_flow_action_mark *mark = action->conf; 2981 int ret; 2982 2983 if (is_tunnel_offload_active(dev)) 2984 return rte_flow_error_set(error, ENOTSUP, 2985 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 2986 "no mark action " 2987 "if tunnel offload active"); 2988 /* Fall back if no extended metadata register support. */ 2989 if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY) 2990 return mlx5_flow_validate_action_mark(action, action_flags, 2991 attr, error); 2992 /* Extensive metadata mode requires registers. 
*/ 2993 if (!mlx5_flow_ext_mreg_supported(dev)) 2994 return rte_flow_error_set(error, ENOTSUP, 2995 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 2996 "no metadata registers " 2997 "to support mark action"); 2998 if (!priv->sh->dv_mark_mask) 2999 return rte_flow_error_set(error, ENOTSUP, 3000 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 3001 "extended metadata register" 3002 " isn't available"); 3003 ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error); 3004 if (ret < 0) 3005 return ret; 3006 MLX5_ASSERT(ret > 0); 3007 if (!mark) 3008 return rte_flow_error_set(error, EINVAL, 3009 RTE_FLOW_ERROR_TYPE_ACTION, action, 3010 "configuration cannot be null"); 3011 if (mark->id >= (MLX5_FLOW_MARK_MAX & priv->sh->dv_mark_mask)) 3012 return rte_flow_error_set(error, EINVAL, 3013 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 3014 &mark->id, 3015 "mark id exceeds the limit"); 3016 if (action_flags & MLX5_FLOW_ACTION_FLAG) 3017 return rte_flow_error_set(error, EINVAL, 3018 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 3019 "can't flag and mark in same flow"); 3020 if (action_flags & MLX5_FLOW_ACTION_MARK) 3021 return rte_flow_error_set(error, EINVAL, 3022 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 3023 "can't have 2 mark actions in same" 3024 " flow"); 3025 return 0; 3026 } 3027 3028 /** 3029 * Validate SET_META action. 3030 * 3031 * @param[in] dev 3032 * Pointer to the rte_eth_dev structure. 3033 * @param[in] action 3034 * Pointer to the action structure. 3035 * @param[in] action_flags 3036 * Holds the actions detected until now. 3037 * @param[in] attr 3038 * Pointer to flow attributes 3039 * @param[out] error 3040 * Pointer to error structure. 3041 * 3042 * @return 3043 * 0 on success, a negative errno value otherwise and rte_errno is set. 3044 */ 3045 static int 3046 flow_dv_validate_action_set_meta(struct rte_eth_dev *dev, 3047 const struct rte_flow_action *action, 3048 uint64_t action_flags __rte_unused, 3049 const struct rte_flow_attr *attr, 3050 struct rte_flow_error *error) 3051 { 3052 const struct rte_flow_action_set_meta *conf; 3053 uint32_t nic_mask = UINT32_MAX; 3054 int reg; 3055 3056 if (!mlx5_flow_ext_mreg_supported(dev)) 3057 return rte_flow_error_set(error, ENOTSUP, 3058 RTE_FLOW_ERROR_TYPE_ACTION, action, 3059 "extended metadata register" 3060 " isn't supported"); 3061 reg = flow_dv_get_metadata_reg(dev, attr, error); 3062 if (reg < 0) 3063 return reg; 3064 if (reg == REG_NON) 3065 return rte_flow_error_set(error, ENOTSUP, 3066 RTE_FLOW_ERROR_TYPE_ACTION, action, 3067 "unavailable extended metadata register"); 3068 if (reg != REG_A && reg != REG_B) { 3069 struct mlx5_priv *priv = dev->data->dev_private; 3070 3071 nic_mask = priv->sh->dv_meta_mask; 3072 } 3073 if (!(action->conf)) 3074 return rte_flow_error_set(error, EINVAL, 3075 RTE_FLOW_ERROR_TYPE_ACTION, action, 3076 "configuration cannot be null"); 3077 conf = (const struct rte_flow_action_set_meta *)action->conf; 3078 if (!conf->mask) 3079 return rte_flow_error_set(error, EINVAL, 3080 RTE_FLOW_ERROR_TYPE_ACTION, action, 3081 "zero mask doesn't have any effect"); 3082 if (conf->mask & ~nic_mask) 3083 return rte_flow_error_set(error, EINVAL, 3084 RTE_FLOW_ERROR_TYPE_ACTION, action, 3085 "metadata must be within reg C0"); 3086 return 0; 3087 } 3088 3089 /** 3090 * Validate SET_TAG action. 3091 * 3092 * @param[in] dev 3093 * Pointer to the rte_eth_dev structure. 3094 * @param[in] action 3095 * Pointer to the action structure. 3096 * @param[in] action_flags 3097 * Holds the actions detected until now.
3098 * @param[in] attr 3099 * Pointer to flow attributes 3100 * @param[out] error 3101 * Pointer to error structure. 3102 * 3103 * @return 3104 * 0 on success, a negative errno value otherwise and rte_errno is set. 3105 */ 3106 static int 3107 flow_dv_validate_action_set_tag(struct rte_eth_dev *dev, 3108 const struct rte_flow_action *action, 3109 uint64_t action_flags, 3110 const struct rte_flow_attr *attr, 3111 struct rte_flow_error *error) 3112 { 3113 const struct rte_flow_action_set_tag *conf; 3114 const uint64_t terminal_action_flags = 3115 MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE | 3116 MLX5_FLOW_ACTION_RSS; 3117 int ret; 3118 3119 if (!mlx5_flow_ext_mreg_supported(dev)) 3120 return rte_flow_error_set(error, ENOTSUP, 3121 RTE_FLOW_ERROR_TYPE_ACTION, action, 3122 "extensive metadata register" 3123 " isn't supported"); 3124 if (!(action->conf)) 3125 return rte_flow_error_set(error, EINVAL, 3126 RTE_FLOW_ERROR_TYPE_ACTION, action, 3127 "configuration cannot be null"); 3128 conf = (const struct rte_flow_action_set_tag *)action->conf; 3129 if (!conf->mask) 3130 return rte_flow_error_set(error, EINVAL, 3131 RTE_FLOW_ERROR_TYPE_ACTION, action, 3132 "zero mask doesn't have any effect"); 3133 ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error); 3134 if (ret < 0) 3135 return ret; 3136 if (!attr->transfer && attr->ingress && 3137 (action_flags & terminal_action_flags)) 3138 return rte_flow_error_set(error, EINVAL, 3139 RTE_FLOW_ERROR_TYPE_ACTION, action, 3140 "set_tag has no effect" 3141 " with terminal actions"); 3142 return 0; 3143 } 3144 3145 /** 3146 * Validate count action. 3147 * 3148 * @param[in] dev 3149 * Pointer to rte_eth_dev structure. 3150 * @param[in] action 3151 * Pointer to the action structure. 3152 * @param[in] action_flags 3153 * Holds the actions detected until now. 3154 * @param[out] error 3155 * Pointer to error structure. 3156 * 3157 * @return 3158 * 0 on success, a negative errno value otherwise and rte_errno is set. 3159 */ 3160 static int 3161 flow_dv_validate_action_count(struct rte_eth_dev *dev, 3162 const struct rte_flow_action *action, 3163 uint64_t action_flags, 3164 struct rte_flow_error *error) 3165 { 3166 struct mlx5_priv *priv = dev->data->dev_private; 3167 const struct rte_flow_action_count *count; 3168 3169 if (!priv->config.devx) 3170 goto notsup_err; 3171 if (action_flags & MLX5_FLOW_ACTION_COUNT) 3172 return rte_flow_error_set(error, EINVAL, 3173 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 3174 "duplicate count actions set"); 3175 count = (const struct rte_flow_action_count *)action->conf; 3176 if (count && count->shared && (action_flags & MLX5_FLOW_ACTION_AGE) && 3177 !priv->sh->flow_hit_aso_en) 3178 return rte_flow_error_set(error, EINVAL, 3179 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 3180 "old age and shared count combination is not supported"); 3181 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS 3182 return 0; 3183 #endif 3184 notsup_err: 3185 return rte_flow_error_set 3186 (error, ENOTSUP, 3187 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 3188 NULL, 3189 "count action not supported"); 3190 } 3191 3192 /** 3193 * Validate the L2 encap action. 3194 * 3195 * @param[in] dev 3196 * Pointer to the rte_eth_dev structure. 3197 * @param[in] action_flags 3198 * Holds the actions detected until now. 3199 * @param[in] action 3200 * Pointer to the action structure. 3201 * @param[in] attr 3202 * Pointer to flow attributes. 3203 * @param[out] error 3204 * Pointer to error structure. 3205 * 3206 * @return 3207 * 0 on success, a negative errno value otherwise and rte_errno is set. 
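 *
 * Only one encap action is allowed per flow, the action configuration must
 * not be NULL, and the action is refused for VF representors on NIC tables
 * (non-transfer flows).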
3208 */ 3209 static int 3210 flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev, 3211 uint64_t action_flags, 3212 const struct rte_flow_action *action, 3213 const struct rte_flow_attr *attr, 3214 struct rte_flow_error *error) 3215 { 3216 const struct mlx5_priv *priv = dev->data->dev_private; 3217 3218 if (!(action->conf)) 3219 return rte_flow_error_set(error, EINVAL, 3220 RTE_FLOW_ERROR_TYPE_ACTION, action, 3221 "configuration cannot be null"); 3222 if (action_flags & MLX5_FLOW_ACTION_ENCAP) 3223 return rte_flow_error_set(error, EINVAL, 3224 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 3225 "can only have a single encap action " 3226 "in a flow"); 3227 if (!attr->transfer && priv->representor) 3228 return rte_flow_error_set(error, ENOTSUP, 3229 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 3230 "encap action for VF representor " 3231 "not supported on NIC table"); 3232 return 0; 3233 } 3234 3235 /** 3236 * Validate a decap action. 3237 * 3238 * @param[in] dev 3239 * Pointer to the rte_eth_dev structure. 3240 * @param[in] action_flags 3241 * Holds the actions detected until now. 3242 * @param[in] action 3243 * Pointer to the action structure. 3244 * @param[in] item_flags 3245 * Holds the items detected. 3246 * @param[in] attr 3247 * Pointer to flow attributes 3248 * @param[out] error 3249 * Pointer to error structure. 3250 * 3251 * @return 3252 * 0 on success, a negative errno value otherwise and rte_errno is set. 3253 */ 3254 static int 3255 flow_dv_validate_action_decap(struct rte_eth_dev *dev, 3256 uint64_t action_flags, 3257 const struct rte_flow_action *action, 3258 const uint64_t item_flags, 3259 const struct rte_flow_attr *attr, 3260 struct rte_flow_error *error) 3261 { 3262 const struct mlx5_priv *priv = dev->data->dev_private; 3263 3264 if (priv->config.hca_attr.scatter_fcs_w_decap_disable && 3265 !priv->config.decap_en) 3266 return rte_flow_error_set(error, ENOTSUP, 3267 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 3268 "decap is not enabled"); 3269 if (action_flags & MLX5_FLOW_XCAP_ACTIONS) 3270 return rte_flow_error_set(error, ENOTSUP, 3271 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 3272 action_flags & 3273 MLX5_FLOW_ACTION_DECAP ? "can only " 3274 "have a single decap action" : "decap " 3275 "after encap is not supported"); 3276 if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) 3277 return rte_flow_error_set(error, EINVAL, 3278 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 3279 "can't have decap action after" 3280 " modify action"); 3281 if (attr->egress) 3282 return rte_flow_error_set(error, ENOTSUP, 3283 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, 3284 NULL, 3285 "decap action not supported for " 3286 "egress"); 3287 if (!attr->transfer && priv->representor) 3288 return rte_flow_error_set(error, ENOTSUP, 3289 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 3290 "decap action for VF representor " 3291 "not supported on NIC table"); 3292 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_DECAP && 3293 !(item_flags & MLX5_FLOW_LAYER_VXLAN)) 3294 return rte_flow_error_set(error, ENOTSUP, 3295 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 3296 "VXLAN item should be present for VXLAN decap"); 3297 return 0; 3298 } 3299 3300 const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,}; 3301 3302 /** 3303 * Validate the raw encap and decap actions. 3304 * 3305 * @param[in] dev 3306 * Pointer to the rte_eth_dev structure. 3307 * @param[in] decap 3308 * Pointer to the decap action. 3309 * @param[in] encap 3310 * Pointer to the encap action. 
3311 * @param[in] attr 3312 * Pointer to flow attributes 3313 * @param[in/out] action_flags 3314 * Holds the actions detected until now. 3315 * @param[out] actions_n 3316 * pointer to the number of actions counter. 3317 * @param[in] action 3318 * Pointer to the action structure. 3319 * @param[in] item_flags 3320 * Holds the items detected. 3321 * @param[out] error 3322 * Pointer to error structure. 3323 * 3324 * @return 3325 * 0 on success, a negative errno value otherwise and rte_errno is set. 3326 */ 3327 static int 3328 flow_dv_validate_action_raw_encap_decap 3329 (struct rte_eth_dev *dev, 3330 const struct rte_flow_action_raw_decap *decap, 3331 const struct rte_flow_action_raw_encap *encap, 3332 const struct rte_flow_attr *attr, uint64_t *action_flags, 3333 int *actions_n, const struct rte_flow_action *action, 3334 uint64_t item_flags, struct rte_flow_error *error) 3335 { 3336 const struct mlx5_priv *priv = dev->data->dev_private; 3337 int ret; 3338 3339 if (encap && (!encap->size || !encap->data)) 3340 return rte_flow_error_set(error, EINVAL, 3341 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 3342 "raw encap data cannot be empty"); 3343 if (decap && encap) { 3344 if (decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE && 3345 encap->size > MLX5_ENCAPSULATION_DECISION_SIZE) 3346 /* L3 encap. */ 3347 decap = NULL; 3348 else if (encap->size <= 3349 MLX5_ENCAPSULATION_DECISION_SIZE && 3350 decap->size > 3351 MLX5_ENCAPSULATION_DECISION_SIZE) 3352 /* L3 decap. */ 3353 encap = NULL; 3354 else if (encap->size > 3355 MLX5_ENCAPSULATION_DECISION_SIZE && 3356 decap->size > 3357 MLX5_ENCAPSULATION_DECISION_SIZE) 3358 /* 2 L2 actions: encap and decap. */ 3359 ; 3360 else 3361 return rte_flow_error_set(error, 3362 ENOTSUP, 3363 RTE_FLOW_ERROR_TYPE_ACTION, 3364 NULL, "unsupported too small " 3365 "raw decap and too small raw " 3366 "encap combination"); 3367 } 3368 if (decap) { 3369 ret = flow_dv_validate_action_decap(dev, *action_flags, action, 3370 item_flags, attr, error); 3371 if (ret < 0) 3372 return ret; 3373 *action_flags |= MLX5_FLOW_ACTION_DECAP; 3374 ++(*actions_n); 3375 } 3376 if (encap) { 3377 if (encap->size <= MLX5_ENCAPSULATION_DECISION_SIZE) 3378 return rte_flow_error_set(error, ENOTSUP, 3379 RTE_FLOW_ERROR_TYPE_ACTION, 3380 NULL, 3381 "small raw encap size"); 3382 if (*action_flags & MLX5_FLOW_ACTION_ENCAP) 3383 return rte_flow_error_set(error, EINVAL, 3384 RTE_FLOW_ERROR_TYPE_ACTION, 3385 NULL, 3386 "more than one encap action"); 3387 if (!attr->transfer && priv->representor) 3388 return rte_flow_error_set 3389 (error, ENOTSUP, 3390 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 3391 "encap action for VF representor " 3392 "not supported on NIC table"); 3393 *action_flags |= MLX5_FLOW_ACTION_ENCAP; 3394 ++(*actions_n); 3395 } 3396 return 0; 3397 } 3398 3399 /** 3400 * Match encap_decap resource. 3401 * 3402 * @param list 3403 * Pointer to the hash list. 3404 * @param entry 3405 * Pointer to exist resource entry object. 3406 * @param key 3407 * Key of the new entry. 3408 * @param ctx_cb 3409 * Pointer to new encap_decap resource. 3410 * 3411 * @return 3412 * 0 on matching, none-zero otherwise. 
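 *
 * Two resources are considered equal when the reformat type, flow table
 * type, flags, size and the raw reformat buffer contents all match.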
3413 */ 3414 int 3415 flow_dv_encap_decap_match_cb(struct mlx5_hlist *list __rte_unused, 3416 struct mlx5_hlist_entry *entry, 3417 uint64_t key __rte_unused, void *cb_ctx) 3418 { 3419 struct mlx5_flow_cb_ctx *ctx = cb_ctx; 3420 struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data; 3421 struct mlx5_flow_dv_encap_decap_resource *cache_resource; 3422 3423 cache_resource = container_of(entry, 3424 struct mlx5_flow_dv_encap_decap_resource, 3425 entry); 3426 if (resource->reformat_type == cache_resource->reformat_type && 3427 resource->ft_type == cache_resource->ft_type && 3428 resource->flags == cache_resource->flags && 3429 resource->size == cache_resource->size && 3430 !memcmp((const void *)resource->buf, 3431 (const void *)cache_resource->buf, 3432 resource->size)) 3433 return 0; 3434 return -1; 3435 } 3436 3437 /** 3438 * Allocate encap_decap resource. 3439 * 3440 * @param list 3441 * Pointer to the hash list. 3442 * @param entry 3443 * Pointer to exist resource entry object. 3444 * @param ctx_cb 3445 * Pointer to new encap_decap resource. 3446 * 3447 * @return 3448 * 0 on matching, none-zero otherwise. 3449 */ 3450 struct mlx5_hlist_entry * 3451 flow_dv_encap_decap_create_cb(struct mlx5_hlist *list, 3452 uint64_t key __rte_unused, 3453 void *cb_ctx) 3454 { 3455 struct mlx5_dev_ctx_shared *sh = list->ctx; 3456 struct mlx5_flow_cb_ctx *ctx = cb_ctx; 3457 struct mlx5dv_dr_domain *domain; 3458 struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data; 3459 struct mlx5_flow_dv_encap_decap_resource *cache_resource; 3460 uint32_t idx; 3461 int ret; 3462 3463 if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) 3464 domain = sh->fdb_domain; 3465 else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX) 3466 domain = sh->rx_domain; 3467 else 3468 domain = sh->tx_domain; 3469 /* Register new encap/decap resource. */ 3470 cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], 3471 &idx); 3472 if (!cache_resource) { 3473 rte_flow_error_set(ctx->error, ENOMEM, 3474 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 3475 "cannot allocate resource memory"); 3476 return NULL; 3477 } 3478 *cache_resource = *resource; 3479 cache_resource->idx = idx; 3480 ret = mlx5_flow_os_create_flow_action_packet_reformat 3481 (sh->ctx, domain, cache_resource, 3482 &cache_resource->action); 3483 if (ret) { 3484 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx); 3485 rte_flow_error_set(ctx->error, ENOMEM, 3486 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 3487 NULL, "cannot create action"); 3488 return NULL; 3489 } 3490 3491 return &cache_resource->entry; 3492 } 3493 3494 /** 3495 * Find existing encap/decap resource or create and register a new one. 3496 * 3497 * @param[in, out] dev 3498 * Pointer to rte_eth_dev structure. 3499 * @param[in, out] resource 3500 * Pointer to encap/decap resource. 3501 * @parm[in, out] dev_flow 3502 * Pointer to the dev_flow. 3503 * @param[out] error 3504 * pointer to error structure. 3505 * 3506 * @return 3507 * 0 on success otherwise -errno and errno is set. 
3508 */ 3509 static int 3510 flow_dv_encap_decap_resource_register 3511 (struct rte_eth_dev *dev, 3512 struct mlx5_flow_dv_encap_decap_resource *resource, 3513 struct mlx5_flow *dev_flow, 3514 struct rte_flow_error *error) 3515 { 3516 struct mlx5_priv *priv = dev->data->dev_private; 3517 struct mlx5_dev_ctx_shared *sh = priv->sh; 3518 struct mlx5_hlist_entry *entry; 3519 union { 3520 struct { 3521 uint32_t ft_type:8; 3522 uint32_t refmt_type:8; 3523 /* 3524 * Header reformat actions can be shared between 3525 * non-root tables. One bit to indicate non-root 3526 * table or not. 3527 */ 3528 uint32_t is_root:1; 3529 uint32_t reserve:15; 3530 }; 3531 uint32_t v32; 3532 } encap_decap_key = { 3533 { 3534 .ft_type = resource->ft_type, 3535 .refmt_type = resource->reformat_type, 3536 .is_root = !!dev_flow->dv.group, 3537 .reserve = 0, 3538 } 3539 }; 3540 struct mlx5_flow_cb_ctx ctx = { 3541 .error = error, 3542 .data = resource, 3543 }; 3544 uint64_t key64; 3545 3546 resource->flags = dev_flow->dv.group ? 0 : 1; 3547 key64 = __rte_raw_cksum(&encap_decap_key.v32, 3548 sizeof(encap_decap_key.v32), 0); 3549 if (resource->reformat_type != 3550 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2 && 3551 resource->size) 3552 key64 = __rte_raw_cksum(resource->buf, resource->size, key64); 3553 entry = mlx5_hlist_register(sh->encaps_decaps, key64, &ctx); 3554 if (!entry) 3555 return -rte_errno; 3556 resource = container_of(entry, typeof(*resource), entry); 3557 dev_flow->dv.encap_decap = resource; 3558 dev_flow->handle->dvh.rix_encap_decap = resource->idx; 3559 return 0; 3560 } 3561 3562 /** 3563 * Find existing table jump resource or create and register a new one. 3564 * 3565 * @param[in, out] dev 3566 * Pointer to rte_eth_dev structure. 3567 * @param[in, out] tbl 3568 * Pointer to flow table resource. 3569 * @parm[in, out] dev_flow 3570 * Pointer to the dev_flow. 3571 * @param[out] error 3572 * pointer to error structure. 3573 * 3574 * @return 3575 * 0 on success otherwise -errno and errno is set. 3576 */ 3577 static int 3578 flow_dv_jump_tbl_resource_register 3579 (struct rte_eth_dev *dev __rte_unused, 3580 struct mlx5_flow_tbl_resource *tbl, 3581 struct mlx5_flow *dev_flow, 3582 struct rte_flow_error *error __rte_unused) 3583 { 3584 struct mlx5_flow_tbl_data_entry *tbl_data = 3585 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl); 3586 3587 MLX5_ASSERT(tbl); 3588 MLX5_ASSERT(tbl_data->jump.action); 3589 dev_flow->handle->rix_jump = tbl_data->idx; 3590 dev_flow->dv.jump = &tbl_data->jump; 3591 return 0; 3592 } 3593 3594 int 3595 flow_dv_port_id_match_cb(struct mlx5_cache_list *list __rte_unused, 3596 struct mlx5_cache_entry *entry, void *cb_ctx) 3597 { 3598 struct mlx5_flow_cb_ctx *ctx = cb_ctx; 3599 struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data; 3600 struct mlx5_flow_dv_port_id_action_resource *res = 3601 container_of(entry, typeof(*res), entry); 3602 3603 return ref->port_id != res->port_id; 3604 } 3605 3606 struct mlx5_cache_entry * 3607 flow_dv_port_id_create_cb(struct mlx5_cache_list *list, 3608 struct mlx5_cache_entry *entry __rte_unused, 3609 void *cb_ctx) 3610 { 3611 struct mlx5_dev_ctx_shared *sh = list->ctx; 3612 struct mlx5_flow_cb_ctx *ctx = cb_ctx; 3613 struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data; 3614 struct mlx5_flow_dv_port_id_action_resource *cache; 3615 uint32_t idx; 3616 int ret; 3617 3618 /* Register new port id action resource. 
*/ 3619 cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx); 3620 if (!cache) { 3621 rte_flow_error_set(ctx->error, ENOMEM, 3622 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 3623 "cannot allocate port_id action cache memory"); 3624 return NULL; 3625 } 3626 *cache = *ref; 3627 ret = mlx5_flow_os_create_flow_action_dest_port(sh->fdb_domain, 3628 ref->port_id, 3629 &cache->action); 3630 if (ret) { 3631 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], idx); 3632 rte_flow_error_set(ctx->error, ENOMEM, 3633 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 3634 "cannot create action"); 3635 return NULL; 3636 } 3637 cache->idx = idx; 3638 return &cache->entry; 3639 } 3640 3641 /** 3642 * Find existing table port ID resource or create and register a new one. 3643 * 3644 * @param[in, out] dev 3645 * Pointer to rte_eth_dev structure. 3646 * @param[in, out] resource 3647 * Pointer to port ID action resource. 3648 * @parm[in, out] dev_flow 3649 * Pointer to the dev_flow. 3650 * @param[out] error 3651 * pointer to error structure. 3652 * 3653 * @return 3654 * 0 on success otherwise -errno and errno is set. 3655 */ 3656 static int 3657 flow_dv_port_id_action_resource_register 3658 (struct rte_eth_dev *dev, 3659 struct mlx5_flow_dv_port_id_action_resource *resource, 3660 struct mlx5_flow *dev_flow, 3661 struct rte_flow_error *error) 3662 { 3663 struct mlx5_priv *priv = dev->data->dev_private; 3664 struct mlx5_cache_entry *entry; 3665 struct mlx5_flow_dv_port_id_action_resource *cache; 3666 struct mlx5_flow_cb_ctx ctx = { 3667 .error = error, 3668 .data = resource, 3669 }; 3670 3671 entry = mlx5_cache_register(&priv->sh->port_id_action_list, &ctx); 3672 if (!entry) 3673 return -rte_errno; 3674 cache = container_of(entry, typeof(*cache), entry); 3675 dev_flow->dv.port_id_action = cache; 3676 dev_flow->handle->rix_port_id_action = cache->idx; 3677 return 0; 3678 } 3679 3680 int 3681 flow_dv_push_vlan_match_cb(struct mlx5_cache_list *list __rte_unused, 3682 struct mlx5_cache_entry *entry, void *cb_ctx) 3683 { 3684 struct mlx5_flow_cb_ctx *ctx = cb_ctx; 3685 struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data; 3686 struct mlx5_flow_dv_push_vlan_action_resource *res = 3687 container_of(entry, typeof(*res), entry); 3688 3689 return ref->vlan_tag != res->vlan_tag || ref->ft_type != res->ft_type; 3690 } 3691 3692 struct mlx5_cache_entry * 3693 flow_dv_push_vlan_create_cb(struct mlx5_cache_list *list, 3694 struct mlx5_cache_entry *entry __rte_unused, 3695 void *cb_ctx) 3696 { 3697 struct mlx5_dev_ctx_shared *sh = list->ctx; 3698 struct mlx5_flow_cb_ctx *ctx = cb_ctx; 3699 struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data; 3700 struct mlx5_flow_dv_push_vlan_action_resource *cache; 3701 struct mlx5dv_dr_domain *domain; 3702 uint32_t idx; 3703 int ret; 3704 3705 /* Register new port id action resource. 
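 * (The resource registered here is actually the push VLAN action: it
 * is allocated from the PUSH_VLAN indexed pool and the action is
 * created on the domain selected from ft_type below.)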
*/ 3706 cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx); 3707 if (!cache) { 3708 rte_flow_error_set(ctx->error, ENOMEM, 3709 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 3710 "cannot allocate push_vlan action cache memory"); 3711 return NULL; 3712 } 3713 *cache = *ref; 3714 if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) 3715 domain = sh->fdb_domain; 3716 else if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX) 3717 domain = sh->rx_domain; 3718 else 3719 domain = sh->tx_domain; 3720 ret = mlx5_flow_os_create_flow_action_push_vlan(domain, ref->vlan_tag, 3721 &cache->action); 3722 if (ret) { 3723 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx); 3724 rte_flow_error_set(ctx->error, ENOMEM, 3725 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 3726 "cannot create push vlan action"); 3727 return NULL; 3728 } 3729 cache->idx = idx; 3730 return &cache->entry; 3731 } 3732 3733 /** 3734 * Find existing push vlan resource or create and register a new one. 3735 * 3736 * @param [in, out] dev 3737 * Pointer to rte_eth_dev structure. 3738 * @param[in, out] resource 3739 * Pointer to port ID action resource. 3740 * @parm[in, out] dev_flow 3741 * Pointer to the dev_flow. 3742 * @param[out] error 3743 * pointer to error structure. 3744 * 3745 * @return 3746 * 0 on success otherwise -errno and errno is set. 3747 */ 3748 static int 3749 flow_dv_push_vlan_action_resource_register 3750 (struct rte_eth_dev *dev, 3751 struct mlx5_flow_dv_push_vlan_action_resource *resource, 3752 struct mlx5_flow *dev_flow, 3753 struct rte_flow_error *error) 3754 { 3755 struct mlx5_priv *priv = dev->data->dev_private; 3756 struct mlx5_flow_dv_push_vlan_action_resource *cache; 3757 struct mlx5_cache_entry *entry; 3758 struct mlx5_flow_cb_ctx ctx = { 3759 .error = error, 3760 .data = resource, 3761 }; 3762 3763 entry = mlx5_cache_register(&priv->sh->push_vlan_action_list, &ctx); 3764 if (!entry) 3765 return -rte_errno; 3766 cache = container_of(entry, typeof(*cache), entry); 3767 3768 dev_flow->handle->dvh.rix_push_vlan = cache->idx; 3769 dev_flow->dv.push_vlan_res = cache; 3770 return 0; 3771 } 3772 3773 /** 3774 * Get the size of specific rte_flow_item_type hdr size 3775 * 3776 * @param[in] item_type 3777 * Tested rte_flow_item_type. 3778 * 3779 * @return 3780 * sizeof struct item_type, 0 if void or irrelevant. 3781 */ 3782 static size_t 3783 flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type) 3784 { 3785 size_t retval; 3786 3787 switch (item_type) { 3788 case RTE_FLOW_ITEM_TYPE_ETH: 3789 retval = sizeof(struct rte_ether_hdr); 3790 break; 3791 case RTE_FLOW_ITEM_TYPE_VLAN: 3792 retval = sizeof(struct rte_vlan_hdr); 3793 break; 3794 case RTE_FLOW_ITEM_TYPE_IPV4: 3795 retval = sizeof(struct rte_ipv4_hdr); 3796 break; 3797 case RTE_FLOW_ITEM_TYPE_IPV6: 3798 retval = sizeof(struct rte_ipv6_hdr); 3799 break; 3800 case RTE_FLOW_ITEM_TYPE_UDP: 3801 retval = sizeof(struct rte_udp_hdr); 3802 break; 3803 case RTE_FLOW_ITEM_TYPE_TCP: 3804 retval = sizeof(struct rte_tcp_hdr); 3805 break; 3806 case RTE_FLOW_ITEM_TYPE_VXLAN: 3807 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: 3808 retval = sizeof(struct rte_vxlan_hdr); 3809 break; 3810 case RTE_FLOW_ITEM_TYPE_GRE: 3811 case RTE_FLOW_ITEM_TYPE_NVGRE: 3812 retval = sizeof(struct rte_gre_hdr); 3813 break; 3814 case RTE_FLOW_ITEM_TYPE_MPLS: 3815 retval = sizeof(struct rte_mpls_hdr); 3816 break; 3817 case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. 
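 * Both VOID and any unrecognized item type yield a zero header length,
 * so they contribute nothing to the encapsulation buffer.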
*/ 3818 default: 3819 retval = 0; 3820 break; 3821 } 3822 return retval; 3823 } 3824 3825 #define MLX5_ENCAP_IPV4_VERSION 0x40 3826 #define MLX5_ENCAP_IPV4_IHL_MIN 0x05 3827 #define MLX5_ENCAP_IPV4_TTL_DEF 0x40 3828 #define MLX5_ENCAP_IPV6_VTC_FLOW 0x60000000 3829 #define MLX5_ENCAP_IPV6_HOP_LIMIT 0xff 3830 #define MLX5_ENCAP_VXLAN_FLAGS 0x08000000 3831 #define MLX5_ENCAP_VXLAN_GPE_FLAGS 0x04 3832 3833 /** 3834 * Convert the encap action data from list of rte_flow_item to raw buffer 3835 * 3836 * @param[in] items 3837 * Pointer to rte_flow_item objects list. 3838 * @param[out] buf 3839 * Pointer to the output buffer. 3840 * @param[out] size 3841 * Pointer to the output buffer size. 3842 * @param[out] error 3843 * Pointer to the error structure. 3844 * 3845 * @return 3846 * 0 on success, a negative errno value otherwise and rte_errno is set. 3847 */ 3848 static int 3849 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf, 3850 size_t *size, struct rte_flow_error *error) 3851 { 3852 struct rte_ether_hdr *eth = NULL; 3853 struct rte_vlan_hdr *vlan = NULL; 3854 struct rte_ipv4_hdr *ipv4 = NULL; 3855 struct rte_ipv6_hdr *ipv6 = NULL; 3856 struct rte_udp_hdr *udp = NULL; 3857 struct rte_vxlan_hdr *vxlan = NULL; 3858 struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL; 3859 struct rte_gre_hdr *gre = NULL; 3860 size_t len; 3861 size_t temp_size = 0; 3862 3863 if (!items) 3864 return rte_flow_error_set(error, EINVAL, 3865 RTE_FLOW_ERROR_TYPE_ACTION, 3866 NULL, "invalid empty data"); 3867 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { 3868 len = flow_dv_get_item_hdr_len(items->type); 3869 if (len + temp_size > MLX5_ENCAP_MAX_LEN) 3870 return rte_flow_error_set(error, EINVAL, 3871 RTE_FLOW_ERROR_TYPE_ACTION, 3872 (void *)items->type, 3873 "items total size is too big" 3874 " for encap action"); 3875 rte_memcpy((void *)&buf[temp_size], items->spec, len); 3876 switch (items->type) { 3877 case RTE_FLOW_ITEM_TYPE_ETH: 3878 eth = (struct rte_ether_hdr *)&buf[temp_size]; 3879 break; 3880 case RTE_FLOW_ITEM_TYPE_VLAN: 3881 vlan = (struct rte_vlan_hdr *)&buf[temp_size]; 3882 if (!eth) 3883 return rte_flow_error_set(error, EINVAL, 3884 RTE_FLOW_ERROR_TYPE_ACTION, 3885 (void *)items->type, 3886 "eth header not found"); 3887 if (!eth->ether_type) 3888 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN); 3889 break; 3890 case RTE_FLOW_ITEM_TYPE_IPV4: 3891 ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size]; 3892 if (!vlan && !eth) 3893 return rte_flow_error_set(error, EINVAL, 3894 RTE_FLOW_ERROR_TYPE_ACTION, 3895 (void *)items->type, 3896 "neither eth nor vlan" 3897 " header found"); 3898 if (vlan && !vlan->eth_proto) 3899 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4); 3900 else if (eth && !eth->ether_type) 3901 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4); 3902 if (!ipv4->version_ihl) 3903 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION | 3904 MLX5_ENCAP_IPV4_IHL_MIN; 3905 if (!ipv4->time_to_live) 3906 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF; 3907 break; 3908 case RTE_FLOW_ITEM_TYPE_IPV6: 3909 ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size]; 3910 if (!vlan && !eth) 3911 return rte_flow_error_set(error, EINVAL, 3912 RTE_FLOW_ERROR_TYPE_ACTION, 3913 (void *)items->type, 3914 "neither eth nor vlan" 3915 " header found"); 3916 if (vlan && !vlan->eth_proto) 3917 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6); 3918 else if (eth && !eth->ether_type) 3919 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6); 3920 if (!ipv6->vtc_flow) 3921 ipv6->vtc_flow = 3922 RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW); 3923 if 
(!ipv6->hop_limits) 3924 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT; 3925 break; 3926 case RTE_FLOW_ITEM_TYPE_UDP: 3927 udp = (struct rte_udp_hdr *)&buf[temp_size]; 3928 if (!ipv4 && !ipv6) 3929 return rte_flow_error_set(error, EINVAL, 3930 RTE_FLOW_ERROR_TYPE_ACTION, 3931 (void *)items->type, 3932 "ip header not found"); 3933 if (ipv4 && !ipv4->next_proto_id) 3934 ipv4->next_proto_id = IPPROTO_UDP; 3935 else if (ipv6 && !ipv6->proto) 3936 ipv6->proto = IPPROTO_UDP; 3937 break; 3938 case RTE_FLOW_ITEM_TYPE_VXLAN: 3939 vxlan = (struct rte_vxlan_hdr *)&buf[temp_size]; 3940 if (!udp) 3941 return rte_flow_error_set(error, EINVAL, 3942 RTE_FLOW_ERROR_TYPE_ACTION, 3943 (void *)items->type, 3944 "udp header not found"); 3945 if (!udp->dst_port) 3946 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN); 3947 if (!vxlan->vx_flags) 3948 vxlan->vx_flags = 3949 RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS); 3950 break; 3951 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: 3952 vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size]; 3953 if (!udp) 3954 return rte_flow_error_set(error, EINVAL, 3955 RTE_FLOW_ERROR_TYPE_ACTION, 3956 (void *)items->type, 3957 "udp header not found"); 3958 if (!vxlan_gpe->proto) 3959 return rte_flow_error_set(error, EINVAL, 3960 RTE_FLOW_ERROR_TYPE_ACTION, 3961 (void *)items->type, 3962 "next protocol not found"); 3963 if (!udp->dst_port) 3964 udp->dst_port = 3965 RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE); 3966 if (!vxlan_gpe->vx_flags) 3967 vxlan_gpe->vx_flags = 3968 MLX5_ENCAP_VXLAN_GPE_FLAGS; 3969 break; 3970 case RTE_FLOW_ITEM_TYPE_GRE: 3971 case RTE_FLOW_ITEM_TYPE_NVGRE: 3972 gre = (struct rte_gre_hdr *)&buf[temp_size]; 3973 if (!gre->proto) 3974 return rte_flow_error_set(error, EINVAL, 3975 RTE_FLOW_ERROR_TYPE_ACTION, 3976 (void *)items->type, 3977 "next protocol not found"); 3978 if (!ipv4 && !ipv6) 3979 return rte_flow_error_set(error, EINVAL, 3980 RTE_FLOW_ERROR_TYPE_ACTION, 3981 (void *)items->type, 3982 "ip header not found"); 3983 if (ipv4 && !ipv4->next_proto_id) 3984 ipv4->next_proto_id = IPPROTO_GRE; 3985 else if (ipv6 && !ipv6->proto) 3986 ipv6->proto = IPPROTO_GRE; 3987 break; 3988 case RTE_FLOW_ITEM_TYPE_VOID: 3989 break; 3990 default: 3991 return rte_flow_error_set(error, EINVAL, 3992 RTE_FLOW_ERROR_TYPE_ACTION, 3993 (void *)items->type, 3994 "unsupported item type"); 3995 break; 3996 } 3997 temp_size += len; 3998 } 3999 *size = temp_size; 4000 return 0; 4001 } 4002 4003 static int 4004 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error) 4005 { 4006 struct rte_ether_hdr *eth = NULL; 4007 struct rte_vlan_hdr *vlan = NULL; 4008 struct rte_ipv6_hdr *ipv6 = NULL; 4009 struct rte_udp_hdr *udp = NULL; 4010 char *next_hdr; 4011 uint16_t proto; 4012 4013 eth = (struct rte_ether_hdr *)data; 4014 next_hdr = (char *)(eth + 1); 4015 proto = RTE_BE16(eth->ether_type); 4016 4017 /* VLAN skipping */ 4018 while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) { 4019 vlan = (struct rte_vlan_hdr *)next_hdr; 4020 proto = RTE_BE16(vlan->eth_proto); 4021 next_hdr += sizeof(struct rte_vlan_hdr); 4022 } 4023 4024 /* HW calculates IPv4 csum. no need to proceed */ 4025 if (proto == RTE_ETHER_TYPE_IPV4) 4026 return 0; 4027 4028 /* non IPv4/IPv6 header. 
not supported */ 4029 if (proto != RTE_ETHER_TYPE_IPV6) { 4030 return rte_flow_error_set(error, ENOTSUP, 4031 RTE_FLOW_ERROR_TYPE_ACTION, 4032 NULL, "Cannot offload non IPv4/IPv6"); 4033 } 4034 4035 ipv6 = (struct rte_ipv6_hdr *)next_hdr; 4036 4037 /* ignore non UDP */ 4038 if (ipv6->proto != IPPROTO_UDP) 4039 return 0; 4040 4041 udp = (struct rte_udp_hdr *)(ipv6 + 1); 4042 udp->dgram_cksum = 0; 4043 4044 return 0; 4045 } 4046 4047 /** 4048 * Convert L2 encap action to DV specification. 4049 * 4050 * @param[in] dev 4051 * Pointer to rte_eth_dev structure. 4052 * @param[in] action 4053 * Pointer to action structure. 4054 * @param[in, out] dev_flow 4055 * Pointer to the mlx5_flow. 4056 * @param[in] transfer 4057 * Mark if the flow is E-Switch flow. 4058 * @param[out] error 4059 * Pointer to the error structure. 4060 * 4061 * @return 4062 * 0 on success, a negative errno value otherwise and rte_errno is set. 4063 */ 4064 static int 4065 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev, 4066 const struct rte_flow_action *action, 4067 struct mlx5_flow *dev_flow, 4068 uint8_t transfer, 4069 struct rte_flow_error *error) 4070 { 4071 const struct rte_flow_item *encap_data; 4072 const struct rte_flow_action_raw_encap *raw_encap_data; 4073 struct mlx5_flow_dv_encap_decap_resource res = { 4074 .reformat_type = 4075 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL, 4076 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB : 4077 MLX5DV_FLOW_TABLE_TYPE_NIC_TX, 4078 }; 4079 4080 if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) { 4081 raw_encap_data = 4082 (const struct rte_flow_action_raw_encap *)action->conf; 4083 res.size = raw_encap_data->size; 4084 memcpy(res.buf, raw_encap_data->data, res.size); 4085 } else { 4086 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) 4087 encap_data = 4088 ((const struct rte_flow_action_vxlan_encap *) 4089 action->conf)->definition; 4090 else 4091 encap_data = 4092 ((const struct rte_flow_action_nvgre_encap *) 4093 action->conf)->definition; 4094 if (flow_dv_convert_encap_data(encap_data, res.buf, 4095 &res.size, error)) 4096 return -rte_errno; 4097 } 4098 if (flow_dv_zero_encap_udp_csum(res.buf, error)) 4099 return -rte_errno; 4100 if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error)) 4101 return rte_flow_error_set(error, EINVAL, 4102 RTE_FLOW_ERROR_TYPE_ACTION, 4103 NULL, "can't create L2 encap action"); 4104 return 0; 4105 } 4106 4107 /** 4108 * Convert L2 decap action to DV specification. 4109 * 4110 * @param[in] dev 4111 * Pointer to rte_eth_dev structure. 4112 * @param[in, out] dev_flow 4113 * Pointer to the mlx5_flow. 4114 * @param[in] transfer 4115 * Mark if the flow is E-Switch flow. 4116 * @param[out] error 4117 * Pointer to the error structure. 4118 * 4119 * @return 4120 * 0 on success, a negative errno value otherwise and rte_errno is set. 4121 */ 4122 static int 4123 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev, 4124 struct mlx5_flow *dev_flow, 4125 uint8_t transfer, 4126 struct rte_flow_error *error) 4127 { 4128 struct mlx5_flow_dv_encap_decap_resource res = { 4129 .size = 0, 4130 .reformat_type = 4131 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2, 4132 .ft_type = transfer ? 
MLX5DV_FLOW_TABLE_TYPE_FDB : 4133 MLX5DV_FLOW_TABLE_TYPE_NIC_RX, 4134 }; 4135 4136 if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error)) 4137 return rte_flow_error_set(error, EINVAL, 4138 RTE_FLOW_ERROR_TYPE_ACTION, 4139 NULL, "can't create L2 decap action"); 4140 return 0; 4141 } 4142 4143 /** 4144 * Convert raw decap/encap (L3 tunnel) action to DV specification. 4145 * 4146 * @param[in] dev 4147 * Pointer to rte_eth_dev structure. 4148 * @param[in] action 4149 * Pointer to action structure. 4150 * @param[in, out] dev_flow 4151 * Pointer to the mlx5_flow. 4152 * @param[in] attr 4153 * Pointer to the flow attributes. 4154 * @param[out] error 4155 * Pointer to the error structure. 4156 * 4157 * @return 4158 * 0 on success, a negative errno value otherwise and rte_errno is set. 4159 */ 4160 static int 4161 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev, 4162 const struct rte_flow_action *action, 4163 struct mlx5_flow *dev_flow, 4164 const struct rte_flow_attr *attr, 4165 struct rte_flow_error *error) 4166 { 4167 const struct rte_flow_action_raw_encap *encap_data; 4168 struct mlx5_flow_dv_encap_decap_resource res; 4169 4170 memset(&res, 0, sizeof(res)); 4171 encap_data = (const struct rte_flow_action_raw_encap *)action->conf; 4172 res.size = encap_data->size; 4173 memcpy(res.buf, encap_data->data, res.size); 4174 res.reformat_type = res.size < MLX5_ENCAPSULATION_DECISION_SIZE ? 4175 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 : 4176 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL; 4177 if (attr->transfer) 4178 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB; 4179 else 4180 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX : 4181 MLX5DV_FLOW_TABLE_TYPE_NIC_RX; 4182 if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error)) 4183 return rte_flow_error_set(error, EINVAL, 4184 RTE_FLOW_ERROR_TYPE_ACTION, 4185 NULL, "can't create encap action"); 4186 return 0; 4187 } 4188 4189 /** 4190 * Create action push VLAN. 4191 * 4192 * @param[in] dev 4193 * Pointer to rte_eth_dev structure. 4194 * @param[in] attr 4195 * Pointer to the flow attributes. 4196 * @param[in] vlan 4197 * Pointer to the vlan to push to the Ethernet header. 4198 * @param[in, out] dev_flow 4199 * Pointer to the mlx5_flow. 4200 * @param[out] error 4201 * Pointer to the error structure. 4202 * 4203 * @return 4204 * 0 on success, a negative errno value otherwise and rte_errno is set. 4205 */ 4206 static int 4207 flow_dv_create_action_push_vlan(struct rte_eth_dev *dev, 4208 const struct rte_flow_attr *attr, 4209 const struct rte_vlan_hdr *vlan, 4210 struct mlx5_flow *dev_flow, 4211 struct rte_flow_error *error) 4212 { 4213 struct mlx5_flow_dv_push_vlan_action_resource res; 4214 4215 memset(&res, 0, sizeof(res)); 4216 res.vlan_tag = 4217 rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 | 4218 vlan->vlan_tci); 4219 if (attr->transfer) 4220 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB; 4221 else 4222 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX : 4223 MLX5DV_FLOW_TABLE_TYPE_NIC_RX; 4224 return flow_dv_push_vlan_action_resource_register 4225 (dev, &res, dev_flow, error); 4226 } 4227 4228 /** 4229 * Validate the modify-header actions. 4230 * 4231 * @param[in] action_flags 4232 * Holds the actions detected until now. 4233 * @param[in] action 4234 * Pointer to the modify action. 4235 * @param[out] error 4236 * Pointer to error structure. 4237 * 4238 * @return 4239 * 0 on success, a negative errno value otherwise and rte_errno is set. 
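 *
 * For example, an action list of the form RAW_DECAP -> SET_IPV4_SRC ->
 * QUEUE passes this check, while VXLAN_ENCAP -> SET_IPV4_SRC is
 * rejected because a modify-header action is not allowed to follow an
 * encap action. DEC_TTL is the only modify action accepted here with a
 * NULL configuration.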
4240 */ 4241 static int 4242 flow_dv_validate_action_modify_hdr(const uint64_t action_flags, 4243 const struct rte_flow_action *action, 4244 struct rte_flow_error *error) 4245 { 4246 if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf) 4247 return rte_flow_error_set(error, EINVAL, 4248 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 4249 NULL, "action configuration not set"); 4250 if (action_flags & MLX5_FLOW_ACTION_ENCAP) 4251 return rte_flow_error_set(error, EINVAL, 4252 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 4253 "can't have encap action before" 4254 " modify action"); 4255 return 0; 4256 } 4257 4258 /** 4259 * Validate the modify-header MAC address actions. 4260 * 4261 * @param[in] action_flags 4262 * Holds the actions detected until now. 4263 * @param[in] action 4264 * Pointer to the modify action. 4265 * @param[in] item_flags 4266 * Holds the items detected. 4267 * @param[out] error 4268 * Pointer to error structure. 4269 * 4270 * @return 4271 * 0 on success, a negative errno value otherwise and rte_errno is set. 4272 */ 4273 static int 4274 flow_dv_validate_action_modify_mac(const uint64_t action_flags, 4275 const struct rte_flow_action *action, 4276 const uint64_t item_flags, 4277 struct rte_flow_error *error) 4278 { 4279 int ret = 0; 4280 4281 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error); 4282 if (!ret) { 4283 if (!(item_flags & MLX5_FLOW_LAYER_L2)) 4284 return rte_flow_error_set(error, EINVAL, 4285 RTE_FLOW_ERROR_TYPE_ACTION, 4286 NULL, 4287 "no L2 item in pattern"); 4288 } 4289 return ret; 4290 } 4291 4292 /** 4293 * Validate the modify-header IPv4 address actions. 4294 * 4295 * @param[in] action_flags 4296 * Holds the actions detected until now. 4297 * @param[in] action 4298 * Pointer to the modify action. 4299 * @param[in] item_flags 4300 * Holds the items detected. 4301 * @param[out] error 4302 * Pointer to error structure. 4303 * 4304 * @return 4305 * 0 on success, a negative errno value otherwise and rte_errno is set. 4306 */ 4307 static int 4308 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags, 4309 const struct rte_flow_action *action, 4310 const uint64_t item_flags, 4311 struct rte_flow_error *error) 4312 { 4313 int ret = 0; 4314 uint64_t layer; 4315 4316 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error); 4317 if (!ret) { 4318 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ? 4319 MLX5_FLOW_LAYER_INNER_L3_IPV4 : 4320 MLX5_FLOW_LAYER_OUTER_L3_IPV4; 4321 if (!(item_flags & layer)) 4322 return rte_flow_error_set(error, EINVAL, 4323 RTE_FLOW_ERROR_TYPE_ACTION, 4324 NULL, 4325 "no ipv4 item in pattern"); 4326 } 4327 return ret; 4328 } 4329 4330 /** 4331 * Validate the modify-header IPv6 address actions. 4332 * 4333 * @param[in] action_flags 4334 * Holds the actions detected until now. 4335 * @param[in] action 4336 * Pointer to the modify action. 4337 * @param[in] item_flags 4338 * Holds the items detected. 4339 * @param[out] error 4340 * Pointer to error structure. 4341 * 4342 * @return 4343 * 0 on success, a negative errno value otherwise and rte_errno is set. 4344 */ 4345 static int 4346 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags, 4347 const struct rte_flow_action *action, 4348 const uint64_t item_flags, 4349 struct rte_flow_error *error) 4350 { 4351 int ret = 0; 4352 uint64_t layer; 4353 4354 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error); 4355 if (!ret) { 4356 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ? 
4357 MLX5_FLOW_LAYER_INNER_L3_IPV6 : 4358 MLX5_FLOW_LAYER_OUTER_L3_IPV6; 4359 if (!(item_flags & layer)) 4360 return rte_flow_error_set(error, EINVAL, 4361 RTE_FLOW_ERROR_TYPE_ACTION, 4362 NULL, 4363 "no ipv6 item in pattern"); 4364 } 4365 return ret; 4366 } 4367 4368 /** 4369 * Validate the modify-header TP actions. 4370 * 4371 * @param[in] action_flags 4372 * Holds the actions detected until now. 4373 * @param[in] action 4374 * Pointer to the modify action. 4375 * @param[in] item_flags 4376 * Holds the items detected. 4377 * @param[out] error 4378 * Pointer to error structure. 4379 * 4380 * @return 4381 * 0 on success, a negative errno value otherwise and rte_errno is set. 4382 */ 4383 static int 4384 flow_dv_validate_action_modify_tp(const uint64_t action_flags, 4385 const struct rte_flow_action *action, 4386 const uint64_t item_flags, 4387 struct rte_flow_error *error) 4388 { 4389 int ret = 0; 4390 uint64_t layer; 4391 4392 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error); 4393 if (!ret) { 4394 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ? 4395 MLX5_FLOW_LAYER_INNER_L4 : 4396 MLX5_FLOW_LAYER_OUTER_L4; 4397 if (!(item_flags & layer)) 4398 return rte_flow_error_set(error, EINVAL, 4399 RTE_FLOW_ERROR_TYPE_ACTION, 4400 NULL, "no transport layer " 4401 "in pattern"); 4402 } 4403 return ret; 4404 } 4405 4406 /** 4407 * Validate the modify-header actions of increment/decrement 4408 * TCP Sequence-number. 4409 * 4410 * @param[in] action_flags 4411 * Holds the actions detected until now. 4412 * @param[in] action 4413 * Pointer to the modify action. 4414 * @param[in] item_flags 4415 * Holds the items detected. 4416 * @param[out] error 4417 * Pointer to error structure. 4418 * 4419 * @return 4420 * 0 on success, a negative errno value otherwise and rte_errno is set. 4421 */ 4422 static int 4423 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags, 4424 const struct rte_flow_action *action, 4425 const uint64_t item_flags, 4426 struct rte_flow_error *error) 4427 { 4428 int ret = 0; 4429 uint64_t layer; 4430 4431 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error); 4432 if (!ret) { 4433 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ? 4434 MLX5_FLOW_LAYER_INNER_L4_TCP : 4435 MLX5_FLOW_LAYER_OUTER_L4_TCP; 4436 if (!(item_flags & layer)) 4437 return rte_flow_error_set(error, EINVAL, 4438 RTE_FLOW_ERROR_TYPE_ACTION, 4439 NULL, "no TCP item in" 4440 " pattern"); 4441 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ && 4442 (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) || 4443 (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ && 4444 (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ))) 4445 return rte_flow_error_set(error, EINVAL, 4446 RTE_FLOW_ERROR_TYPE_ACTION, 4447 NULL, 4448 "cannot decrease and increase" 4449 " TCP sequence number" 4450 " at the same time"); 4451 } 4452 return ret; 4453 } 4454 4455 /** 4456 * Validate the modify-header actions of increment/decrement 4457 * TCP Acknowledgment number. 4458 * 4459 * @param[in] action_flags 4460 * Holds the actions detected until now. 4461 * @param[in] action 4462 * Pointer to the modify action. 4463 * @param[in] item_flags 4464 * Holds the items detected. 4465 * @param[out] error 4466 * Pointer to error structure. 4467 * 4468 * @return 4469 * 0 on success, a negative errno value otherwise and rte_errno is set. 
4470 */ 4471 static int 4472 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags, 4473 const struct rte_flow_action *action, 4474 const uint64_t item_flags, 4475 struct rte_flow_error *error) 4476 { 4477 int ret = 0; 4478 uint64_t layer; 4479 4480 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error); 4481 if (!ret) { 4482 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ? 4483 MLX5_FLOW_LAYER_INNER_L4_TCP : 4484 MLX5_FLOW_LAYER_OUTER_L4_TCP; 4485 if (!(item_flags & layer)) 4486 return rte_flow_error_set(error, EINVAL, 4487 RTE_FLOW_ERROR_TYPE_ACTION, 4488 NULL, "no TCP item in" 4489 " pattern"); 4490 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK && 4491 (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) || 4492 (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK && 4493 (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK))) 4494 return rte_flow_error_set(error, EINVAL, 4495 RTE_FLOW_ERROR_TYPE_ACTION, 4496 NULL, 4497 "cannot decrease and increase" 4498 " TCP acknowledgment number" 4499 " at the same time"); 4500 } 4501 return ret; 4502 } 4503 4504 /** 4505 * Validate the modify-header TTL actions. 4506 * 4507 * @param[in] action_flags 4508 * Holds the actions detected until now. 4509 * @param[in] action 4510 * Pointer to the modify action. 4511 * @param[in] item_flags 4512 * Holds the items detected. 4513 * @param[out] error 4514 * Pointer to error structure. 4515 * 4516 * @return 4517 * 0 on success, a negative errno value otherwise and rte_errno is set. 4518 */ 4519 static int 4520 flow_dv_validate_action_modify_ttl(const uint64_t action_flags, 4521 const struct rte_flow_action *action, 4522 const uint64_t item_flags, 4523 struct rte_flow_error *error) 4524 { 4525 int ret = 0; 4526 uint64_t layer; 4527 4528 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error); 4529 if (!ret) { 4530 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ? 4531 MLX5_FLOW_LAYER_INNER_L3 : 4532 MLX5_FLOW_LAYER_OUTER_L3; 4533 if (!(item_flags & layer)) 4534 return rte_flow_error_set(error, EINVAL, 4535 RTE_FLOW_ERROR_TYPE_ACTION, 4536 NULL, 4537 "no IP protocol in pattern"); 4538 } 4539 return ret; 4540 } 4541 4542 /** 4543 * Validate the generic modify field actions. 4544 * @param[in] dev 4545 * Pointer to the rte_eth_dev structure. 4546 * @param[in] action_flags 4547 * Holds the actions detected until now. 4548 * @param[in] action 4549 * Pointer to the modify action. 4550 * @param[in] attr 4551 * Pointer to the flow attributes. 4552 * @param[out] error 4553 * Pointer to error structure. 4554 * 4555 * @return 4556 * Number of header fields to modify (0 or more) on success, 4557 * a negative errno value otherwise and rte_errno is set. 
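 *
 * For example, copying a 48-bit wide field (width == 48) returns 2:
 * 48 / 32 == 1 with a non-zero remainder, so two 32-bit modify-header
 * entries are required.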
4558 */ 4559 static int 4560 flow_dv_validate_action_modify_field(struct rte_eth_dev *dev, 4561 const uint64_t action_flags, 4562 const struct rte_flow_action *action, 4563 const struct rte_flow_attr *attr, 4564 struct rte_flow_error *error) 4565 { 4566 int ret = 0; 4567 struct mlx5_priv *priv = dev->data->dev_private; 4568 struct mlx5_dev_config *config = &priv->config; 4569 const struct rte_flow_action_modify_field *action_modify_field = 4570 action->conf; 4571 uint32_t dst_width = 4572 mlx5_flow_item_field_width(action_modify_field->dst.field); 4573 uint32_t src_width = 4574 mlx5_flow_item_field_width(action_modify_field->src.field); 4575 4576 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error); 4577 if (ret) 4578 return ret; 4579 4580 if (action_modify_field->width == 0) 4581 return rte_flow_error_set(error, EINVAL, 4582 RTE_FLOW_ERROR_TYPE_ACTION, action, 4583 "no bits are requested to be modified"); 4584 else if (action_modify_field->width > dst_width || 4585 action_modify_field->width > src_width) 4586 return rte_flow_error_set(error, EINVAL, 4587 RTE_FLOW_ERROR_TYPE_ACTION, action, 4588 "cannot modify more bits than" 4589 " the width of a field"); 4590 if (action_modify_field->dst.field != RTE_FLOW_FIELD_VALUE && 4591 action_modify_field->dst.field != RTE_FLOW_FIELD_POINTER) { 4592 if ((action_modify_field->dst.offset + 4593 action_modify_field->width > dst_width) || 4594 (action_modify_field->dst.offset % 32)) 4595 return rte_flow_error_set(error, EINVAL, 4596 RTE_FLOW_ERROR_TYPE_ACTION, action, 4597 "destination offset is too big" 4598 " or not aligned to 4 bytes"); 4599 if (action_modify_field->dst.level && 4600 action_modify_field->dst.field != RTE_FLOW_FIELD_TAG) 4601 return rte_flow_error_set(error, ENOTSUP, 4602 RTE_FLOW_ERROR_TYPE_ACTION, action, 4603 "inner header fields modification" 4604 " is not supported"); 4605 } 4606 if (action_modify_field->src.field != RTE_FLOW_FIELD_VALUE && 4607 action_modify_field->src.field != RTE_FLOW_FIELD_POINTER) { 4608 if (!attr->transfer && !attr->group) 4609 return rte_flow_error_set(error, ENOTSUP, 4610 RTE_FLOW_ERROR_TYPE_ACTION, action, 4611 "modify field action is not" 4612 " supported for group 0"); 4613 if ((action_modify_field->src.offset + 4614 action_modify_field->width > src_width) || 4615 (action_modify_field->src.offset % 32)) 4616 return rte_flow_error_set(error, EINVAL, 4617 RTE_FLOW_ERROR_TYPE_ACTION, action, 4618 "source offset is too big" 4619 " or not aligned to 4 bytes"); 4620 if (action_modify_field->src.level && 4621 action_modify_field->src.field != RTE_FLOW_FIELD_TAG) 4622 return rte_flow_error_set(error, ENOTSUP, 4623 RTE_FLOW_ERROR_TYPE_ACTION, action, 4624 "inner header fields modification" 4625 " is not supported"); 4626 } 4627 if (action_modify_field->dst.field == 4628 action_modify_field->src.field) 4629 return rte_flow_error_set(error, EINVAL, 4630 RTE_FLOW_ERROR_TYPE_ACTION, action, 4631 "source and destination fields" 4632 " cannot be the same"); 4633 if (action_modify_field->dst.field == RTE_FLOW_FIELD_VALUE || 4634 action_modify_field->dst.field == RTE_FLOW_FIELD_POINTER) 4635 return rte_flow_error_set(error, EINVAL, 4636 RTE_FLOW_ERROR_TYPE_ACTION, action, 4637 "immediate value or a pointer to it" 4638 " cannot be used as a destination"); 4639 if (action_modify_field->dst.field == RTE_FLOW_FIELD_START || 4640 action_modify_field->src.field == RTE_FLOW_FIELD_START) 4641 return rte_flow_error_set(error, ENOTSUP, 4642 RTE_FLOW_ERROR_TYPE_ACTION, action, 4643 "modifications of an 
arbitrary" 4644 " place in a packet is not supported"); 4645 if (action_modify_field->dst.field == RTE_FLOW_FIELD_VLAN_TYPE || 4646 action_modify_field->src.field == RTE_FLOW_FIELD_VLAN_TYPE) 4647 return rte_flow_error_set(error, ENOTSUP, 4648 RTE_FLOW_ERROR_TYPE_ACTION, action, 4649 "modifications of the 802.1Q Tag" 4650 " Identifier is not supported"); 4651 if (action_modify_field->dst.field == RTE_FLOW_FIELD_VXLAN_VNI || 4652 action_modify_field->src.field == RTE_FLOW_FIELD_VXLAN_VNI) 4653 return rte_flow_error_set(error, ENOTSUP, 4654 RTE_FLOW_ERROR_TYPE_ACTION, action, 4655 "modifications of the VXLAN Network" 4656 " Identifier is not supported"); 4657 if (action_modify_field->dst.field == RTE_FLOW_FIELD_GENEVE_VNI || 4658 action_modify_field->src.field == RTE_FLOW_FIELD_GENEVE_VNI) 4659 return rte_flow_error_set(error, ENOTSUP, 4660 RTE_FLOW_ERROR_TYPE_ACTION, action, 4661 "modifications of the GENEVE Network" 4662 " Identifier is not supported"); 4663 if (action_modify_field->dst.field == RTE_FLOW_FIELD_MARK || 4664 action_modify_field->src.field == RTE_FLOW_FIELD_MARK || 4665 action_modify_field->dst.field == RTE_FLOW_FIELD_META || 4666 action_modify_field->src.field == RTE_FLOW_FIELD_META) { 4667 if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY || 4668 !mlx5_flow_ext_mreg_supported(dev)) 4669 return rte_flow_error_set(error, ENOTSUP, 4670 RTE_FLOW_ERROR_TYPE_ACTION, action, 4671 "cannot modify mark or metadata without" 4672 " extended metadata register support"); 4673 } 4674 if (action_modify_field->operation != RTE_FLOW_MODIFY_SET) 4675 return rte_flow_error_set(error, ENOTSUP, 4676 RTE_FLOW_ERROR_TYPE_ACTION, action, 4677 "add and sub operations" 4678 " are not supported"); 4679 return (action_modify_field->width / 32) + 4680 !!(action_modify_field->width % 32); 4681 } 4682 4683 /** 4684 * Validate jump action. 4685 * 4686 * @param[in] action 4687 * Pointer to the jump action. 4688 * @param[in] action_flags 4689 * Holds the actions detected until now. 4690 * @param[in] attributes 4691 * Pointer to flow attributes 4692 * @param[in] external 4693 * Action belongs to flow rule created by request external to PMD. 4694 * @param[out] error 4695 * Pointer to error structure. 4696 * 4697 * @return 4698 * 0 on success, a negative errno value otherwise and rte_errno is set. 
4699 */ 4700 static int 4701 flow_dv_validate_action_jump(struct rte_eth_dev *dev, 4702 const struct mlx5_flow_tunnel *tunnel, 4703 const struct rte_flow_action *action, 4704 uint64_t action_flags, 4705 const struct rte_flow_attr *attributes, 4706 bool external, struct rte_flow_error *error) 4707 { 4708 uint32_t target_group, table; 4709 int ret = 0; 4710 struct flow_grp_info grp_info = { 4711 .external = !!external, 4712 .transfer = !!attributes->transfer, 4713 .fdb_def_rule = 1, 4714 .std_tbl_fix = 0 4715 }; 4716 if (action_flags & (MLX5_FLOW_FATE_ACTIONS | 4717 MLX5_FLOW_FATE_ESWITCH_ACTIONS)) 4718 return rte_flow_error_set(error, EINVAL, 4719 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 4720 "can't have 2 fate actions in" 4721 " same flow"); 4722 if (action_flags & MLX5_FLOW_ACTION_METER) 4723 return rte_flow_error_set(error, ENOTSUP, 4724 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 4725 "jump with meter not support"); 4726 if (!action->conf) 4727 return rte_flow_error_set(error, EINVAL, 4728 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 4729 NULL, "action configuration not set"); 4730 target_group = 4731 ((const struct rte_flow_action_jump *)action->conf)->group; 4732 ret = mlx5_flow_group_to_table(dev, tunnel, target_group, &table, 4733 &grp_info, error); 4734 if (ret) 4735 return ret; 4736 if (attributes->group == target_group && 4737 !(action_flags & (MLX5_FLOW_ACTION_TUNNEL_SET | 4738 MLX5_FLOW_ACTION_TUNNEL_MATCH))) 4739 return rte_flow_error_set(error, EINVAL, 4740 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 4741 "target group must be other than" 4742 " the current flow group"); 4743 return 0; 4744 } 4745 4746 /* 4747 * Validate the port_id action. 4748 * 4749 * @param[in] dev 4750 * Pointer to rte_eth_dev structure. 4751 * @param[in] action_flags 4752 * Bit-fields that holds the actions detected until now. 4753 * @param[in] action 4754 * Port_id RTE action structure. 4755 * @param[in] attr 4756 * Attributes of flow that includes this action. 4757 * @param[out] error 4758 * Pointer to error structure. 4759 * 4760 * @return 4761 * 0 on success, a negative errno value otherwise and rte_errno is set. 4762 */ 4763 static int 4764 flow_dv_validate_action_port_id(struct rte_eth_dev *dev, 4765 uint64_t action_flags, 4766 const struct rte_flow_action *action, 4767 const struct rte_flow_attr *attr, 4768 struct rte_flow_error *error) 4769 { 4770 const struct rte_flow_action_port_id *port_id; 4771 struct mlx5_priv *act_priv; 4772 struct mlx5_priv *dev_priv; 4773 uint16_t port; 4774 4775 if (!attr->transfer) 4776 return rte_flow_error_set(error, ENOTSUP, 4777 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 4778 NULL, 4779 "port id action is valid in transfer" 4780 " mode only"); 4781 if (!action || !action->conf) 4782 return rte_flow_error_set(error, ENOTSUP, 4783 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 4784 NULL, 4785 "port id action parameters must be" 4786 " specified"); 4787 if (action_flags & (MLX5_FLOW_FATE_ACTIONS | 4788 MLX5_FLOW_FATE_ESWITCH_ACTIONS)) 4789 return rte_flow_error_set(error, EINVAL, 4790 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 4791 "can have only one fate actions in" 4792 " a flow"); 4793 dev_priv = mlx5_dev_to_eswitch_info(dev); 4794 if (!dev_priv) 4795 return rte_flow_error_set(error, rte_errno, 4796 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 4797 NULL, 4798 "failed to obtain E-Switch info"); 4799 port_id = action->conf; 4800 port = port_id->original ? 
dev->data->port_id : port_id->id; 4801 act_priv = mlx5_port_to_eswitch_info(port, false); 4802 if (!act_priv) 4803 return rte_flow_error_set 4804 (error, rte_errno, 4805 RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id, 4806 "failed to obtain E-Switch port id for port"); 4807 if (act_priv->domain_id != dev_priv->domain_id) 4808 return rte_flow_error_set 4809 (error, EINVAL, 4810 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 4811 "port does not belong to" 4812 " E-Switch being configured"); 4813 return 0; 4814 } 4815 4816 /** 4817 * Get the maximum number of modify header actions. 4818 * 4819 * @param dev 4820 * Pointer to rte_eth_dev structure. 4821 * @param flags 4822 * Flags bits to check if root level. 4823 * 4824 * @return 4825 * Max number of modify header actions device can support. 4826 */ 4827 static inline unsigned int 4828 flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused, 4829 uint64_t flags) 4830 { 4831 /* 4832 * There's no way to directly query the max capacity from FW. 4833 * The maximal value on root table should be assumed to be supported. 4834 */ 4835 if (!(flags & MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL)) 4836 return MLX5_MAX_MODIFY_NUM; 4837 else 4838 return MLX5_ROOT_TBL_MODIFY_NUM; 4839 } 4840 4841 /** 4842 * Validate the meter action. 4843 * 4844 * @param[in] dev 4845 * Pointer to rte_eth_dev structure. 4846 * @param[in] action_flags 4847 * Bit-fields that holds the actions detected until now. 4848 * @param[in] action 4849 * Pointer to the meter action. 4850 * @param[in] attr 4851 * Attributes of flow that includes this action. 4852 * @param[out] error 4853 * Pointer to error structure. 4854 * 4855 * @return 4856 * 0 on success, a negative errno value otherwise and rte_ernno is set. 4857 */ 4858 static int 4859 mlx5_flow_validate_action_meter(struct rte_eth_dev *dev, 4860 uint64_t action_flags, 4861 const struct rte_flow_action *action, 4862 const struct rte_flow_attr *attr, 4863 struct rte_flow_error *error) 4864 { 4865 struct mlx5_priv *priv = dev->data->dev_private; 4866 const struct rte_flow_action_meter *am = action->conf; 4867 struct mlx5_flow_meter *fm; 4868 4869 if (!am) 4870 return rte_flow_error_set(error, EINVAL, 4871 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 4872 "meter action conf is NULL"); 4873 4874 if (action_flags & MLX5_FLOW_ACTION_METER) 4875 return rte_flow_error_set(error, ENOTSUP, 4876 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 4877 "meter chaining not support"); 4878 if (action_flags & MLX5_FLOW_ACTION_JUMP) 4879 return rte_flow_error_set(error, ENOTSUP, 4880 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 4881 "meter with jump not support"); 4882 if (!priv->mtr_en) 4883 return rte_flow_error_set(error, ENOTSUP, 4884 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 4885 NULL, 4886 "meter action not supported"); 4887 fm = mlx5_flow_meter_find(priv, am->mtr_id); 4888 if (!fm) 4889 return rte_flow_error_set(error, EINVAL, 4890 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 4891 "Meter not found"); 4892 if (fm->ref_cnt && (!(fm->transfer == attr->transfer || 4893 (!fm->ingress && !attr->ingress && attr->egress) || 4894 (!fm->egress && !attr->egress && attr->ingress)))) 4895 return rte_flow_error_set(error, EINVAL, 4896 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 4897 "Flow attributes are either invalid " 4898 "or have a conflict with current " 4899 "meter attributes"); 4900 return 0; 4901 } 4902 4903 /** 4904 * Validate the age action. 4905 * 4906 * @param[in] action_flags 4907 * Holds the actions detected until now. 4908 * @param[in] action 4909 * Pointer to the age action. 
4910 * @param[in] dev 4911 * Pointer to the Ethernet device structure. 4912 * @param[out] error 4913 * Pointer to error structure. 4914 * 4915 * @return 4916 * 0 on success, a negative errno value otherwise and rte_errno is set. 4917 */ 4918 static int 4919 flow_dv_validate_action_age(uint64_t action_flags, 4920 const struct rte_flow_action *action, 4921 struct rte_eth_dev *dev, 4922 struct rte_flow_error *error) 4923 { 4924 struct mlx5_priv *priv = dev->data->dev_private; 4925 const struct rte_flow_action_age *age = action->conf; 4926 4927 if (!priv->config.devx || (priv->sh->cmng.counter_fallback && 4928 !priv->sh->aso_age_mng)) 4929 return rte_flow_error_set(error, ENOTSUP, 4930 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 4931 NULL, 4932 "age action not supported"); 4933 if (!(action->conf)) 4934 return rte_flow_error_set(error, EINVAL, 4935 RTE_FLOW_ERROR_TYPE_ACTION, action, 4936 "configuration cannot be null"); 4937 if (!(age->timeout)) 4938 return rte_flow_error_set(error, EINVAL, 4939 RTE_FLOW_ERROR_TYPE_ACTION, action, 4940 "invalid timeout value 0"); 4941 if (action_flags & MLX5_FLOW_ACTION_AGE) 4942 return rte_flow_error_set(error, EINVAL, 4943 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 4944 "duplicate age actions set"); 4945 return 0; 4946 } 4947 4948 /** 4949 * Validate the modify-header IPv4 DSCP actions. 4950 * 4951 * @param[in] action_flags 4952 * Holds the actions detected until now. 4953 * @param[in] action 4954 * Pointer to the modify action. 4955 * @param[in] item_flags 4956 * Holds the items detected. 4957 * @param[out] error 4958 * Pointer to error structure. 4959 * 4960 * @return 4961 * 0 on success, a negative errno value otherwise and rte_errno is set. 4962 */ 4963 static int 4964 flow_dv_validate_action_modify_ipv4_dscp(const uint64_t action_flags, 4965 const struct rte_flow_action *action, 4966 const uint64_t item_flags, 4967 struct rte_flow_error *error) 4968 { 4969 int ret = 0; 4970 4971 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error); 4972 if (!ret) { 4973 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4)) 4974 return rte_flow_error_set(error, EINVAL, 4975 RTE_FLOW_ERROR_TYPE_ACTION, 4976 NULL, 4977 "no ipv4 item in pattern"); 4978 } 4979 return ret; 4980 } 4981 4982 /** 4983 * Validate the modify-header IPv6 DSCP actions. 4984 * 4985 * @param[in] action_flags 4986 * Holds the actions detected until now. 4987 * @param[in] action 4988 * Pointer to the modify action. 4989 * @param[in] item_flags 4990 * Holds the items detected. 4991 * @param[out] error 4992 * Pointer to error structure. 4993 * 4994 * @return 4995 * 0 on success, a negative errno value otherwise and rte_errno is set. 4996 */ 4997 static int 4998 flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags, 4999 const struct rte_flow_action *action, 5000 const uint64_t item_flags, 5001 struct rte_flow_error *error) 5002 { 5003 int ret = 0; 5004 5005 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error); 5006 if (!ret) { 5007 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6)) 5008 return rte_flow_error_set(error, EINVAL, 5009 RTE_FLOW_ERROR_TYPE_ACTION, 5010 NULL, 5011 "no ipv6 item in pattern"); 5012 } 5013 return ret; 5014 } 5015 5016 /** 5017 * Match modify-header resource. 5018 * 5019 * @param list 5020 * Pointer to the hash list. 5021 * @param entry 5022 * Pointer to exist resource entry object. 5023 * @param key 5024 * Key of the new entry. 5025 * @param ctx 5026 * Pointer to new modify-header resource. 5027 * 5028 * @return 5029 * 0 on matching, non-zero otherwise. 
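 *
 * The comparison covers the resource from the ft_type field up to and
 * including the inlined modification actions (actions_num entries), so
 * two resources match only when both the header fields and every
 * modify action are byte-identical.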
5030 */ 5031 int 5032 flow_dv_modify_match_cb(struct mlx5_hlist *list __rte_unused, 5033 struct mlx5_hlist_entry *entry, 5034 uint64_t key __rte_unused, void *cb_ctx) 5035 { 5036 struct mlx5_flow_cb_ctx *ctx = cb_ctx; 5037 struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data; 5038 struct mlx5_flow_dv_modify_hdr_resource *resource = 5039 container_of(entry, typeof(*resource), entry); 5040 uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type); 5041 5042 key_len += ref->actions_num * sizeof(ref->actions[0]); 5043 return ref->actions_num != resource->actions_num || 5044 memcmp(&ref->ft_type, &resource->ft_type, key_len); 5045 } 5046 5047 struct mlx5_hlist_entry * 5048 flow_dv_modify_create_cb(struct mlx5_hlist *list, uint64_t key __rte_unused, 5049 void *cb_ctx) 5050 { 5051 struct mlx5_dev_ctx_shared *sh = list->ctx; 5052 struct mlx5_flow_cb_ctx *ctx = cb_ctx; 5053 struct mlx5dv_dr_domain *ns; 5054 struct mlx5_flow_dv_modify_hdr_resource *entry; 5055 struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data; 5056 int ret; 5057 uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]); 5058 uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type); 5059 5060 entry = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*entry) + data_len, 0, 5061 SOCKET_ID_ANY); 5062 if (!entry) { 5063 rte_flow_error_set(ctx->error, ENOMEM, 5064 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 5065 "cannot allocate resource memory"); 5066 return NULL; 5067 } 5068 rte_memcpy(&entry->ft_type, 5069 RTE_PTR_ADD(ref, offsetof(typeof(*ref), ft_type)), 5070 key_len + data_len); 5071 if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) 5072 ns = sh->fdb_domain; 5073 else if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX) 5074 ns = sh->tx_domain; 5075 else 5076 ns = sh->rx_domain; 5077 ret = mlx5_flow_os_create_flow_action_modify_header 5078 (sh->ctx, ns, entry, 5079 data_len, &entry->action); 5080 if (ret) { 5081 mlx5_free(entry); 5082 rte_flow_error_set(ctx->error, ENOMEM, 5083 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 5084 NULL, "cannot create modification action"); 5085 return NULL; 5086 } 5087 return &entry->entry; 5088 } 5089 5090 /** 5091 * Validate the sample action. 5092 * 5093 * @param[in, out] action_flags 5094 * Holds the actions detected until now. 5095 * @param[in] action 5096 * Pointer to the sample action. 5097 * @param[in] dev 5098 * Pointer to the Ethernet device structure. 5099 * @param[in] attr 5100 * Attributes of flow that includes this action. 5101 * @param[in] item_flags 5102 * Holds the items detected. 5103 * @param[in] rss 5104 * Pointer to the RSS action. 5105 * @param[out] sample_rss 5106 * Pointer to the RSS action in sample action list. 5107 * @param[out] count 5108 * Pointer to the COUNT action in sample action list. 5109 * @param[out] fdb_mirror_limit 5110 * Pointer to the FDB mirror limitation flag. 5111 * @param[out] error 5112 * Pointer to error structure. 5113 * 5114 * @return 5115 * 0 on success, a negative errno value otherwise and rte_errno is set. 
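 *
 * For plain ingress flows the sample sub-action list must contain a
 * QUEUE or RSS destination, plain egress sampling is rejected, and for
 * E-Switch (transfer) mirroring a PORT_ID sub-action is mandatory
 * while the ratio must be 1 whenever any optional sub-action is given.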
5116 */ 5117 static int 5118 flow_dv_validate_action_sample(uint64_t *action_flags, 5119 const struct rte_flow_action *action, 5120 struct rte_eth_dev *dev, 5121 const struct rte_flow_attr *attr, 5122 uint64_t item_flags, 5123 const struct rte_flow_action_rss *rss, 5124 const struct rte_flow_action_rss **sample_rss, 5125 const struct rte_flow_action_count **count, 5126 int *fdb_mirror_limit, 5127 struct rte_flow_error *error) 5128 { 5129 struct mlx5_priv *priv = dev->data->dev_private; 5130 struct mlx5_dev_config *dev_conf = &priv->config; 5131 const struct rte_flow_action_sample *sample = action->conf; 5132 const struct rte_flow_action *act; 5133 uint64_t sub_action_flags = 0; 5134 uint16_t queue_index = 0xFFFF; 5135 int actions_n = 0; 5136 int ret; 5137 5138 if (!sample) 5139 return rte_flow_error_set(error, EINVAL, 5140 RTE_FLOW_ERROR_TYPE_ACTION, action, 5141 "configuration cannot be NULL"); 5142 if (sample->ratio == 0) 5143 return rte_flow_error_set(error, EINVAL, 5144 RTE_FLOW_ERROR_TYPE_ACTION, action, 5145 "ratio value starts from 1"); 5146 if (!priv->config.devx || (sample->ratio > 0 && !priv->sampler_en)) 5147 return rte_flow_error_set(error, ENOTSUP, 5148 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 5149 NULL, 5150 "sample action not supported"); 5151 if (*action_flags & MLX5_FLOW_ACTION_SAMPLE) 5152 return rte_flow_error_set(error, EINVAL, 5153 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 5154 "Multiple sample actions not " 5155 "supported"); 5156 if (*action_flags & MLX5_FLOW_ACTION_METER) 5157 return rte_flow_error_set(error, EINVAL, 5158 RTE_FLOW_ERROR_TYPE_ACTION, action, 5159 "wrong action order, meter should " 5160 "be after sample action"); 5161 if (*action_flags & MLX5_FLOW_ACTION_JUMP) 5162 return rte_flow_error_set(error, EINVAL, 5163 RTE_FLOW_ERROR_TYPE_ACTION, action, 5164 "wrong action order, jump should " 5165 "be after sample action"); 5166 act = sample->actions; 5167 for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) { 5168 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS) 5169 return rte_flow_error_set(error, ENOTSUP, 5170 RTE_FLOW_ERROR_TYPE_ACTION, 5171 act, "too many actions"); 5172 switch (act->type) { 5173 case RTE_FLOW_ACTION_TYPE_QUEUE: 5174 ret = mlx5_flow_validate_action_queue(act, 5175 sub_action_flags, 5176 dev, 5177 attr, error); 5178 if (ret < 0) 5179 return ret; 5180 queue_index = ((const struct rte_flow_action_queue *) 5181 (act->conf))->index; 5182 sub_action_flags |= MLX5_FLOW_ACTION_QUEUE; 5183 ++actions_n; 5184 break; 5185 case RTE_FLOW_ACTION_TYPE_RSS: 5186 *sample_rss = act->conf; 5187 ret = mlx5_flow_validate_action_rss(act, 5188 sub_action_flags, 5189 dev, attr, 5190 item_flags, 5191 error); 5192 if (ret < 0) 5193 return ret; 5194 if (rss && *sample_rss && 5195 ((*sample_rss)->level != rss->level || 5196 (*sample_rss)->types != rss->types)) 5197 return rte_flow_error_set(error, ENOTSUP, 5198 RTE_FLOW_ERROR_TYPE_ACTION, 5199 NULL, 5200 "Can't use the different RSS types " 5201 "or level in the same flow"); 5202 if (*sample_rss != NULL && (*sample_rss)->queue_num) 5203 queue_index = (*sample_rss)->queue[0]; 5204 sub_action_flags |= MLX5_FLOW_ACTION_RSS; 5205 ++actions_n; 5206 break; 5207 case RTE_FLOW_ACTION_TYPE_MARK: 5208 ret = flow_dv_validate_action_mark(dev, act, 5209 sub_action_flags, 5210 attr, error); 5211 if (ret < 0) 5212 return ret; 5213 if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) 5214 sub_action_flags |= MLX5_FLOW_ACTION_MARK | 5215 MLX5_FLOW_ACTION_MARK_EXT; 5216 else 5217 sub_action_flags |= MLX5_FLOW_ACTION_MARK; 5218 ++actions_n; 5219 
break; 5220 case RTE_FLOW_ACTION_TYPE_COUNT: 5221 ret = flow_dv_validate_action_count 5222 (dev, act, 5223 *action_flags | sub_action_flags, 5224 error); 5225 if (ret < 0) 5226 return ret; 5227 *count = act->conf; 5228 sub_action_flags |= MLX5_FLOW_ACTION_COUNT; 5229 *action_flags |= MLX5_FLOW_ACTION_COUNT; 5230 ++actions_n; 5231 break; 5232 case RTE_FLOW_ACTION_TYPE_PORT_ID: 5233 ret = flow_dv_validate_action_port_id(dev, 5234 sub_action_flags, 5235 act, 5236 attr, 5237 error); 5238 if (ret) 5239 return ret; 5240 sub_action_flags |= MLX5_FLOW_ACTION_PORT_ID; 5241 ++actions_n; 5242 break; 5243 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: 5244 ret = flow_dv_validate_action_raw_encap_decap 5245 (dev, NULL, act->conf, attr, &sub_action_flags, 5246 &actions_n, action, item_flags, error); 5247 if (ret < 0) 5248 return ret; 5249 ++actions_n; 5250 break; 5251 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: 5252 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: 5253 ret = flow_dv_validate_action_l2_encap(dev, 5254 sub_action_flags, 5255 act, attr, 5256 error); 5257 if (ret < 0) 5258 return ret; 5259 sub_action_flags |= MLX5_FLOW_ACTION_ENCAP; 5260 ++actions_n; 5261 break; 5262 default: 5263 return rte_flow_error_set(error, ENOTSUP, 5264 RTE_FLOW_ERROR_TYPE_ACTION, 5265 NULL, 5266 "Doesn't support optional " 5267 "action"); 5268 } 5269 } 5270 if (attr->ingress && !attr->transfer) { 5271 if (!(sub_action_flags & (MLX5_FLOW_ACTION_QUEUE | 5272 MLX5_FLOW_ACTION_RSS))) 5273 return rte_flow_error_set(error, EINVAL, 5274 RTE_FLOW_ERROR_TYPE_ACTION, 5275 NULL, 5276 "Ingress must has a dest " 5277 "QUEUE for Sample"); 5278 } else if (attr->egress && !attr->transfer) { 5279 return rte_flow_error_set(error, ENOTSUP, 5280 RTE_FLOW_ERROR_TYPE_ACTION, 5281 NULL, 5282 "Sample Only support Ingress " 5283 "or E-Switch"); 5284 } else if (sample->actions->type != RTE_FLOW_ACTION_TYPE_END) { 5285 MLX5_ASSERT(attr->transfer); 5286 if (sample->ratio > 1) 5287 return rte_flow_error_set(error, ENOTSUP, 5288 RTE_FLOW_ERROR_TYPE_ACTION, 5289 NULL, 5290 "E-Switch doesn't support " 5291 "any optional action " 5292 "for sampling"); 5293 if (sub_action_flags & MLX5_FLOW_ACTION_QUEUE) 5294 return rte_flow_error_set(error, ENOTSUP, 5295 RTE_FLOW_ERROR_TYPE_ACTION, 5296 NULL, 5297 "unsupported action QUEUE"); 5298 if (sub_action_flags & MLX5_FLOW_ACTION_RSS) 5299 return rte_flow_error_set(error, ENOTSUP, 5300 RTE_FLOW_ERROR_TYPE_ACTION, 5301 NULL, 5302 "unsupported action QUEUE"); 5303 if (!(sub_action_flags & MLX5_FLOW_ACTION_PORT_ID)) 5304 return rte_flow_error_set(error, EINVAL, 5305 RTE_FLOW_ERROR_TYPE_ACTION, 5306 NULL, 5307 "E-Switch must has a dest " 5308 "port for mirroring"); 5309 if (!priv->config.hca_attr.reg_c_preserve && 5310 priv->representor_id != -1) 5311 *fdb_mirror_limit = 1; 5312 } 5313 /* Continue validation for Xcap actions.*/ 5314 if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) && 5315 (queue_index == 0xFFFF || 5316 mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) { 5317 if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) == 5318 MLX5_FLOW_XCAP_ACTIONS) 5319 return rte_flow_error_set(error, ENOTSUP, 5320 RTE_FLOW_ERROR_TYPE_ACTION, 5321 NULL, "encap and decap " 5322 "combination aren't " 5323 "supported"); 5324 if (!attr->transfer && attr->ingress && (sub_action_flags & 5325 MLX5_FLOW_ACTION_ENCAP)) 5326 return rte_flow_error_set(error, ENOTSUP, 5327 RTE_FLOW_ERROR_TYPE_ACTION, 5328 NULL, "encap is not supported" 5329 " for ingress traffic"); 5330 } 5331 return 0; 5332 } 5333 5334 /** 5335 * Find existing modify-header 
resource or create and register a new one. 5336 * 5337 * @param dev[in, out] 5338 * Pointer to rte_eth_dev structure. 5339 * @param[in, out] resource 5340 * Pointer to modify-header resource. 5341 * @parm[in, out] dev_flow 5342 * Pointer to the dev_flow. 5343 * @param[out] error 5344 * pointer to error structure. 5345 * 5346 * @return 5347 * 0 on success otherwise -errno and errno is set. 5348 */ 5349 static int 5350 flow_dv_modify_hdr_resource_register 5351 (struct rte_eth_dev *dev, 5352 struct mlx5_flow_dv_modify_hdr_resource *resource, 5353 struct mlx5_flow *dev_flow, 5354 struct rte_flow_error *error) 5355 { 5356 struct mlx5_priv *priv = dev->data->dev_private; 5357 struct mlx5_dev_ctx_shared *sh = priv->sh; 5358 uint32_t key_len = sizeof(*resource) - 5359 offsetof(typeof(*resource), ft_type) + 5360 resource->actions_num * sizeof(resource->actions[0]); 5361 struct mlx5_hlist_entry *entry; 5362 struct mlx5_flow_cb_ctx ctx = { 5363 .error = error, 5364 .data = resource, 5365 }; 5366 uint64_t key64; 5367 5368 resource->flags = dev_flow->dv.group ? 0 : 5369 MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL; 5370 if (resource->actions_num > flow_dv_modify_hdr_action_max(dev, 5371 resource->flags)) 5372 return rte_flow_error_set(error, EOVERFLOW, 5373 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 5374 "too many modify header items"); 5375 key64 = __rte_raw_cksum(&resource->ft_type, key_len, 0); 5376 entry = mlx5_hlist_register(sh->modify_cmds, key64, &ctx); 5377 if (!entry) 5378 return -rte_errno; 5379 resource = container_of(entry, typeof(*resource), entry); 5380 dev_flow->handle->dvh.modify_hdr = resource; 5381 return 0; 5382 } 5383 5384 /** 5385 * Get DV flow counter by index. 5386 * 5387 * @param[in] dev 5388 * Pointer to the Ethernet device structure. 5389 * @param[in] idx 5390 * mlx5 flow counter index in the container. 5391 * @param[out] ppool 5392 * mlx5 flow counter pool in the container, 5393 * 5394 * @return 5395 * Pointer to the counter, NULL otherwise. 5396 */ 5397 static struct mlx5_flow_counter * 5398 flow_dv_counter_get_by_idx(struct rte_eth_dev *dev, 5399 uint32_t idx, 5400 struct mlx5_flow_counter_pool **ppool) 5401 { 5402 struct mlx5_priv *priv = dev->data->dev_private; 5403 struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng; 5404 struct mlx5_flow_counter_pool *pool; 5405 5406 /* Decrease to original index and clear shared bit. */ 5407 idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1); 5408 MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cmng->n); 5409 pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL]; 5410 MLX5_ASSERT(pool); 5411 if (ppool) 5412 *ppool = pool; 5413 return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL); 5414 } 5415 5416 /** 5417 * Check the devx counter belongs to the pool. 5418 * 5419 * @param[in] pool 5420 * Pointer to the counter pool. 5421 * @param[in] id 5422 * The counter devx ID. 5423 * 5424 * @return 5425 * True if counter belongs to the pool, false otherwise. 5426 */ 5427 static bool 5428 flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id) 5429 { 5430 int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) * 5431 MLX5_COUNTERS_PER_POOL; 5432 5433 if (id >= base && id < base + MLX5_COUNTERS_PER_POOL) 5434 return true; 5435 return false; 5436 } 5437 5438 /** 5439 * Get a pool by devx counter ID. 5440 * 5441 * @param[in] cmng 5442 * Pointer to the counter management. 5443 * @param[in] id 5444 * The counter devx ID. 
5445 * 5446 * @return 5447 * The counter pool pointer if exists, NULL otherwise, 5448 */ 5449 static struct mlx5_flow_counter_pool * 5450 flow_dv_find_pool_by_id(struct mlx5_flow_counter_mng *cmng, int id) 5451 { 5452 uint32_t i; 5453 struct mlx5_flow_counter_pool *pool = NULL; 5454 5455 rte_spinlock_lock(&cmng->pool_update_sl); 5456 /* Check last used pool. */ 5457 if (cmng->last_pool_idx != POOL_IDX_INVALID && 5458 flow_dv_is_counter_in_pool(cmng->pools[cmng->last_pool_idx], id)) { 5459 pool = cmng->pools[cmng->last_pool_idx]; 5460 goto out; 5461 } 5462 /* ID out of range means no suitable pool in the container. */ 5463 if (id > cmng->max_id || id < cmng->min_id) 5464 goto out; 5465 /* 5466 * Find the pool from the end of the container, since mostly counter 5467 * ID is sequence increasing, and the last pool should be the needed 5468 * one. 5469 */ 5470 i = cmng->n_valid; 5471 while (i--) { 5472 struct mlx5_flow_counter_pool *pool_tmp = cmng->pools[i]; 5473 5474 if (flow_dv_is_counter_in_pool(pool_tmp, id)) { 5475 pool = pool_tmp; 5476 break; 5477 } 5478 } 5479 out: 5480 rte_spinlock_unlock(&cmng->pool_update_sl); 5481 return pool; 5482 } 5483 5484 /** 5485 * Resize a counter container. 5486 * 5487 * @param[in] dev 5488 * Pointer to the Ethernet device structure. 5489 * 5490 * @return 5491 * 0 on success, otherwise negative errno value and rte_errno is set. 5492 */ 5493 static int 5494 flow_dv_container_resize(struct rte_eth_dev *dev) 5495 { 5496 struct mlx5_priv *priv = dev->data->dev_private; 5497 struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng; 5498 void *old_pools = cmng->pools; 5499 uint32_t resize = cmng->n + MLX5_CNT_CONTAINER_RESIZE; 5500 uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize; 5501 void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY); 5502 5503 if (!pools) { 5504 rte_errno = ENOMEM; 5505 return -ENOMEM; 5506 } 5507 if (old_pools) 5508 memcpy(pools, old_pools, cmng->n * 5509 sizeof(struct mlx5_flow_counter_pool *)); 5510 cmng->n = resize; 5511 cmng->pools = pools; 5512 if (old_pools) 5513 mlx5_free(old_pools); 5514 return 0; 5515 } 5516 5517 /** 5518 * Query a devx flow counter. 5519 * 5520 * @param[in] dev 5521 * Pointer to the Ethernet device structure. 5522 * @param[in] cnt 5523 * Index to the flow counter. 5524 * @param[out] pkts 5525 * The statistics value of packets. 5526 * @param[out] bytes 5527 * The statistics value of bytes. 5528 * 5529 * @return 5530 * 0 on success, otherwise a negative errno value and rte_errno is set. 5531 */ 5532 static inline int 5533 _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts, 5534 uint64_t *bytes) 5535 { 5536 struct mlx5_priv *priv = dev->data->dev_private; 5537 struct mlx5_flow_counter_pool *pool = NULL; 5538 struct mlx5_flow_counter *cnt; 5539 int offset; 5540 5541 cnt = flow_dv_counter_get_by_idx(dev, counter, &pool); 5542 MLX5_ASSERT(pool); 5543 if (priv->sh->cmng.counter_fallback) 5544 return mlx5_devx_cmd_flow_counter_query(cnt->dcs_when_active, 0, 5545 0, pkts, bytes, 0, NULL, NULL, 0); 5546 rte_spinlock_lock(&pool->sl); 5547 if (!pool->raw) { 5548 *pkts = 0; 5549 *bytes = 0; 5550 } else { 5551 offset = MLX5_CNT_ARRAY_IDX(pool, cnt); 5552 *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits); 5553 *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes); 5554 } 5555 rte_spinlock_unlock(&pool->sl); 5556 return 0; 5557 } 5558 5559 /** 5560 * Create and initialize a new counter pool. 
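* The pool memory holds MLX5_COUNTERS_PER_POOL counters (plus one age
* parameter per counter when @p age is set) and is linked into cmng->pools
* under pool_update_sl, resizing the container first when it is already
* full. In fallback mode the min_id/max_id window used by
* flow_dv_find_pool_by_id() is updated as well.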
5561 * 5562 * @param[in] dev 5563 * Pointer to the Ethernet device structure. 5564 * @param[out] dcs 5565 * The devX counter handle. 5566 * @param[in] age 5567 * Whether the pool is for counter that was allocated for aging. 5568 * @param[in/out] cont_cur 5569 * Pointer to the container pointer, it will be update in pool resize. 5570 * 5571 * @return 5572 * The pool container pointer on success, NULL otherwise and rte_errno is set. 5573 */ 5574 static struct mlx5_flow_counter_pool * 5575 flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs, 5576 uint32_t age) 5577 { 5578 struct mlx5_priv *priv = dev->data->dev_private; 5579 struct mlx5_flow_counter_pool *pool; 5580 struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng; 5581 bool fallback = priv->sh->cmng.counter_fallback; 5582 uint32_t size = sizeof(*pool); 5583 5584 size += MLX5_COUNTERS_PER_POOL * MLX5_CNT_SIZE; 5585 size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * MLX5_AGE_SIZE); 5586 pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY); 5587 if (!pool) { 5588 rte_errno = ENOMEM; 5589 return NULL; 5590 } 5591 pool->raw = NULL; 5592 pool->is_aged = !!age; 5593 pool->query_gen = 0; 5594 pool->min_dcs = dcs; 5595 rte_spinlock_init(&pool->sl); 5596 rte_spinlock_init(&pool->csl); 5597 TAILQ_INIT(&pool->counters[0]); 5598 TAILQ_INIT(&pool->counters[1]); 5599 pool->time_of_last_age_check = MLX5_CURR_TIME_SEC; 5600 rte_spinlock_lock(&cmng->pool_update_sl); 5601 pool->index = cmng->n_valid; 5602 if (pool->index == cmng->n && flow_dv_container_resize(dev)) { 5603 mlx5_free(pool); 5604 rte_spinlock_unlock(&cmng->pool_update_sl); 5605 return NULL; 5606 } 5607 cmng->pools[pool->index] = pool; 5608 cmng->n_valid++; 5609 if (unlikely(fallback)) { 5610 int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL); 5611 5612 if (base < cmng->min_id) 5613 cmng->min_id = base; 5614 if (base > cmng->max_id) 5615 cmng->max_id = base + MLX5_COUNTERS_PER_POOL - 1; 5616 cmng->last_pool_idx = pool->index; 5617 } 5618 rte_spinlock_unlock(&cmng->pool_update_sl); 5619 return pool; 5620 } 5621 5622 /** 5623 * Prepare a new counter and/or a new counter pool. 5624 * 5625 * @param[in] dev 5626 * Pointer to the Ethernet device structure. 5627 * @param[out] cnt_free 5628 * Where to put the pointer of a new counter. 5629 * @param[in] age 5630 * Whether the pool is for counter that was allocated for aging. 5631 * 5632 * @return 5633 * The counter pool pointer and @p cnt_free is set on success, 5634 * NULL otherwise and rte_errno is set. 5635 */ 5636 static struct mlx5_flow_counter_pool * 5637 flow_dv_counter_pool_prepare(struct rte_eth_dev *dev, 5638 struct mlx5_flow_counter **cnt_free, 5639 uint32_t age) 5640 { 5641 struct mlx5_priv *priv = dev->data->dev_private; 5642 struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng; 5643 struct mlx5_flow_counter_pool *pool; 5644 struct mlx5_counters tmp_tq; 5645 struct mlx5_devx_obj *dcs = NULL; 5646 struct mlx5_flow_counter *cnt; 5647 enum mlx5_counter_type cnt_type = 5648 age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN; 5649 bool fallback = priv->sh->cmng.counter_fallback; 5650 uint32_t i; 5651 5652 if (fallback) { 5653 /* bulk_bitmap must be 0 for single counter allocation. 
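In this fallback mode every counter owns its devX object; the counter is then placed into whichever pool covers its ID range, creating a new pool via flow_dv_pool_create() if none does.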
*/ 5654 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0); 5655 if (!dcs) 5656 return NULL; 5657 pool = flow_dv_find_pool_by_id(cmng, dcs->id); 5658 if (!pool) { 5659 pool = flow_dv_pool_create(dev, dcs, age); 5660 if (!pool) { 5661 mlx5_devx_cmd_destroy(dcs); 5662 return NULL; 5663 } 5664 } 5665 i = dcs->id % MLX5_COUNTERS_PER_POOL; 5666 cnt = MLX5_POOL_GET_CNT(pool, i); 5667 cnt->pool = pool; 5668 cnt->dcs_when_free = dcs; 5669 *cnt_free = cnt; 5670 return pool; 5671 } 5672 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4); 5673 if (!dcs) { 5674 rte_errno = ENODATA; 5675 return NULL; 5676 } 5677 pool = flow_dv_pool_create(dev, dcs, age); 5678 if (!pool) { 5679 mlx5_devx_cmd_destroy(dcs); 5680 return NULL; 5681 } 5682 TAILQ_INIT(&tmp_tq); 5683 for (i = 1; i < MLX5_COUNTERS_PER_POOL; ++i) { 5684 cnt = MLX5_POOL_GET_CNT(pool, i); 5685 cnt->pool = pool; 5686 TAILQ_INSERT_HEAD(&tmp_tq, cnt, next); 5687 } 5688 rte_spinlock_lock(&cmng->csl[cnt_type]); 5689 TAILQ_CONCAT(&cmng->counters[cnt_type], &tmp_tq, next); 5690 rte_spinlock_unlock(&cmng->csl[cnt_type]); 5691 *cnt_free = MLX5_POOL_GET_CNT(pool, 0); 5692 (*cnt_free)->pool = pool; 5693 return pool; 5694 } 5695 5696 /** 5697 * Allocate a flow counter. 5698 * 5699 * @param[in] dev 5700 * Pointer to the Ethernet device structure. 5701 * @param[in] age 5702 * Whether the counter was allocated for aging. 5703 * 5704 * @return 5705 * Index to flow counter on success, 0 otherwise and rte_errno is set. 5706 */ 5707 static uint32_t 5708 flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t age) 5709 { 5710 struct mlx5_priv *priv = dev->data->dev_private; 5711 struct mlx5_flow_counter_pool *pool = NULL; 5712 struct mlx5_flow_counter *cnt_free = NULL; 5713 bool fallback = priv->sh->cmng.counter_fallback; 5714 struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng; 5715 enum mlx5_counter_type cnt_type = 5716 age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN; 5717 uint32_t cnt_idx; 5718 5719 if (!priv->config.devx) { 5720 rte_errno = ENOTSUP; 5721 return 0; 5722 } 5723 /* Get free counters from container. */ 5724 rte_spinlock_lock(&cmng->csl[cnt_type]); 5725 cnt_free = TAILQ_FIRST(&cmng->counters[cnt_type]); 5726 if (cnt_free) 5727 TAILQ_REMOVE(&cmng->counters[cnt_type], cnt_free, next); 5728 rte_spinlock_unlock(&cmng->csl[cnt_type]); 5729 if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free, age)) 5730 goto err; 5731 pool = cnt_free->pool; 5732 if (fallback) 5733 cnt_free->dcs_when_active = cnt_free->dcs_when_free; 5734 /* Create a DV counter action only in the first time usage. */ 5735 if (!cnt_free->action) { 5736 uint16_t offset; 5737 struct mlx5_devx_obj *dcs; 5738 int ret; 5739 5740 if (!fallback) { 5741 offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free); 5742 dcs = pool->min_dcs; 5743 } else { 5744 offset = 0; 5745 dcs = cnt_free->dcs_when_free; 5746 } 5747 ret = mlx5_flow_os_create_flow_action_count(dcs->obj, offset, 5748 &cnt_free->action); 5749 if (ret) { 5750 rte_errno = errno; 5751 goto err; 5752 } 5753 } 5754 cnt_idx = MLX5_MAKE_CNT_IDX(pool->index, 5755 MLX5_CNT_ARRAY_IDX(pool, cnt_free)); 5756 /* Update the counter reset values. */ 5757 if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits, 5758 &cnt_free->bytes)) 5759 goto err; 5760 if (!fallback && !priv->sh->cmng.query_thread_on) 5761 /* Start the asynchronous batch query by the host thread. 
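mlx5_set_query_alarm() arms the periodic query that refreshes pool->raw in the background, so later _flow_dv_query_count() calls can read the hits/bytes snapshot instead of issuing a devX query per counter.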
*/ 5762 mlx5_set_query_alarm(priv->sh); 5763 return cnt_idx; 5764 err: 5765 if (cnt_free) { 5766 cnt_free->pool = pool; 5767 if (fallback) 5768 cnt_free->dcs_when_free = cnt_free->dcs_when_active; 5769 rte_spinlock_lock(&cmng->csl[cnt_type]); 5770 TAILQ_INSERT_TAIL(&cmng->counters[cnt_type], cnt_free, next); 5771 rte_spinlock_unlock(&cmng->csl[cnt_type]); 5772 } 5773 return 0; 5774 } 5775 5776 /** 5777 * Allocate a shared flow counter. 5778 * 5779 * @param[in] ctx 5780 * Pointer to the shared counter configuration. 5781 * @param[in] data 5782 * Pointer to save the allocated counter index. 5783 * 5784 * @return 5785 * Index to flow counter on success, 0 otherwise and rte_errno is set. 5786 */ 5787 5788 static int32_t 5789 flow_dv_counter_alloc_shared_cb(void *ctx, union mlx5_l3t_data *data) 5790 { 5791 struct mlx5_shared_counter_conf *conf = ctx; 5792 struct rte_eth_dev *dev = conf->dev; 5793 struct mlx5_flow_counter *cnt; 5794 5795 data->dword = flow_dv_counter_alloc(dev, 0); 5796 data->dword |= MLX5_CNT_SHARED_OFFSET; 5797 cnt = flow_dv_counter_get_by_idx(dev, data->dword, NULL); 5798 cnt->shared_info.id = conf->id; 5799 return 0; 5800 } 5801 5802 /** 5803 * Get a shared flow counter. 5804 * 5805 * @param[in] dev 5806 * Pointer to the Ethernet device structure. 5807 * @param[in] id 5808 * Counter identifier. 5809 * 5810 * @return 5811 * Index to flow counter on success, 0 otherwise and rte_errno is set. 5812 */ 5813 static uint32_t 5814 flow_dv_counter_get_shared(struct rte_eth_dev *dev, uint32_t id) 5815 { 5816 struct mlx5_priv *priv = dev->data->dev_private; 5817 struct mlx5_shared_counter_conf conf = { 5818 .dev = dev, 5819 .id = id, 5820 }; 5821 union mlx5_l3t_data data = { 5822 .dword = 0, 5823 }; 5824 5825 mlx5_l3t_prepare_entry(priv->sh->cnt_id_tbl, id, &data, 5826 flow_dv_counter_alloc_shared_cb, &conf); 5827 return data.dword; 5828 } 5829 5830 /** 5831 * Get age param from counter index. 5832 * 5833 * @param[in] dev 5834 * Pointer to the Ethernet device structure. 5835 * @param[in] counter 5836 * Index to the counter handler. 5837 * 5838 * @return 5839 * The aging parameter specified for the counter index. 5840 */ 5841 static struct mlx5_age_param* 5842 flow_dv_counter_idx_get_age(struct rte_eth_dev *dev, 5843 uint32_t counter) 5844 { 5845 struct mlx5_flow_counter *cnt; 5846 struct mlx5_flow_counter_pool *pool = NULL; 5847 5848 flow_dv_counter_get_by_idx(dev, counter, &pool); 5849 counter = (counter - 1) % MLX5_COUNTERS_PER_POOL; 5850 cnt = MLX5_POOL_GET_CNT(pool, counter); 5851 return MLX5_CNT_TO_AGE(cnt); 5852 } 5853 5854 /** 5855 * Remove a flow counter from aged counter list. 5856 * 5857 * @param[in] dev 5858 * Pointer to the Ethernet device structure. 5859 * @param[in] counter 5860 * Index to the counter handler. 5861 * @param[in] cnt 5862 * Pointer to the counter handler. 5863 */ 5864 static void 5865 flow_dv_counter_remove_from_age(struct rte_eth_dev *dev, 5866 uint32_t counter, struct mlx5_flow_counter *cnt) 5867 { 5868 struct mlx5_age_info *age_info; 5869 struct mlx5_age_param *age_param; 5870 struct mlx5_priv *priv = dev->data->dev_private; 5871 uint16_t expected = AGE_CANDIDATE; 5872 5873 age_info = GET_PORT_AGE_INFO(priv); 5874 age_param = flow_dv_counter_idx_get_age(dev, counter); 5875 if (!__atomic_compare_exchange_n(&age_param->state, &expected, 5876 AGE_FREE, false, __ATOMIC_RELAXED, 5877 __ATOMIC_RELAXED)) { 5878 /** 5879 * We need the lock even it is age timeout, 5880 * since counter may still in process. 
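* (The aging path may still be walking aged_counters concurrently, so
* aged_sl has to be held around the TAILQ_REMOVE below before the state
* is finally moved to AGE_FREE.)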
5881 */ 5882 rte_spinlock_lock(&age_info->aged_sl); 5883 TAILQ_REMOVE(&age_info->aged_counters, cnt, next); 5884 rte_spinlock_unlock(&age_info->aged_sl); 5885 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED); 5886 } 5887 } 5888 5889 /** 5890 * Release a flow counter. 5891 * 5892 * @param[in] dev 5893 * Pointer to the Ethernet device structure. 5894 * @param[in] counter 5895 * Index to the counter handler. 5896 */ 5897 static void 5898 flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t counter) 5899 { 5900 struct mlx5_priv *priv = dev->data->dev_private; 5901 struct mlx5_flow_counter_pool *pool = NULL; 5902 struct mlx5_flow_counter *cnt; 5903 enum mlx5_counter_type cnt_type; 5904 5905 if (!counter) 5906 return; 5907 cnt = flow_dv_counter_get_by_idx(dev, counter, &pool); 5908 MLX5_ASSERT(pool); 5909 if (IS_SHARED_CNT(counter) && 5910 mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl, cnt->shared_info.id)) 5911 return; 5912 if (pool->is_aged) 5913 flow_dv_counter_remove_from_age(dev, counter, cnt); 5914 cnt->pool = pool; 5915 /* 5916 * Put the counter back to list to be updated in none fallback mode. 5917 * Currently, we are using two list alternately, while one is in query, 5918 * add the freed counter to the other list based on the pool query_gen 5919 * value. After query finishes, add counter the list to the global 5920 * container counter list. The list changes while query starts. In 5921 * this case, lock will not be needed as query callback and release 5922 * function both operate with the different list. 5923 * 5924 */ 5925 if (!priv->sh->cmng.counter_fallback) { 5926 rte_spinlock_lock(&pool->csl); 5927 TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next); 5928 rte_spinlock_unlock(&pool->csl); 5929 } else { 5930 cnt->dcs_when_free = cnt->dcs_when_active; 5931 cnt_type = pool->is_aged ? MLX5_COUNTER_TYPE_AGE : 5932 MLX5_COUNTER_TYPE_ORIGIN; 5933 rte_spinlock_lock(&priv->sh->cmng.csl[cnt_type]); 5934 TAILQ_INSERT_TAIL(&priv->sh->cmng.counters[cnt_type], 5935 cnt, next); 5936 rte_spinlock_unlock(&priv->sh->cmng.csl[cnt_type]); 5937 } 5938 } 5939 5940 /** 5941 * Verify the @p attributes will be correctly understood by the NIC and store 5942 * them in the @p flow if everything is correct. 5943 * 5944 * @param[in] dev 5945 * Pointer to dev struct. 5946 * @param[in] attributes 5947 * Pointer to flow attributes 5948 * @param[in] external 5949 * This flow rule is created by request external to PMD. 5950 * @param[out] error 5951 * Pointer to error structure. 5952 * 5953 * @return 5954 * - 0 on success and non root table. 5955 * - 1 on success and root table. 5956 * - a negative errno value otherwise and rte_errno is set. 
5957 */ 5958 static int 5959 flow_dv_validate_attributes(struct rte_eth_dev *dev, 5960 const struct mlx5_flow_tunnel *tunnel, 5961 const struct rte_flow_attr *attributes, 5962 const struct flow_grp_info *grp_info, 5963 struct rte_flow_error *error) 5964 { 5965 struct mlx5_priv *priv = dev->data->dev_private; 5966 uint32_t lowest_priority = mlx5_get_lowest_priority(dev, attributes); 5967 int ret = 0; 5968 5969 #ifndef HAVE_MLX5DV_DR 5970 RTE_SET_USED(tunnel); 5971 RTE_SET_USED(grp_info); 5972 if (attributes->group) 5973 return rte_flow_error_set(error, ENOTSUP, 5974 RTE_FLOW_ERROR_TYPE_ATTR_GROUP, 5975 NULL, 5976 "groups are not supported"); 5977 #else 5978 uint32_t table = 0; 5979 5980 ret = mlx5_flow_group_to_table(dev, tunnel, attributes->group, &table, 5981 grp_info, error); 5982 if (ret) 5983 return ret; 5984 if (!table) 5985 ret = MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL; 5986 #endif 5987 if (attributes->priority != MLX5_FLOW_LOWEST_PRIO_INDICATOR && 5988 attributes->priority > lowest_priority) 5989 return rte_flow_error_set(error, ENOTSUP, 5990 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, 5991 NULL, 5992 "priority out of range"); 5993 if (attributes->transfer) { 5994 if (!priv->config.dv_esw_en) 5995 return rte_flow_error_set 5996 (error, ENOTSUP, 5997 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 5998 "E-Switch dr is not supported"); 5999 if (!(priv->representor || priv->master)) 6000 return rte_flow_error_set 6001 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 6002 NULL, "E-Switch configuration can only be" 6003 " done by a master or a representor device"); 6004 if (attributes->egress) 6005 return rte_flow_error_set 6006 (error, ENOTSUP, 6007 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes, 6008 "egress is not supported"); 6009 } 6010 if (!(attributes->egress ^ attributes->ingress)) 6011 return rte_flow_error_set(error, ENOTSUP, 6012 RTE_FLOW_ERROR_TYPE_ATTR, NULL, 6013 "must specify exactly one of " 6014 "ingress or egress"); 6015 return ret; 6016 } 6017 6018 /** 6019 * Internal validation function. For validating both actions and items. 6020 * 6021 * @param[in] dev 6022 * Pointer to the rte_eth_dev structure. 6023 * @param[in] attr 6024 * Pointer to the flow attributes. 6025 * @param[in] items 6026 * Pointer to the list of items. 6027 * @param[in] actions 6028 * Pointer to the list of actions. 6029 * @param[in] external 6030 * This flow rule is created by request external to PMD. 6031 * @param[in] hairpin 6032 * Number of hairpin TX actions, 0 means classic flow. 6033 * @param[out] error 6034 * Pointer to the error structure. 6035 * 6036 * @return 6037 * 0 on success, a negative errno value otherwise and rte_errno is set. 
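*
* A rough usage sketch (illustrative only, not taken from the sources) of
* the kind of rule this validator accepts, here a plain ingress drop rule
* on group 0:
*
*   struct rte_flow_error error;
*   struct rte_flow_attr attr = { .ingress = 1 };
*   struct rte_flow_item pattern[] = {
*       { .type = RTE_FLOW_ITEM_TYPE_ETH },
*       { .type = RTE_FLOW_ITEM_TYPE_END },
*   };
*   struct rte_flow_action actions[] = {
*       { .type = RTE_FLOW_ACTION_TYPE_DROP },
*       { .type = RTE_FLOW_ACTION_TYPE_END },
*   };
*   ret = flow_dv_validate(dev, &attr, pattern, actions, true, 0, &error);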
6038 */ 6039 static int 6040 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, 6041 const struct rte_flow_item items[], 6042 const struct rte_flow_action actions[], 6043 bool external, int hairpin, struct rte_flow_error *error) 6044 { 6045 int ret; 6046 uint64_t action_flags = 0; 6047 uint64_t item_flags = 0; 6048 uint64_t last_item = 0; 6049 uint8_t next_protocol = 0xff; 6050 uint16_t ether_type = 0; 6051 int actions_n = 0; 6052 uint8_t item_ipv6_proto = 0; 6053 int fdb_mirror_limit = 0; 6054 int modify_after_mirror = 0; 6055 const struct rte_flow_item *geneve_item = NULL; 6056 const struct rte_flow_item *gre_item = NULL; 6057 const struct rte_flow_item *gtp_item = NULL; 6058 const struct rte_flow_action_raw_decap *decap; 6059 const struct rte_flow_action_raw_encap *encap; 6060 const struct rte_flow_action_rss *rss = NULL; 6061 const struct rte_flow_action_rss *sample_rss = NULL; 6062 const struct rte_flow_action_count *count = NULL; 6063 const struct rte_flow_action_count *sample_count = NULL; 6064 const struct rte_flow_item_tcp nic_tcp_mask = { 6065 .hdr = { 6066 .tcp_flags = 0xFF, 6067 .src_port = RTE_BE16(UINT16_MAX), 6068 .dst_port = RTE_BE16(UINT16_MAX), 6069 } 6070 }; 6071 const struct rte_flow_item_ipv6 nic_ipv6_mask = { 6072 .hdr = { 6073 .src_addr = 6074 "\xff\xff\xff\xff\xff\xff\xff\xff" 6075 "\xff\xff\xff\xff\xff\xff\xff\xff", 6076 .dst_addr = 6077 "\xff\xff\xff\xff\xff\xff\xff\xff" 6078 "\xff\xff\xff\xff\xff\xff\xff\xff", 6079 .vtc_flow = RTE_BE32(0xffffffff), 6080 .proto = 0xff, 6081 .hop_limits = 0xff, 6082 }, 6083 .has_frag_ext = 1, 6084 }; 6085 const struct rte_flow_item_ecpri nic_ecpri_mask = { 6086 .hdr = { 6087 .common = { 6088 .u32 = 6089 RTE_BE32(((const struct rte_ecpri_common_hdr) { 6090 .type = 0xFF, 6091 }).u32), 6092 }, 6093 .dummy[0] = 0xffffffff, 6094 }, 6095 }; 6096 struct mlx5_priv *priv = dev->data->dev_private; 6097 struct mlx5_dev_config *dev_conf = &priv->config; 6098 uint16_t queue_index = 0xFFFF; 6099 const struct rte_flow_item_vlan *vlan_m = NULL; 6100 uint32_t rw_act_num = 0; 6101 uint64_t is_root; 6102 const struct mlx5_flow_tunnel *tunnel; 6103 struct flow_grp_info grp_info = { 6104 .external = !!external, 6105 .transfer = !!attr->transfer, 6106 .fdb_def_rule = !!priv->fdb_def_rule, 6107 }; 6108 const struct rte_eth_hairpin_conf *conf; 6109 6110 if (items == NULL) 6111 return -1; 6112 if (is_flow_tunnel_match_rule(dev, attr, items, actions)) { 6113 tunnel = flow_items_to_tunnel(items); 6114 action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH | 6115 MLX5_FLOW_ACTION_DECAP; 6116 } else if (is_flow_tunnel_steer_rule(dev, attr, items, actions)) { 6117 tunnel = flow_actions_to_tunnel(actions); 6118 action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET; 6119 } else { 6120 tunnel = NULL; 6121 } 6122 if (tunnel && priv->representor) 6123 return rte_flow_error_set(error, ENOTSUP, 6124 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 6125 "decap not supported " 6126 "for VF representor"); 6127 grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate 6128 (dev, tunnel, attr, items, actions); 6129 ret = flow_dv_validate_attributes(dev, tunnel, attr, &grp_info, error); 6130 if (ret < 0) 6131 return ret; 6132 is_root = (uint64_t)ret; 6133 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { 6134 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 6135 int type = items->type; 6136 6137 if (!mlx5_flow_os_item_supported(type)) 6138 return rte_flow_error_set(error, ENOTSUP, 6139 RTE_FLOW_ERROR_TYPE_ITEM, 6140 NULL, "item not supported"); 6141 switch 
(type) { 6142 case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL: 6143 if (items[0].type != (typeof(items[0].type)) 6144 MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL) 6145 return rte_flow_error_set 6146 (error, EINVAL, 6147 RTE_FLOW_ERROR_TYPE_ITEM, 6148 NULL, "MLX5 private items " 6149 "must be the first"); 6150 break; 6151 case RTE_FLOW_ITEM_TYPE_VOID: 6152 break; 6153 case RTE_FLOW_ITEM_TYPE_PORT_ID: 6154 ret = flow_dv_validate_item_port_id 6155 (dev, items, attr, item_flags, error); 6156 if (ret < 0) 6157 return ret; 6158 last_item = MLX5_FLOW_ITEM_PORT_ID; 6159 break; 6160 case RTE_FLOW_ITEM_TYPE_ETH: 6161 ret = mlx5_flow_validate_item_eth(items, item_flags, 6162 true, error); 6163 if (ret < 0) 6164 return ret; 6165 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 : 6166 MLX5_FLOW_LAYER_OUTER_L2; 6167 if (items->mask != NULL && items->spec != NULL) { 6168 ether_type = 6169 ((const struct rte_flow_item_eth *) 6170 items->spec)->type; 6171 ether_type &= 6172 ((const struct rte_flow_item_eth *) 6173 items->mask)->type; 6174 ether_type = rte_be_to_cpu_16(ether_type); 6175 } else { 6176 ether_type = 0; 6177 } 6178 break; 6179 case RTE_FLOW_ITEM_TYPE_VLAN: 6180 ret = flow_dv_validate_item_vlan(items, item_flags, 6181 dev, error); 6182 if (ret < 0) 6183 return ret; 6184 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN : 6185 MLX5_FLOW_LAYER_OUTER_VLAN; 6186 if (items->mask != NULL && items->spec != NULL) { 6187 ether_type = 6188 ((const struct rte_flow_item_vlan *) 6189 items->spec)->inner_type; 6190 ether_type &= 6191 ((const struct rte_flow_item_vlan *) 6192 items->mask)->inner_type; 6193 ether_type = rte_be_to_cpu_16(ether_type); 6194 } else { 6195 ether_type = 0; 6196 } 6197 /* Store outer VLAN mask for of_push_vlan action. */ 6198 if (!tunnel) 6199 vlan_m = items->mask; 6200 break; 6201 case RTE_FLOW_ITEM_TYPE_IPV4: 6202 mlx5_flow_tunnel_ip_check(items, next_protocol, 6203 &item_flags, &tunnel); 6204 ret = flow_dv_validate_item_ipv4(items, item_flags, 6205 last_item, ether_type, 6206 error); 6207 if (ret < 0) 6208 return ret; 6209 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 : 6210 MLX5_FLOW_LAYER_OUTER_L3_IPV4; 6211 if (items->mask != NULL && 6212 ((const struct rte_flow_item_ipv4 *) 6213 items->mask)->hdr.next_proto_id) { 6214 next_protocol = 6215 ((const struct rte_flow_item_ipv4 *) 6216 (items->spec))->hdr.next_proto_id; 6217 next_protocol &= 6218 ((const struct rte_flow_item_ipv4 *) 6219 (items->mask))->hdr.next_proto_id; 6220 } else { 6221 /* Reset for inner layer. */ 6222 next_protocol = 0xff; 6223 } 6224 break; 6225 case RTE_FLOW_ITEM_TYPE_IPV6: 6226 mlx5_flow_tunnel_ip_check(items, next_protocol, 6227 &item_flags, &tunnel); 6228 ret = mlx5_flow_validate_item_ipv6(items, item_flags, 6229 last_item, 6230 ether_type, 6231 &nic_ipv6_mask, 6232 error); 6233 if (ret < 0) 6234 return ret; 6235 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 : 6236 MLX5_FLOW_LAYER_OUTER_L3_IPV6; 6237 if (items->mask != NULL && 6238 ((const struct rte_flow_item_ipv6 *) 6239 items->mask)->hdr.proto) { 6240 item_ipv6_proto = 6241 ((const struct rte_flow_item_ipv6 *) 6242 items->spec)->hdr.proto; 6243 next_protocol = 6244 ((const struct rte_flow_item_ipv6 *) 6245 items->spec)->hdr.proto; 6246 next_protocol &= 6247 ((const struct rte_flow_item_ipv6 *) 6248 items->mask)->hdr.proto; 6249 } else { 6250 /* Reset for inner layer. 
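When the item mask does not cover the protocol field there is nothing reliable to carry to the next header, so fall back to 0xff and leave the following L4/tunnel item checks unconstrained.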
*/ 6251 next_protocol = 0xff; 6252 } 6253 break; 6254 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT: 6255 ret = flow_dv_validate_item_ipv6_frag_ext(items, 6256 item_flags, 6257 error); 6258 if (ret < 0) 6259 return ret; 6260 last_item = tunnel ? 6261 MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT : 6262 MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT; 6263 if (items->mask != NULL && 6264 ((const struct rte_flow_item_ipv6_frag_ext *) 6265 items->mask)->hdr.next_header) { 6266 next_protocol = 6267 ((const struct rte_flow_item_ipv6_frag_ext *) 6268 items->spec)->hdr.next_header; 6269 next_protocol &= 6270 ((const struct rte_flow_item_ipv6_frag_ext *) 6271 items->mask)->hdr.next_header; 6272 } else { 6273 /* Reset for inner layer. */ 6274 next_protocol = 0xff; 6275 } 6276 break; 6277 case RTE_FLOW_ITEM_TYPE_TCP: 6278 ret = mlx5_flow_validate_item_tcp 6279 (items, item_flags, 6280 next_protocol, 6281 &nic_tcp_mask, 6282 error); 6283 if (ret < 0) 6284 return ret; 6285 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP : 6286 MLX5_FLOW_LAYER_OUTER_L4_TCP; 6287 break; 6288 case RTE_FLOW_ITEM_TYPE_UDP: 6289 ret = mlx5_flow_validate_item_udp(items, item_flags, 6290 next_protocol, 6291 error); 6292 if (ret < 0) 6293 return ret; 6294 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP : 6295 MLX5_FLOW_LAYER_OUTER_L4_UDP; 6296 break; 6297 case RTE_FLOW_ITEM_TYPE_GRE: 6298 ret = mlx5_flow_validate_item_gre(items, item_flags, 6299 next_protocol, error); 6300 if (ret < 0) 6301 return ret; 6302 gre_item = items; 6303 last_item = MLX5_FLOW_LAYER_GRE; 6304 break; 6305 case RTE_FLOW_ITEM_TYPE_NVGRE: 6306 ret = mlx5_flow_validate_item_nvgre(items, item_flags, 6307 next_protocol, 6308 error); 6309 if (ret < 0) 6310 return ret; 6311 last_item = MLX5_FLOW_LAYER_NVGRE; 6312 break; 6313 case RTE_FLOW_ITEM_TYPE_GRE_KEY: 6314 ret = mlx5_flow_validate_item_gre_key 6315 (items, item_flags, gre_item, error); 6316 if (ret < 0) 6317 return ret; 6318 last_item = MLX5_FLOW_LAYER_GRE_KEY; 6319 break; 6320 case RTE_FLOW_ITEM_TYPE_VXLAN: 6321 ret = mlx5_flow_validate_item_vxlan(items, item_flags, 6322 error); 6323 if (ret < 0) 6324 return ret; 6325 last_item = MLX5_FLOW_LAYER_VXLAN; 6326 break; 6327 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: 6328 ret = mlx5_flow_validate_item_vxlan_gpe(items, 6329 item_flags, dev, 6330 error); 6331 if (ret < 0) 6332 return ret; 6333 last_item = MLX5_FLOW_LAYER_VXLAN_GPE; 6334 break; 6335 case RTE_FLOW_ITEM_TYPE_GENEVE: 6336 ret = mlx5_flow_validate_item_geneve(items, 6337 item_flags, dev, 6338 error); 6339 if (ret < 0) 6340 return ret; 6341 geneve_item = items; 6342 last_item = MLX5_FLOW_LAYER_GENEVE; 6343 break; 6344 case RTE_FLOW_ITEM_TYPE_GENEVE_OPT: 6345 ret = mlx5_flow_validate_item_geneve_opt(items, 6346 last_item, 6347 geneve_item, 6348 dev, 6349 error); 6350 if (ret < 0) 6351 return ret; 6352 last_item = MLX5_FLOW_LAYER_GENEVE_OPT; 6353 break; 6354 case RTE_FLOW_ITEM_TYPE_MPLS: 6355 ret = mlx5_flow_validate_item_mpls(dev, items, 6356 item_flags, 6357 last_item, error); 6358 if (ret < 0) 6359 return ret; 6360 last_item = MLX5_FLOW_LAYER_MPLS; 6361 break; 6362 6363 case RTE_FLOW_ITEM_TYPE_MARK: 6364 ret = flow_dv_validate_item_mark(dev, items, attr, 6365 error); 6366 if (ret < 0) 6367 return ret; 6368 last_item = MLX5_FLOW_ITEM_MARK; 6369 break; 6370 case RTE_FLOW_ITEM_TYPE_META: 6371 ret = flow_dv_validate_item_meta(dev, items, attr, 6372 error); 6373 if (ret < 0) 6374 return ret; 6375 last_item = MLX5_FLOW_ITEM_METADATA; 6376 break; 6377 case RTE_FLOW_ITEM_TYPE_ICMP: 6378 ret = mlx5_flow_validate_item_icmp(items, 
item_flags, 6379 next_protocol, 6380 error); 6381 if (ret < 0) 6382 return ret; 6383 last_item = MLX5_FLOW_LAYER_ICMP; 6384 break; 6385 case RTE_FLOW_ITEM_TYPE_ICMP6: 6386 ret = mlx5_flow_validate_item_icmp6(items, item_flags, 6387 next_protocol, 6388 error); 6389 if (ret < 0) 6390 return ret; 6391 item_ipv6_proto = IPPROTO_ICMPV6; 6392 last_item = MLX5_FLOW_LAYER_ICMP6; 6393 break; 6394 case RTE_FLOW_ITEM_TYPE_TAG: 6395 ret = flow_dv_validate_item_tag(dev, items, 6396 attr, error); 6397 if (ret < 0) 6398 return ret; 6399 last_item = MLX5_FLOW_ITEM_TAG; 6400 break; 6401 case MLX5_RTE_FLOW_ITEM_TYPE_TAG: 6402 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE: 6403 break; 6404 case RTE_FLOW_ITEM_TYPE_GTP: 6405 ret = flow_dv_validate_item_gtp(dev, items, item_flags, 6406 error); 6407 if (ret < 0) 6408 return ret; 6409 gtp_item = items; 6410 last_item = MLX5_FLOW_LAYER_GTP; 6411 break; 6412 case RTE_FLOW_ITEM_TYPE_GTP_PSC: 6413 ret = flow_dv_validate_item_gtp_psc(items, last_item, 6414 gtp_item, attr, 6415 error); 6416 if (ret < 0) 6417 return ret; 6418 last_item = MLX5_FLOW_LAYER_GTP_PSC; 6419 break; 6420 case RTE_FLOW_ITEM_TYPE_ECPRI: 6421 /* Capacity will be checked in the translate stage. */ 6422 ret = mlx5_flow_validate_item_ecpri(items, item_flags, 6423 last_item, 6424 ether_type, 6425 &nic_ecpri_mask, 6426 error); 6427 if (ret < 0) 6428 return ret; 6429 last_item = MLX5_FLOW_LAYER_ECPRI; 6430 break; 6431 default: 6432 return rte_flow_error_set(error, ENOTSUP, 6433 RTE_FLOW_ERROR_TYPE_ITEM, 6434 NULL, "item not supported"); 6435 } 6436 item_flags |= last_item; 6437 } 6438 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 6439 int type = actions->type; 6440 6441 if (!mlx5_flow_os_action_supported(type)) 6442 return rte_flow_error_set(error, ENOTSUP, 6443 RTE_FLOW_ERROR_TYPE_ACTION, 6444 actions, 6445 "action not supported"); 6446 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS) 6447 return rte_flow_error_set(error, ENOTSUP, 6448 RTE_FLOW_ERROR_TYPE_ACTION, 6449 actions, "too many actions"); 6450 switch (type) { 6451 case RTE_FLOW_ACTION_TYPE_VOID: 6452 break; 6453 case RTE_FLOW_ACTION_TYPE_PORT_ID: 6454 ret = flow_dv_validate_action_port_id(dev, 6455 action_flags, 6456 actions, 6457 attr, 6458 error); 6459 if (ret) 6460 return ret; 6461 action_flags |= MLX5_FLOW_ACTION_PORT_ID; 6462 ++actions_n; 6463 break; 6464 case RTE_FLOW_ACTION_TYPE_FLAG: 6465 ret = flow_dv_validate_action_flag(dev, action_flags, 6466 attr, error); 6467 if (ret < 0) 6468 return ret; 6469 if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) { 6470 /* Count all modify-header actions as one. */ 6471 if (!(action_flags & 6472 MLX5_FLOW_MODIFY_HDR_ACTIONS)) 6473 ++actions_n; 6474 action_flags |= MLX5_FLOW_ACTION_FLAG | 6475 MLX5_FLOW_ACTION_MARK_EXT; 6476 if (action_flags & MLX5_FLOW_ACTION_SAMPLE) 6477 modify_after_mirror = 1; 6478 6479 } else { 6480 action_flags |= MLX5_FLOW_ACTION_FLAG; 6481 ++actions_n; 6482 } 6483 rw_act_num += MLX5_ACT_NUM_SET_MARK; 6484 break; 6485 case RTE_FLOW_ACTION_TYPE_MARK: 6486 ret = flow_dv_validate_action_mark(dev, actions, 6487 action_flags, 6488 attr, error); 6489 if (ret < 0) 6490 return ret; 6491 if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) { 6492 /* Count all modify-header actions as one. 
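All modify-header actions of a flow are merged into a single modify-header resource, so only the first of them bumps actions_n; the per-field cost is accumulated separately in rw_act_num and checked against flow_dv_modify_hdr_action_max() at the end of validation.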
*/ 6493 if (!(action_flags & 6494 MLX5_FLOW_MODIFY_HDR_ACTIONS)) 6495 ++actions_n; 6496 action_flags |= MLX5_FLOW_ACTION_MARK | 6497 MLX5_FLOW_ACTION_MARK_EXT; 6498 if (action_flags & MLX5_FLOW_ACTION_SAMPLE) 6499 modify_after_mirror = 1; 6500 } else { 6501 action_flags |= MLX5_FLOW_ACTION_MARK; 6502 ++actions_n; 6503 } 6504 rw_act_num += MLX5_ACT_NUM_SET_MARK; 6505 break; 6506 case RTE_FLOW_ACTION_TYPE_SET_META: 6507 ret = flow_dv_validate_action_set_meta(dev, actions, 6508 action_flags, 6509 attr, error); 6510 if (ret < 0) 6511 return ret; 6512 /* Count all modify-header actions as one action. */ 6513 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)) 6514 ++actions_n; 6515 if (action_flags & MLX5_FLOW_ACTION_SAMPLE) 6516 modify_after_mirror = 1; 6517 action_flags |= MLX5_FLOW_ACTION_SET_META; 6518 rw_act_num += MLX5_ACT_NUM_SET_META; 6519 break; 6520 case RTE_FLOW_ACTION_TYPE_SET_TAG: 6521 ret = flow_dv_validate_action_set_tag(dev, actions, 6522 action_flags, 6523 attr, error); 6524 if (ret < 0) 6525 return ret; 6526 /* Count all modify-header actions as one action. */ 6527 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)) 6528 ++actions_n; 6529 if (action_flags & MLX5_FLOW_ACTION_SAMPLE) 6530 modify_after_mirror = 1; 6531 action_flags |= MLX5_FLOW_ACTION_SET_TAG; 6532 rw_act_num += MLX5_ACT_NUM_SET_TAG; 6533 break; 6534 case RTE_FLOW_ACTION_TYPE_DROP: 6535 ret = mlx5_flow_validate_action_drop(action_flags, 6536 attr, error); 6537 if (ret < 0) 6538 return ret; 6539 action_flags |= MLX5_FLOW_ACTION_DROP; 6540 ++actions_n; 6541 break; 6542 case RTE_FLOW_ACTION_TYPE_QUEUE: 6543 ret = mlx5_flow_validate_action_queue(actions, 6544 action_flags, dev, 6545 attr, error); 6546 if (ret < 0) 6547 return ret; 6548 queue_index = ((const struct rte_flow_action_queue *) 6549 (actions->conf))->index; 6550 action_flags |= MLX5_FLOW_ACTION_QUEUE; 6551 ++actions_n; 6552 break; 6553 case RTE_FLOW_ACTION_TYPE_RSS: 6554 rss = actions->conf; 6555 ret = mlx5_flow_validate_action_rss(actions, 6556 action_flags, dev, 6557 attr, item_flags, 6558 error); 6559 if (ret < 0) 6560 return ret; 6561 if (rss && sample_rss && 6562 (sample_rss->level != rss->level || 6563 sample_rss->types != rss->types)) 6564 return rte_flow_error_set(error, ENOTSUP, 6565 RTE_FLOW_ERROR_TYPE_ACTION, 6566 NULL, 6567 "Can't use the different RSS types " 6568 "or level in the same flow"); 6569 if (rss != NULL && rss->queue_num) 6570 queue_index = rss->queue[0]; 6571 action_flags |= MLX5_FLOW_ACTION_RSS; 6572 ++actions_n; 6573 break; 6574 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS: 6575 ret = 6576 mlx5_flow_validate_action_default_miss(action_flags, 6577 attr, error); 6578 if (ret < 0) 6579 return ret; 6580 action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS; 6581 ++actions_n; 6582 break; 6583 case RTE_FLOW_ACTION_TYPE_COUNT: 6584 ret = flow_dv_validate_action_count(dev, actions, 6585 action_flags, 6586 error); 6587 if (ret < 0) 6588 return ret; 6589 count = actions->conf; 6590 action_flags |= MLX5_FLOW_ACTION_COUNT; 6591 ++actions_n; 6592 break; 6593 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN: 6594 if (flow_dv_validate_action_pop_vlan(dev, 6595 action_flags, 6596 actions, 6597 item_flags, attr, 6598 error)) 6599 return -rte_errno; 6600 action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN; 6601 ++actions_n; 6602 break; 6603 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: 6604 ret = flow_dv_validate_action_push_vlan(dev, 6605 action_flags, 6606 vlan_m, 6607 actions, attr, 6608 error); 6609 if (ret < 0) 6610 return ret; 6611 action_flags |= 
MLX5_FLOW_ACTION_OF_PUSH_VLAN; 6612 ++actions_n; 6613 break; 6614 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP: 6615 ret = flow_dv_validate_action_set_vlan_pcp 6616 (action_flags, actions, error); 6617 if (ret < 0) 6618 return ret; 6619 /* Count PCP with push_vlan command. */ 6620 action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP; 6621 break; 6622 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID: 6623 ret = flow_dv_validate_action_set_vlan_vid 6624 (item_flags, action_flags, 6625 actions, error); 6626 if (ret < 0) 6627 return ret; 6628 /* Count VID with push_vlan command. */ 6629 action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID; 6630 rw_act_num += MLX5_ACT_NUM_MDF_VID; 6631 break; 6632 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: 6633 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: 6634 ret = flow_dv_validate_action_l2_encap(dev, 6635 action_flags, 6636 actions, attr, 6637 error); 6638 if (ret < 0) 6639 return ret; 6640 action_flags |= MLX5_FLOW_ACTION_ENCAP; 6641 ++actions_n; 6642 break; 6643 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP: 6644 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP: 6645 ret = flow_dv_validate_action_decap(dev, action_flags, 6646 actions, item_flags, 6647 attr, error); 6648 if (ret < 0) 6649 return ret; 6650 action_flags |= MLX5_FLOW_ACTION_DECAP; 6651 ++actions_n; 6652 break; 6653 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: 6654 ret = flow_dv_validate_action_raw_encap_decap 6655 (dev, NULL, actions->conf, attr, &action_flags, 6656 &actions_n, actions, item_flags, error); 6657 if (ret < 0) 6658 return ret; 6659 break; 6660 case RTE_FLOW_ACTION_TYPE_RAW_DECAP: 6661 decap = actions->conf; 6662 while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID) 6663 ; 6664 if (actions->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) { 6665 encap = NULL; 6666 actions--; 6667 } else { 6668 encap = actions->conf; 6669 } 6670 ret = flow_dv_validate_action_raw_encap_decap 6671 (dev, 6672 decap ? decap : &empty_decap, encap, 6673 attr, &action_flags, &actions_n, 6674 actions, item_flags, error); 6675 if (ret < 0) 6676 return ret; 6677 break; 6678 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC: 6679 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST: 6680 ret = flow_dv_validate_action_modify_mac(action_flags, 6681 actions, 6682 item_flags, 6683 error); 6684 if (ret < 0) 6685 return ret; 6686 /* Count all modify-header actions as one action. */ 6687 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)) 6688 ++actions_n; 6689 action_flags |= actions->type == 6690 RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ? 6691 MLX5_FLOW_ACTION_SET_MAC_SRC : 6692 MLX5_FLOW_ACTION_SET_MAC_DST; 6693 if (action_flags & MLX5_FLOW_ACTION_SAMPLE) 6694 modify_after_mirror = 1; 6695 /* 6696 * Even if the source and destination MAC addresses have 6697 * overlap in the header with 4B alignment, the convert 6698 * function will handle them separately and 4 SW actions 6699 * will be created. And 2 actions will be added each 6700 * time no matter how many bytes of address will be set. 6701 */ 6702 rw_act_num += MLX5_ACT_NUM_MDF_MAC; 6703 break; 6704 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC: 6705 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST: 6706 ret = flow_dv_validate_action_modify_ipv4(action_flags, 6707 actions, 6708 item_flags, 6709 error); 6710 if (ret < 0) 6711 return ret; 6712 /* Count all modify-header actions as one action. */ 6713 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)) 6714 ++actions_n; 6715 if (action_flags & MLX5_FLOW_ACTION_SAMPLE) 6716 modify_after_mirror = 1; 6717 action_flags |= actions->type == 6718 RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ? 
6719 MLX5_FLOW_ACTION_SET_IPV4_SRC : 6720 MLX5_FLOW_ACTION_SET_IPV4_DST; 6721 rw_act_num += MLX5_ACT_NUM_MDF_IPV4; 6722 break; 6723 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC: 6724 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST: 6725 ret = flow_dv_validate_action_modify_ipv6(action_flags, 6726 actions, 6727 item_flags, 6728 error); 6729 if (ret < 0) 6730 return ret; 6731 if (item_ipv6_proto == IPPROTO_ICMPV6) 6732 return rte_flow_error_set(error, ENOTSUP, 6733 RTE_FLOW_ERROR_TYPE_ACTION, 6734 actions, 6735 "Can't change header " 6736 "with ICMPv6 proto"); 6737 /* Count all modify-header actions as one action. */ 6738 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)) 6739 ++actions_n; 6740 if (action_flags & MLX5_FLOW_ACTION_SAMPLE) 6741 modify_after_mirror = 1; 6742 action_flags |= actions->type == 6743 RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ? 6744 MLX5_FLOW_ACTION_SET_IPV6_SRC : 6745 MLX5_FLOW_ACTION_SET_IPV6_DST; 6746 rw_act_num += MLX5_ACT_NUM_MDF_IPV6; 6747 break; 6748 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC: 6749 case RTE_FLOW_ACTION_TYPE_SET_TP_DST: 6750 ret = flow_dv_validate_action_modify_tp(action_flags, 6751 actions, 6752 item_flags, 6753 error); 6754 if (ret < 0) 6755 return ret; 6756 /* Count all modify-header actions as one action. */ 6757 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)) 6758 ++actions_n; 6759 if (action_flags & MLX5_FLOW_ACTION_SAMPLE) 6760 modify_after_mirror = 1; 6761 action_flags |= actions->type == 6762 RTE_FLOW_ACTION_TYPE_SET_TP_SRC ? 6763 MLX5_FLOW_ACTION_SET_TP_SRC : 6764 MLX5_FLOW_ACTION_SET_TP_DST; 6765 rw_act_num += MLX5_ACT_NUM_MDF_PORT; 6766 break; 6767 case RTE_FLOW_ACTION_TYPE_DEC_TTL: 6768 case RTE_FLOW_ACTION_TYPE_SET_TTL: 6769 ret = flow_dv_validate_action_modify_ttl(action_flags, 6770 actions, 6771 item_flags, 6772 error); 6773 if (ret < 0) 6774 return ret; 6775 /* Count all modify-header actions as one action. */ 6776 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)) 6777 ++actions_n; 6778 if (action_flags & MLX5_FLOW_ACTION_SAMPLE) 6779 modify_after_mirror = 1; 6780 action_flags |= actions->type == 6781 RTE_FLOW_ACTION_TYPE_SET_TTL ? 6782 MLX5_FLOW_ACTION_SET_TTL : 6783 MLX5_FLOW_ACTION_DEC_TTL; 6784 rw_act_num += MLX5_ACT_NUM_MDF_TTL; 6785 break; 6786 case RTE_FLOW_ACTION_TYPE_JUMP: 6787 ret = flow_dv_validate_action_jump(dev, tunnel, actions, 6788 action_flags, 6789 attr, external, 6790 error); 6791 if (ret) 6792 return ret; 6793 if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) && 6794 fdb_mirror_limit) 6795 return rte_flow_error_set(error, EINVAL, 6796 RTE_FLOW_ERROR_TYPE_ACTION, 6797 NULL, 6798 "sample and jump action combination is not supported"); 6799 ++actions_n; 6800 action_flags |= MLX5_FLOW_ACTION_JUMP; 6801 break; 6802 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ: 6803 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ: 6804 ret = flow_dv_validate_action_modify_tcp_seq 6805 (action_flags, 6806 actions, 6807 item_flags, 6808 error); 6809 if (ret < 0) 6810 return ret; 6811 /* Count all modify-header actions as one action. */ 6812 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)) 6813 ++actions_n; 6814 if (action_flags & MLX5_FLOW_ACTION_SAMPLE) 6815 modify_after_mirror = 1; 6816 action_flags |= actions->type == 6817 RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ? 
6818 MLX5_FLOW_ACTION_INC_TCP_SEQ : 6819 MLX5_FLOW_ACTION_DEC_TCP_SEQ; 6820 rw_act_num += MLX5_ACT_NUM_MDF_TCPSEQ; 6821 break; 6822 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK: 6823 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK: 6824 ret = flow_dv_validate_action_modify_tcp_ack 6825 (action_flags, 6826 actions, 6827 item_flags, 6828 error); 6829 if (ret < 0) 6830 return ret; 6831 /* Count all modify-header actions as one action. */ 6832 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)) 6833 ++actions_n; 6834 if (action_flags & MLX5_FLOW_ACTION_SAMPLE) 6835 modify_after_mirror = 1; 6836 action_flags |= actions->type == 6837 RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ? 6838 MLX5_FLOW_ACTION_INC_TCP_ACK : 6839 MLX5_FLOW_ACTION_DEC_TCP_ACK; 6840 rw_act_num += MLX5_ACT_NUM_MDF_TCPACK; 6841 break; 6842 case MLX5_RTE_FLOW_ACTION_TYPE_MARK: 6843 break; 6844 case MLX5_RTE_FLOW_ACTION_TYPE_TAG: 6845 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG: 6846 rw_act_num += MLX5_ACT_NUM_SET_TAG; 6847 break; 6848 case RTE_FLOW_ACTION_TYPE_METER: 6849 ret = mlx5_flow_validate_action_meter(dev, 6850 action_flags, 6851 actions, attr, 6852 error); 6853 if (ret < 0) 6854 return ret; 6855 action_flags |= MLX5_FLOW_ACTION_METER; 6856 ++actions_n; 6857 /* Meter action will add one more TAG action. */ 6858 rw_act_num += MLX5_ACT_NUM_SET_TAG; 6859 break; 6860 case MLX5_RTE_FLOW_ACTION_TYPE_AGE: 6861 if (!attr->transfer && !attr->group) 6862 return rte_flow_error_set(error, ENOTSUP, 6863 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 6864 NULL, 6865 "Shared ASO age action is not supported for group 0"); 6866 action_flags |= MLX5_FLOW_ACTION_AGE; 6867 ++actions_n; 6868 break; 6869 case RTE_FLOW_ACTION_TYPE_AGE: 6870 ret = flow_dv_validate_action_age(action_flags, 6871 actions, dev, 6872 error); 6873 if (ret < 0) 6874 return ret; 6875 /* 6876 * Validate the regular AGE action (using counter) 6877 * mutual exclusion with share counter actions. 6878 */ 6879 if (!priv->sh->flow_hit_aso_en) { 6880 if (count && count->shared) 6881 return rte_flow_error_set 6882 (error, EINVAL, 6883 RTE_FLOW_ERROR_TYPE_ACTION, 6884 NULL, 6885 "old age and shared count combination is not supported"); 6886 if (sample_count) 6887 return rte_flow_error_set 6888 (error, EINVAL, 6889 RTE_FLOW_ERROR_TYPE_ACTION, 6890 NULL, 6891 "old age action and count must be in the same sub flow"); 6892 } 6893 action_flags |= MLX5_FLOW_ACTION_AGE; 6894 ++actions_n; 6895 break; 6896 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP: 6897 ret = flow_dv_validate_action_modify_ipv4_dscp 6898 (action_flags, 6899 actions, 6900 item_flags, 6901 error); 6902 if (ret < 0) 6903 return ret; 6904 /* Count all modify-header actions as one action. */ 6905 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)) 6906 ++actions_n; 6907 if (action_flags & MLX5_FLOW_ACTION_SAMPLE) 6908 modify_after_mirror = 1; 6909 action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP; 6910 rw_act_num += MLX5_ACT_NUM_SET_DSCP; 6911 break; 6912 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP: 6913 ret = flow_dv_validate_action_modify_ipv6_dscp 6914 (action_flags, 6915 actions, 6916 item_flags, 6917 error); 6918 if (ret < 0) 6919 return ret; 6920 /* Count all modify-header actions as one action. 
*/ 6921 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)) 6922 ++actions_n; 6923 if (action_flags & MLX5_FLOW_ACTION_SAMPLE) 6924 modify_after_mirror = 1; 6925 action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP; 6926 rw_act_num += MLX5_ACT_NUM_SET_DSCP; 6927 break; 6928 case RTE_FLOW_ACTION_TYPE_SAMPLE: 6929 ret = flow_dv_validate_action_sample(&action_flags, 6930 actions, dev, 6931 attr, item_flags, 6932 rss, &sample_rss, 6933 &sample_count, 6934 &fdb_mirror_limit, 6935 error); 6936 if (ret < 0) 6937 return ret; 6938 action_flags |= MLX5_FLOW_ACTION_SAMPLE; 6939 ++actions_n; 6940 break; 6941 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET: 6942 if (actions[0].type != (typeof(actions[0].type)) 6943 MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET) 6944 return rte_flow_error_set 6945 (error, EINVAL, 6946 RTE_FLOW_ERROR_TYPE_ACTION, 6947 NULL, "MLX5 private action " 6948 "must be the first"); 6949 6950 action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET; 6951 break; 6952 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD: 6953 ret = flow_dv_validate_action_modify_field(dev, 6954 action_flags, 6955 actions, 6956 attr, 6957 error); 6958 if (ret < 0) 6959 return ret; 6960 /* Count all modify-header actions as one action. */ 6961 if (!(action_flags & MLX5_FLOW_ACTION_MODIFY_FIELD)) 6962 ++actions_n; 6963 action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD; 6964 rw_act_num += ret; 6965 break; 6966 default: 6967 return rte_flow_error_set(error, ENOTSUP, 6968 RTE_FLOW_ERROR_TYPE_ACTION, 6969 actions, 6970 "action not supported"); 6971 } 6972 } 6973 /* 6974 * Validate actions in flow rules 6975 * - Explicit decap action is prohibited by the tunnel offload API. 6976 * - Drop action in tunnel steer rule is prohibited by the API. 6977 * - Application cannot use MARK action because it's value can mask 6978 * tunnel default miss nitification. 6979 * - JUMP in tunnel match rule has no support in current PMD 6980 * implementation. 6981 * - TAG & META are reserved for future uses. 6982 */ 6983 if (action_flags & MLX5_FLOW_ACTION_TUNNEL_SET) { 6984 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_DECAP | 6985 MLX5_FLOW_ACTION_MARK | 6986 MLX5_FLOW_ACTION_SET_TAG | 6987 MLX5_FLOW_ACTION_SET_META | 6988 MLX5_FLOW_ACTION_DROP; 6989 6990 if (action_flags & bad_actions_mask) 6991 return rte_flow_error_set 6992 (error, EINVAL, 6993 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 6994 "Invalid RTE action in tunnel " 6995 "set decap rule"); 6996 if (!(action_flags & MLX5_FLOW_ACTION_JUMP)) 6997 return rte_flow_error_set 6998 (error, EINVAL, 6999 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 7000 "tunnel set decap rule must terminate " 7001 "with JUMP"); 7002 if (!attr->ingress) 7003 return rte_flow_error_set 7004 (error, EINVAL, 7005 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 7006 "tunnel flows for ingress traffic only"); 7007 } 7008 if (action_flags & MLX5_FLOW_ACTION_TUNNEL_MATCH) { 7009 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_JUMP | 7010 MLX5_FLOW_ACTION_MARK | 7011 MLX5_FLOW_ACTION_SET_TAG | 7012 MLX5_FLOW_ACTION_SET_META; 7013 7014 if (action_flags & bad_actions_mask) 7015 return rte_flow_error_set 7016 (error, EINVAL, 7017 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 7018 "Invalid RTE action in tunnel " 7019 "set match rule"); 7020 } 7021 /* 7022 * Validate the drop action mutual exclusion with other actions. 7023 * Drop action is mutually-exclusive with any other action, except for 7024 * Count action. 7025 * Drop action compatibility with tunnel offload was already validated. 
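* The empty if () that follows intentionally skips this check for tunnel
* offload rules; only plain flows reach the else-if DROP/COUNT test.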
7026 */ 7027 if (action_flags & (MLX5_FLOW_ACTION_TUNNEL_MATCH | 7028 MLX5_FLOW_ACTION_TUNNEL_MATCH)); 7029 else if ((action_flags & MLX5_FLOW_ACTION_DROP) && 7030 (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT))) 7031 return rte_flow_error_set(error, EINVAL, 7032 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 7033 "Drop action is mutually-exclusive " 7034 "with any other action, except for " 7035 "Count action"); 7036 /* Eswitch has few restrictions on using items and actions */ 7037 if (attr->transfer) { 7038 if (!mlx5_flow_ext_mreg_supported(dev) && 7039 action_flags & MLX5_FLOW_ACTION_FLAG) 7040 return rte_flow_error_set(error, ENOTSUP, 7041 RTE_FLOW_ERROR_TYPE_ACTION, 7042 NULL, 7043 "unsupported action FLAG"); 7044 if (!mlx5_flow_ext_mreg_supported(dev) && 7045 action_flags & MLX5_FLOW_ACTION_MARK) 7046 return rte_flow_error_set(error, ENOTSUP, 7047 RTE_FLOW_ERROR_TYPE_ACTION, 7048 NULL, 7049 "unsupported action MARK"); 7050 if (action_flags & MLX5_FLOW_ACTION_QUEUE) 7051 return rte_flow_error_set(error, ENOTSUP, 7052 RTE_FLOW_ERROR_TYPE_ACTION, 7053 NULL, 7054 "unsupported action QUEUE"); 7055 if (action_flags & MLX5_FLOW_ACTION_RSS) 7056 return rte_flow_error_set(error, ENOTSUP, 7057 RTE_FLOW_ERROR_TYPE_ACTION, 7058 NULL, 7059 "unsupported action RSS"); 7060 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS)) 7061 return rte_flow_error_set(error, EINVAL, 7062 RTE_FLOW_ERROR_TYPE_ACTION, 7063 actions, 7064 "no fate action is found"); 7065 } else { 7066 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress) 7067 return rte_flow_error_set(error, EINVAL, 7068 RTE_FLOW_ERROR_TYPE_ACTION, 7069 actions, 7070 "no fate action is found"); 7071 } 7072 /* 7073 * Continue validation for Xcap and VLAN actions. 7074 * If hairpin is working in explicit TX rule mode, there is no actions 7075 * splitting and the validation of hairpin ingress flow should be the 7076 * same as other standard flows. 7077 */ 7078 if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS | 7079 MLX5_FLOW_VLAN_ACTIONS)) && 7080 (queue_index == 0xFFFF || 7081 mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN || 7082 ((conf = mlx5_rxq_get_hairpin_conf(dev, queue_index)) != NULL && 7083 conf->tx_explicit != 0))) { 7084 if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) == 7085 MLX5_FLOW_XCAP_ACTIONS) 7086 return rte_flow_error_set(error, ENOTSUP, 7087 RTE_FLOW_ERROR_TYPE_ACTION, 7088 NULL, "encap and decap " 7089 "combination aren't supported"); 7090 if (!attr->transfer && attr->ingress) { 7091 if (action_flags & MLX5_FLOW_ACTION_ENCAP) 7092 return rte_flow_error_set 7093 (error, ENOTSUP, 7094 RTE_FLOW_ERROR_TYPE_ACTION, 7095 NULL, "encap is not supported" 7096 " for ingress traffic"); 7097 else if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) 7098 return rte_flow_error_set 7099 (error, ENOTSUP, 7100 RTE_FLOW_ERROR_TYPE_ACTION, 7101 NULL, "push VLAN action not " 7102 "supported for ingress"); 7103 else if ((action_flags & MLX5_FLOW_VLAN_ACTIONS) == 7104 MLX5_FLOW_VLAN_ACTIONS) 7105 return rte_flow_error_set 7106 (error, ENOTSUP, 7107 RTE_FLOW_ERROR_TYPE_ACTION, 7108 NULL, "no support for " 7109 "multiple VLAN actions"); 7110 } 7111 } 7112 /* 7113 * Hairpin flow will add one more TAG action in TX implicit mode. 7114 * In TX explicit mode, there will be no hairpin flow ID. 7115 */ 7116 if (hairpin > 0) 7117 rw_act_num += MLX5_ACT_NUM_SET_TAG; 7118 /* extra metadata enabled: one more TAG action will be add. 
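The register-copy TAG used when extended metadata is enabled consumes one more modify-header field, so it is accounted in rw_act_num here before the final comparison against flow_dv_modify_hdr_action_max().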
*/ 7119 if (dev_conf->dv_flow_en && 7120 dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY && 7121 mlx5_flow_ext_mreg_supported(dev)) 7122 rw_act_num += MLX5_ACT_NUM_SET_TAG; 7123 if (rw_act_num > 7124 flow_dv_modify_hdr_action_max(dev, is_root)) { 7125 return rte_flow_error_set(error, ENOTSUP, 7126 RTE_FLOW_ERROR_TYPE_ACTION, 7127 NULL, "too many header modify" 7128 " actions to support"); 7129 } 7130 /* Eswitch egress mirror and modify flow has limitation on CX5 */ 7131 if (fdb_mirror_limit && modify_after_mirror) 7132 return rte_flow_error_set(error, EINVAL, 7133 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 7134 "sample before modify action is not supported"); 7135 return 0; 7136 } 7137 7138 /** 7139 * Internal preparation function. Allocates the DV flow size, 7140 * this size is constant. 7141 * 7142 * @param[in] dev 7143 * Pointer to the rte_eth_dev structure. 7144 * @param[in] attr 7145 * Pointer to the flow attributes. 7146 * @param[in] items 7147 * Pointer to the list of items. 7148 * @param[in] actions 7149 * Pointer to the list of actions. 7150 * @param[out] error 7151 * Pointer to the error structure. 7152 * 7153 * @return 7154 * Pointer to mlx5_flow object on success, 7155 * otherwise NULL and rte_errno is set. 7156 */ 7157 static struct mlx5_flow * 7158 flow_dv_prepare(struct rte_eth_dev *dev, 7159 const struct rte_flow_attr *attr __rte_unused, 7160 const struct rte_flow_item items[] __rte_unused, 7161 const struct rte_flow_action actions[] __rte_unused, 7162 struct rte_flow_error *error) 7163 { 7164 uint32_t handle_idx = 0; 7165 struct mlx5_flow *dev_flow; 7166 struct mlx5_flow_handle *dev_handle; 7167 struct mlx5_priv *priv = dev->data->dev_private; 7168 struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace(); 7169 7170 MLX5_ASSERT(wks); 7171 /* In case of corrupting the memory. */ 7172 if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) { 7173 rte_flow_error_set(error, ENOSPC, 7174 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 7175 "not free temporary device flow"); 7176 return NULL; 7177 } 7178 dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], 7179 &handle_idx); 7180 if (!dev_handle) { 7181 rte_flow_error_set(error, ENOMEM, 7182 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 7183 "not enough memory to create flow handle"); 7184 return NULL; 7185 } 7186 MLX5_ASSERT(wks->flow_idx < RTE_DIM(wks->flows)); 7187 dev_flow = &wks->flows[wks->flow_idx++]; 7188 memset(dev_flow, 0, sizeof(*dev_flow)); 7189 dev_flow->handle = dev_handle; 7190 dev_flow->handle_idx = handle_idx; 7191 /* 7192 * In some old rdma-core releases, before continuing, a check of the 7193 * length of matching parameter will be done at first. It needs to use 7194 * the length without misc4 param. If the flow has misc4 support, then 7195 * the length needs to be adjusted accordingly. Each param member is 7196 * aligned with a 64B boundary naturally. 7197 */ 7198 dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param) - 7199 MLX5_ST_SZ_BYTES(fte_match_set_misc4); 7200 dev_flow->ingress = attr->ingress; 7201 dev_flow->dv.transfer = attr->transfer; 7202 return dev_flow; 7203 } 7204 7205 #ifdef RTE_LIBRTE_MLX5_DEBUG 7206 /** 7207 * Sanity check for match mask and value. Similar to check_valid_spec() in 7208 * kernel driver. If unmasked bit is present in value, it returns failure. 7209 * 7210 * @param match_mask 7211 * pointer to match mask buffer. 7212 * @param match_value 7213 * pointer to match value buffer. 7214 * 7215 * @return 7216 * 0 if valid, -EINVAL otherwise. 
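*
* For example (assumed values, for illustration only): a mask byte of 0x0f
* with a value byte of 0x1f fails the check, because bit 4 is set in the
* value but not covered by the mask (0x1f & ~0x0f == 0x10).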
7217 */ 7218 static int 7219 flow_dv_check_valid_spec(void *match_mask, void *match_value) 7220 { 7221 uint8_t *m = match_mask; 7222 uint8_t *v = match_value; 7223 unsigned int i; 7224 7225 for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) { 7226 if (v[i] & ~m[i]) { 7227 DRV_LOG(ERR, 7228 "match_value differs from match_criteria" 7229 " %p[%u] != %p[%u]", 7230 match_value, i, match_mask, i); 7231 return -EINVAL; 7232 } 7233 } 7234 return 0; 7235 } 7236 #endif 7237 7238 /** 7239 * Add match of ip_version. 7240 * 7241 * @param[in] group 7242 * Flow group. 7243 * @param[in] headers_v 7244 * Values header pointer. 7245 * @param[in] headers_m 7246 * Masks header pointer. 7247 * @param[in] ip_version 7248 * The IP version to set. 7249 */ 7250 static inline void 7251 flow_dv_set_match_ip_version(uint32_t group, 7252 void *headers_v, 7253 void *headers_m, 7254 uint8_t ip_version) 7255 { 7256 if (group == 0) 7257 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf); 7258 else 7259 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 7260 ip_version); 7261 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, ip_version); 7262 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0); 7263 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 0); 7264 } 7265 7266 /** 7267 * Add Ethernet item to matcher and to the value. 7268 * 7269 * @param[in, out] matcher 7270 * Flow matcher. 7271 * @param[in, out] key 7272 * Flow matcher value. 7273 * @param[in] item 7274 * Flow pattern to translate. 7275 * @param[in] inner 7276 * Item is inner pattern. 7277 */ 7278 static void 7279 flow_dv_translate_item_eth(void *matcher, void *key, 7280 const struct rte_flow_item *item, int inner, 7281 uint32_t group) 7282 { 7283 const struct rte_flow_item_eth *eth_m = item->mask; 7284 const struct rte_flow_item_eth *eth_v = item->spec; 7285 const struct rte_flow_item_eth nic_mask = { 7286 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff", 7287 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff", 7288 .type = RTE_BE16(0xffff), 7289 .has_vlan = 0, 7290 }; 7291 void *hdrs_m; 7292 void *hdrs_v; 7293 char *l24_v; 7294 unsigned int i; 7295 7296 if (!eth_v) 7297 return; 7298 if (!eth_m) 7299 eth_m = &nic_mask; 7300 if (inner) { 7301 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher, 7302 inner_headers); 7303 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); 7304 } else { 7305 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher, 7306 outer_headers); 7307 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); 7308 } 7309 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, dmac_47_16), 7310 ð_m->dst, sizeof(eth_m->dst)); 7311 /* The value must be in the range of the mask. */ 7312 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, dmac_47_16); 7313 for (i = 0; i < sizeof(eth_m->dst); ++i) 7314 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i]; 7315 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, smac_47_16), 7316 ð_m->src, sizeof(eth_m->src)); 7317 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, smac_47_16); 7318 /* The value must be in the range of the mask. */ 7319 for (i = 0; i < sizeof(eth_m->dst); ++i) 7320 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i]; 7321 /* 7322 * HW supports match on one Ethertype, the Ethertype following the last 7323 * VLAN tag of the packet (see PRM). 7324 * Set match on ethertype only if ETH header is not followed by VLAN. 7325 * HW is optimized for IPv4/IPv6. In such cases, avoid setting 7326 * ethertype, and use ip_version field instead. 
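* A fully-masked VLAN/QinQ ethertype is folded into the cvlan_tag/svlan_tag
* bits below, IPv4/IPv6 go through flow_dv_set_match_ip_version(), and any
* other fully-masked type falls back to an exact ethertype match.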
7327 * eCPRI over Ether layer will use type value 0xAEFE. 7328 */ 7329 if (eth_m->type == 0xFFFF) { 7330 /* Set cvlan_tag mask for any single\multi\un-tagged case. */ 7331 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1); 7332 switch (eth_v->type) { 7333 case RTE_BE16(RTE_ETHER_TYPE_VLAN): 7334 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1); 7335 return; 7336 case RTE_BE16(RTE_ETHER_TYPE_QINQ): 7337 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1); 7338 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1); 7339 return; 7340 case RTE_BE16(RTE_ETHER_TYPE_IPV4): 7341 flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4); 7342 return; 7343 case RTE_BE16(RTE_ETHER_TYPE_IPV6): 7344 flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6); 7345 return; 7346 default: 7347 break; 7348 } 7349 } 7350 if (eth_m->has_vlan) { 7351 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1); 7352 if (eth_v->has_vlan) { 7353 /* 7354 * Here, when also has_more_vlan field in VLAN item is 7355 * not set, only single-tagged packets will be matched. 7356 */ 7357 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1); 7358 return; 7359 } 7360 } 7361 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype, 7362 rte_be_to_cpu_16(eth_m->type)); 7363 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype); 7364 *(uint16_t *)(l24_v) = eth_m->type & eth_v->type; 7365 } 7366 7367 /** 7368 * Add VLAN item to matcher and to the value. 7369 * 7370 * @param[in, out] dev_flow 7371 * Flow descriptor. 7372 * @param[in, out] matcher 7373 * Flow matcher. 7374 * @param[in, out] key 7375 * Flow matcher value. 7376 * @param[in] item 7377 * Flow pattern to translate. 7378 * @param[in] inner 7379 * Item is inner pattern. 7380 */ 7381 static void 7382 flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow, 7383 void *matcher, void *key, 7384 const struct rte_flow_item *item, 7385 int inner, uint32_t group) 7386 { 7387 const struct rte_flow_item_vlan *vlan_m = item->mask; 7388 const struct rte_flow_item_vlan *vlan_v = item->spec; 7389 void *hdrs_m; 7390 void *hdrs_v; 7391 uint16_t tci_m; 7392 uint16_t tci_v; 7393 7394 if (inner) { 7395 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher, 7396 inner_headers); 7397 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); 7398 } else { 7399 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher, 7400 outer_headers); 7401 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); 7402 /* 7403 * This is workaround, masks are not supported, 7404 * and pre-validated. 7405 */ 7406 if (vlan_v) 7407 dev_flow->handle->vf_vlan.tag = 7408 rte_be_to_cpu_16(vlan_v->tci) & 0x0fff; 7409 } 7410 /* 7411 * When VLAN item exists in flow, mark packet as tagged, 7412 * even if TCI is not specified. 
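 * The TCI handled below follows the usual 802.1Q layout; e.g. an
 * illustrative TCI of 0xe00a splits into first_vid = 0x00a (bits 0-11),
 * first_cfi = 0 (bit 12) and first_prio = 7 (bits 13-15).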
7413 */ 7414 if (!MLX5_GET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag)) { 7415 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1); 7416 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1); 7417 } 7418 if (!vlan_v) 7419 return; 7420 if (!vlan_m) 7421 vlan_m = &rte_flow_item_vlan_mask; 7422 tci_m = rte_be_to_cpu_16(vlan_m->tci); 7423 tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci); 7424 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_vid, tci_m); 7425 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_vid, tci_v); 7426 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_cfi, tci_m >> 12); 7427 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_cfi, tci_v >> 12); 7428 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_prio, tci_m >> 13); 7429 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_prio, tci_v >> 13); 7430 /* 7431 * HW is optimized for IPv4/IPv6. In such cases, avoid setting 7432 * ethertype, and use ip_version field instead. 7433 */ 7434 if (vlan_m->inner_type == 0xFFFF) { 7435 switch (vlan_v->inner_type) { 7436 case RTE_BE16(RTE_ETHER_TYPE_VLAN): 7437 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1); 7438 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1); 7439 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0); 7440 return; 7441 case RTE_BE16(RTE_ETHER_TYPE_IPV4): 7442 flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4); 7443 return; 7444 case RTE_BE16(RTE_ETHER_TYPE_IPV6): 7445 flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6); 7446 return; 7447 default: 7448 break; 7449 } 7450 } 7451 if (vlan_m->has_more_vlan && vlan_v->has_more_vlan) { 7452 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1); 7453 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1); 7454 /* Only one vlan_tag bit can be set. */ 7455 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0); 7456 return; 7457 } 7458 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype, 7459 rte_be_to_cpu_16(vlan_m->inner_type)); 7460 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, ethertype, 7461 rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type)); 7462 } 7463 7464 /** 7465 * Add IPV4 item to matcher and to the value. 7466 * 7467 * @param[in, out] matcher 7468 * Flow matcher. 7469 * @param[in, out] key 7470 * Flow matcher value. 7471 * @param[in] item 7472 * Flow pattern to translate. 7473 * @param[in] inner 7474 * Item is inner pattern. 7475 * @param[in] group 7476 * The group to insert the rule. 
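 *
 * A minimal sketch of an item this function consumes (the address
 * 0xc0a80001, i.e. 192.168.0.1, is illustrative only):
 *
 *	struct rte_flow_item_ipv4 spec = {
 *		.hdr = { .dst_addr = RTE_BE32(0xc0a80001) },
 *	};
 *	struct rte_flow_item_ipv4 mask = {
 *		.hdr = { .dst_addr = RTE_BE32(0xffffffff) },
 *	};
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		.spec = &spec, .mask = &mask,
 *	};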
7477 */ 7478 static void 7479 flow_dv_translate_item_ipv4(void *matcher, void *key, 7480 const struct rte_flow_item *item, 7481 int inner, uint32_t group) 7482 { 7483 const struct rte_flow_item_ipv4 *ipv4_m = item->mask; 7484 const struct rte_flow_item_ipv4 *ipv4_v = item->spec; 7485 const struct rte_flow_item_ipv4 nic_mask = { 7486 .hdr = { 7487 .src_addr = RTE_BE32(0xffffffff), 7488 .dst_addr = RTE_BE32(0xffffffff), 7489 .type_of_service = 0xff, 7490 .next_proto_id = 0xff, 7491 .time_to_live = 0xff, 7492 }, 7493 }; 7494 void *headers_m; 7495 void *headers_v; 7496 char *l24_m; 7497 char *l24_v; 7498 uint8_t tos; 7499 7500 if (inner) { 7501 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 7502 inner_headers); 7503 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); 7504 } else { 7505 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 7506 outer_headers); 7507 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); 7508 } 7509 flow_dv_set_match_ip_version(group, headers_v, headers_m, 4); 7510 if (!ipv4_v) 7511 return; 7512 if (!ipv4_m) 7513 ipv4_m = &nic_mask; 7514 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, 7515 dst_ipv4_dst_ipv6.ipv4_layout.ipv4); 7516 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 7517 dst_ipv4_dst_ipv6.ipv4_layout.ipv4); 7518 *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr; 7519 *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr; 7520 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, 7521 src_ipv4_src_ipv6.ipv4_layout.ipv4); 7522 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 7523 src_ipv4_src_ipv6.ipv4_layout.ipv4); 7524 *(uint32_t *)l24_m = ipv4_m->hdr.src_addr; 7525 *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr; 7526 tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service; 7527 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, 7528 ipv4_m->hdr.type_of_service); 7529 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos); 7530 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, 7531 ipv4_m->hdr.type_of_service >> 2); 7532 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2); 7533 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 7534 ipv4_m->hdr.next_proto_id); 7535 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, 7536 ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id); 7537 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit, 7538 ipv4_m->hdr.time_to_live); 7539 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit, 7540 ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live); 7541 MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 7542 !!(ipv4_m->hdr.fragment_offset)); 7543 MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 7544 !!(ipv4_v->hdr.fragment_offset & ipv4_m->hdr.fragment_offset)); 7545 } 7546 7547 /** 7548 * Add IPV6 item to matcher and to the value. 7549 * 7550 * @param[in, out] matcher 7551 * Flow matcher. 7552 * @param[in, out] key 7553 * Flow matcher value. 7554 * @param[in] item 7555 * Flow pattern to translate. 7556 * @param[in] inner 7557 * Item is inner pattern. 7558 * @param[in] group 7559 * The group to insert the rule. 
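 *
 * For reference, the vtc_flow word handled below packs
 * version(4) / traffic class(8) / flow label(20); e.g. an illustrative
 * vtc_flow of 0x6b800005 yields DSCP 46 (vtc >> 22), ECN 0 (low two
 * bits of vtc >> 20) and flow label 5.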
7560 */ 7561 static void 7562 flow_dv_translate_item_ipv6(void *matcher, void *key, 7563 const struct rte_flow_item *item, 7564 int inner, uint32_t group) 7565 { 7566 const struct rte_flow_item_ipv6 *ipv6_m = item->mask; 7567 const struct rte_flow_item_ipv6 *ipv6_v = item->spec; 7568 const struct rte_flow_item_ipv6 nic_mask = { 7569 .hdr = { 7570 .src_addr = 7571 "\xff\xff\xff\xff\xff\xff\xff\xff" 7572 "\xff\xff\xff\xff\xff\xff\xff\xff", 7573 .dst_addr = 7574 "\xff\xff\xff\xff\xff\xff\xff\xff" 7575 "\xff\xff\xff\xff\xff\xff\xff\xff", 7576 .vtc_flow = RTE_BE32(0xffffffff), 7577 .proto = 0xff, 7578 .hop_limits = 0xff, 7579 }, 7580 }; 7581 void *headers_m; 7582 void *headers_v; 7583 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters); 7584 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters); 7585 char *l24_m; 7586 char *l24_v; 7587 uint32_t vtc_m; 7588 uint32_t vtc_v; 7589 int i; 7590 int size; 7591 7592 if (inner) { 7593 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 7594 inner_headers); 7595 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); 7596 } else { 7597 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 7598 outer_headers); 7599 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); 7600 } 7601 flow_dv_set_match_ip_version(group, headers_v, headers_m, 6); 7602 if (!ipv6_v) 7603 return; 7604 if (!ipv6_m) 7605 ipv6_m = &nic_mask; 7606 size = sizeof(ipv6_m->hdr.dst_addr); 7607 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, 7608 dst_ipv4_dst_ipv6.ipv6_layout.ipv6); 7609 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 7610 dst_ipv4_dst_ipv6.ipv6_layout.ipv6); 7611 memcpy(l24_m, ipv6_m->hdr.dst_addr, size); 7612 for (i = 0; i < size; ++i) 7613 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i]; 7614 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, 7615 src_ipv4_src_ipv6.ipv6_layout.ipv6); 7616 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 7617 src_ipv4_src_ipv6.ipv6_layout.ipv6); 7618 memcpy(l24_m, ipv6_m->hdr.src_addr, size); 7619 for (i = 0; i < size; ++i) 7620 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i]; 7621 /* TOS. */ 7622 vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow); 7623 vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow); 7624 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20); 7625 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20); 7626 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22); 7627 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22); 7628 /* Label. */ 7629 if (inner) { 7630 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label, 7631 vtc_m); 7632 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label, 7633 vtc_v); 7634 } else { 7635 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label, 7636 vtc_m); 7637 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label, 7638 vtc_v); 7639 } 7640 /* Protocol. */ 7641 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 7642 ipv6_m->hdr.proto); 7643 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, 7644 ipv6_v->hdr.proto & ipv6_m->hdr.proto); 7645 /* Hop limit. 
*/ 7646 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit, 7647 ipv6_m->hdr.hop_limits); 7648 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit, 7649 ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits); 7650 MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 7651 !!(ipv6_m->has_frag_ext)); 7652 MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 7653 !!(ipv6_v->has_frag_ext & ipv6_m->has_frag_ext)); 7654 } 7655 7656 /** 7657 * Add IPV6 fragment extension item to matcher and to the value. 7658 * 7659 * @param[in, out] matcher 7660 * Flow matcher. 7661 * @param[in, out] key 7662 * Flow matcher value. 7663 * @param[in] item 7664 * Flow pattern to translate. 7665 * @param[in] inner 7666 * Item is inner pattern. 7667 */ 7668 static void 7669 flow_dv_translate_item_ipv6_frag_ext(void *matcher, void *key, 7670 const struct rte_flow_item *item, 7671 int inner) 7672 { 7673 const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_m = item->mask; 7674 const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_v = item->spec; 7675 const struct rte_flow_item_ipv6_frag_ext nic_mask = { 7676 .hdr = { 7677 .next_header = 0xff, 7678 .frag_data = RTE_BE16(0xffff), 7679 }, 7680 }; 7681 void *headers_m; 7682 void *headers_v; 7683 7684 if (inner) { 7685 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 7686 inner_headers); 7687 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); 7688 } else { 7689 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 7690 outer_headers); 7691 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); 7692 } 7693 /* IPv6 fragment extension item exists, so packet is IP fragment. */ 7694 MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1); 7695 MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 1); 7696 if (!ipv6_frag_ext_v) 7697 return; 7698 if (!ipv6_frag_ext_m) 7699 ipv6_frag_ext_m = &nic_mask; 7700 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 7701 ipv6_frag_ext_m->hdr.next_header); 7702 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, 7703 ipv6_frag_ext_v->hdr.next_header & 7704 ipv6_frag_ext_m->hdr.next_header); 7705 } 7706 7707 /** 7708 * Add TCP item to matcher and to the value. 7709 * 7710 * @param[in, out] matcher 7711 * Flow matcher. 7712 * @param[in, out] key 7713 * Flow matcher value. 7714 * @param[in] item 7715 * Flow pattern to translate. 7716 * @param[in] inner 7717 * Item is inner pattern. 
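 *
 * A minimal sketch of an item this function consumes, matching SYN
 * segments (RTE_TCP_SYN_FLAG comes from rte_tcp.h):
 *
 *	struct rte_flow_item_tcp spec = {
 *		.hdr = { .tcp_flags = RTE_TCP_SYN_FLAG },
 *	};
 *	struct rte_flow_item_tcp mask = {
 *		.hdr = { .tcp_flags = 0xff },
 *	};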
7718 */ 7719 static void 7720 flow_dv_translate_item_tcp(void *matcher, void *key, 7721 const struct rte_flow_item *item, 7722 int inner) 7723 { 7724 const struct rte_flow_item_tcp *tcp_m = item->mask; 7725 const struct rte_flow_item_tcp *tcp_v = item->spec; 7726 void *headers_m; 7727 void *headers_v; 7728 7729 if (inner) { 7730 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 7731 inner_headers); 7732 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); 7733 } else { 7734 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 7735 outer_headers); 7736 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); 7737 } 7738 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff); 7739 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP); 7740 if (!tcp_v) 7741 return; 7742 if (!tcp_m) 7743 tcp_m = &rte_flow_item_tcp_mask; 7744 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport, 7745 rte_be_to_cpu_16(tcp_m->hdr.src_port)); 7746 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport, 7747 rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port)); 7748 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport, 7749 rte_be_to_cpu_16(tcp_m->hdr.dst_port)); 7750 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport, 7751 rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port)); 7752 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags, 7753 tcp_m->hdr.tcp_flags); 7754 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags, 7755 (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags)); 7756 } 7757 7758 /** 7759 * Add UDP item to matcher and to the value. 7760 * 7761 * @param[in, out] matcher 7762 * Flow matcher. 7763 * @param[in, out] key 7764 * Flow matcher value. 7765 * @param[in] item 7766 * Flow pattern to translate. 7767 * @param[in] inner 7768 * Item is inner pattern. 7769 */ 7770 static void 7771 flow_dv_translate_item_udp(void *matcher, void *key, 7772 const struct rte_flow_item *item, 7773 int inner) 7774 { 7775 const struct rte_flow_item_udp *udp_m = item->mask; 7776 const struct rte_flow_item_udp *udp_v = item->spec; 7777 void *headers_m; 7778 void *headers_v; 7779 7780 if (inner) { 7781 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 7782 inner_headers); 7783 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); 7784 } else { 7785 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 7786 outer_headers); 7787 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); 7788 } 7789 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff); 7790 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP); 7791 if (!udp_v) 7792 return; 7793 if (!udp_m) 7794 udp_m = &rte_flow_item_udp_mask; 7795 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport, 7796 rte_be_to_cpu_16(udp_m->hdr.src_port)); 7797 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport, 7798 rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port)); 7799 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 7800 rte_be_to_cpu_16(udp_m->hdr.dst_port)); 7801 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, 7802 rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port)); 7803 } 7804 7805 /** 7806 * Add GRE optional Key item to matcher and to the value. 7807 * 7808 * @param[in, out] matcher 7809 * Flow matcher. 7810 * @param[in, out] key 7811 * Flow matcher value. 7812 * @param[in] item 7813 * Flow pattern to translate. 7814 * @param[in] inner 7815 * Item is inner pattern. 
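 *
 * The 32-bit key is split below into gre_key_h (upper 24 bits) and
 * gre_key_l (lower 8 bits); e.g. an illustrative key of 0x00112233 is
 * matched as gre_key_h = 0x001122 and gre_key_l = 0x33.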
7816 */ 7817 static void 7818 flow_dv_translate_item_gre_key(void *matcher, void *key, 7819 const struct rte_flow_item *item) 7820 { 7821 const rte_be32_t *key_m = item->mask; 7822 const rte_be32_t *key_v = item->spec; 7823 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters); 7824 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters); 7825 rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX); 7826 7827 /* GRE K bit must be on and should already be validated */ 7828 MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1); 7829 MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1); 7830 if (!key_v) 7831 return; 7832 if (!key_m) 7833 key_m = &gre_key_default_mask; 7834 MLX5_SET(fte_match_set_misc, misc_m, gre_key_h, 7835 rte_be_to_cpu_32(*key_m) >> 8); 7836 MLX5_SET(fte_match_set_misc, misc_v, gre_key_h, 7837 rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8); 7838 MLX5_SET(fte_match_set_misc, misc_m, gre_key_l, 7839 rte_be_to_cpu_32(*key_m) & 0xFF); 7840 MLX5_SET(fte_match_set_misc, misc_v, gre_key_l, 7841 rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF); 7842 } 7843 7844 /** 7845 * Add GRE item to matcher and to the value. 7846 * 7847 * @param[in, out] matcher 7848 * Flow matcher. 7849 * @param[in, out] key 7850 * Flow matcher value. 7851 * @param[in] item 7852 * Flow pattern to translate. 7853 * @param[in] inner 7854 * Item is inner pattern. 7855 */ 7856 static void 7857 flow_dv_translate_item_gre(void *matcher, void *key, 7858 const struct rte_flow_item *item, 7859 int inner) 7860 { 7861 const struct rte_flow_item_gre *gre_m = item->mask; 7862 const struct rte_flow_item_gre *gre_v = item->spec; 7863 void *headers_m; 7864 void *headers_v; 7865 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters); 7866 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters); 7867 struct { 7868 union { 7869 __extension__ 7870 struct { 7871 uint16_t version:3; 7872 uint16_t rsvd0:9; 7873 uint16_t s_present:1; 7874 uint16_t k_present:1; 7875 uint16_t rsvd_bit1:1; 7876 uint16_t c_present:1; 7877 }; 7878 uint16_t value; 7879 }; 7880 } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v; 7881 7882 if (inner) { 7883 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 7884 inner_headers); 7885 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); 7886 } else { 7887 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 7888 outer_headers); 7889 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); 7890 } 7891 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff); 7892 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE); 7893 if (!gre_v) 7894 return; 7895 if (!gre_m) 7896 gre_m = &rte_flow_item_gre_mask; 7897 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 7898 rte_be_to_cpu_16(gre_m->protocol)); 7899 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol, 7900 rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol)); 7901 gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver); 7902 gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver); 7903 MLX5_SET(fte_match_set_misc, misc_m, gre_c_present, 7904 gre_crks_rsvd0_ver_m.c_present); 7905 MLX5_SET(fte_match_set_misc, misc_v, gre_c_present, 7906 gre_crks_rsvd0_ver_v.c_present & 7907 gre_crks_rsvd0_ver_m.c_present); 7908 MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 7909 gre_crks_rsvd0_ver_m.k_present); 7910 MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 7911 gre_crks_rsvd0_ver_v.k_present & 7912 gre_crks_rsvd0_ver_m.k_present); 7913 MLX5_SET(fte_match_set_misc, 
misc_m, gre_s_present, 7914 gre_crks_rsvd0_ver_m.s_present); 7915 MLX5_SET(fte_match_set_misc, misc_v, gre_s_present, 7916 gre_crks_rsvd0_ver_v.s_present & 7917 gre_crks_rsvd0_ver_m.s_present); 7918 } 7919 7920 /** 7921 * Add NVGRE item to matcher and to the value. 7922 * 7923 * @param[in, out] matcher 7924 * Flow matcher. 7925 * @param[in, out] key 7926 * Flow matcher value. 7927 * @param[in] item 7928 * Flow pattern to translate. 7929 * @param[in] inner 7930 * Item is inner pattern. 7931 */ 7932 static void 7933 flow_dv_translate_item_nvgre(void *matcher, void *key, 7934 const struct rte_flow_item *item, 7935 int inner) 7936 { 7937 const struct rte_flow_item_nvgre *nvgre_m = item->mask; 7938 const struct rte_flow_item_nvgre *nvgre_v = item->spec; 7939 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters); 7940 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters); 7941 const char *tni_flow_id_m; 7942 const char *tni_flow_id_v; 7943 char *gre_key_m; 7944 char *gre_key_v; 7945 int size; 7946 int i; 7947 7948 /* For NVGRE, GRE header fields must be set with defined values. */ 7949 const struct rte_flow_item_gre gre_spec = { 7950 .c_rsvd0_ver = RTE_BE16(0x2000), 7951 .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB) 7952 }; 7953 const struct rte_flow_item_gre gre_mask = { 7954 .c_rsvd0_ver = RTE_BE16(0xB000), 7955 .protocol = RTE_BE16(UINT16_MAX), 7956 }; 7957 const struct rte_flow_item gre_item = { 7958 .spec = &gre_spec, 7959 .mask = &gre_mask, 7960 .last = NULL, 7961 }; 7962 flow_dv_translate_item_gre(matcher, key, &gre_item, inner); 7963 if (!nvgre_v) 7964 return; 7965 if (!nvgre_m) 7966 nvgre_m = &rte_flow_item_nvgre_mask; 7967 tni_flow_id_m = (const char *)nvgre_m->tni; 7968 tni_flow_id_v = (const char *)nvgre_v->tni; 7969 size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id); 7970 gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h); 7971 gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h); 7972 memcpy(gre_key_m, tni_flow_id_m, size); 7973 for (i = 0; i < size; ++i) 7974 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i]; 7975 } 7976 7977 /** 7978 * Add VXLAN item to matcher and to the value. 7979 * 7980 * @param[in, out] matcher 7981 * Flow matcher. 7982 * @param[in, out] key 7983 * Flow matcher value. 7984 * @param[in] item 7985 * Flow pattern to translate. 7986 * @param[in] inner 7987 * Item is inner pattern. 7988 */ 7989 static void 7990 flow_dv_translate_item_vxlan(void *matcher, void *key, 7991 const struct rte_flow_item *item, 7992 int inner) 7993 { 7994 const struct rte_flow_item_vxlan *vxlan_m = item->mask; 7995 const struct rte_flow_item_vxlan *vxlan_v = item->spec; 7996 void *headers_m; 7997 void *headers_v; 7998 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters); 7999 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters); 8000 char *vni_m; 8001 char *vni_v; 8002 uint16_t dport; 8003 int size; 8004 int i; 8005 8006 if (inner) { 8007 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 8008 inner_headers); 8009 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); 8010 } else { 8011 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 8012 outer_headers); 8013 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); 8014 } 8015 dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ? 
8016 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE; 8017 if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) { 8018 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF); 8019 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport); 8020 } 8021 if (!vxlan_v) 8022 return; 8023 if (!vxlan_m) 8024 vxlan_m = &rte_flow_item_vxlan_mask; 8025 size = sizeof(vxlan_m->vni); 8026 vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni); 8027 vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni); 8028 memcpy(vni_m, vxlan_m->vni, size); 8029 for (i = 0; i < size; ++i) 8030 vni_v[i] = vni_m[i] & vxlan_v->vni[i]; 8031 } 8032 8033 /** 8034 * Add VXLAN-GPE item to matcher and to the value. 8035 * 8036 * @param[in, out] matcher 8037 * Flow matcher. 8038 * @param[in, out] key 8039 * Flow matcher value. 8040 * @param[in] item 8041 * Flow pattern to translate. 8042 * @param[in] inner 8043 * Item is inner pattern. 8044 */ 8045 8046 static void 8047 flow_dv_translate_item_vxlan_gpe(void *matcher, void *key, 8048 const struct rte_flow_item *item, int inner) 8049 { 8050 const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask; 8051 const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec; 8052 void *headers_m; 8053 void *headers_v; 8054 void *misc_m = 8055 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3); 8056 void *misc_v = 8057 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3); 8058 char *vni_m; 8059 char *vni_v; 8060 uint16_t dport; 8061 int size; 8062 int i; 8063 uint8_t flags_m = 0xff; 8064 uint8_t flags_v = 0xc; 8065 8066 if (inner) { 8067 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 8068 inner_headers); 8069 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); 8070 } else { 8071 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 8072 outer_headers); 8073 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); 8074 } 8075 dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ? 8076 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE; 8077 if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) { 8078 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF); 8079 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport); 8080 } 8081 if (!vxlan_v) 8082 return; 8083 if (!vxlan_m) 8084 vxlan_m = &rte_flow_item_vxlan_gpe_mask; 8085 size = sizeof(vxlan_m->vni); 8086 vni_m = MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni); 8087 vni_v = MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni); 8088 memcpy(vni_m, vxlan_m->vni, size); 8089 for (i = 0; i < size; ++i) 8090 vni_v[i] = vni_m[i] & vxlan_v->vni[i]; 8091 if (vxlan_m->flags) { 8092 flags_m = vxlan_m->flags; 8093 flags_v = vxlan_v->flags; 8094 } 8095 MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m); 8096 MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v); 8097 MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_next_protocol, 8098 vxlan_m->protocol); 8099 MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_next_protocol, 8100 vxlan_v->protocol); 8101 } 8102 8103 /** 8104 * Add Geneve item to matcher and to the value. 8105 * 8106 * @param[in, out] matcher 8107 * Flow matcher. 8108 * @param[in, out] key 8109 * Flow matcher value. 8110 * @param[in] item 8111 * Flow pattern to translate. 8112 * @param[in] inner 8113 * Item is inner pattern. 
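 *
 * The ver_opt_len_o_c_rsvd0 field handled below is the first 16 bits of
 * the GENEVE header (Ver:2, Opt Len:6, O:1, C:1, Rsvd:6 per RFC 8926);
 * e.g. an illustrative host-order value of 0x0100 means one 4-byte
 * option block with the OAM and Critical bits clear.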
8114 */ 8115 8116 static void 8117 flow_dv_translate_item_geneve(void *matcher, void *key, 8118 const struct rte_flow_item *item, int inner) 8119 { 8120 const struct rte_flow_item_geneve *geneve_m = item->mask; 8121 const struct rte_flow_item_geneve *geneve_v = item->spec; 8122 void *headers_m; 8123 void *headers_v; 8124 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters); 8125 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters); 8126 uint16_t dport; 8127 uint16_t gbhdr_m; 8128 uint16_t gbhdr_v; 8129 char *vni_m; 8130 char *vni_v; 8131 size_t size, i; 8132 8133 if (inner) { 8134 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 8135 inner_headers); 8136 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); 8137 } else { 8138 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 8139 outer_headers); 8140 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); 8141 } 8142 dport = MLX5_UDP_PORT_GENEVE; 8143 if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) { 8144 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF); 8145 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport); 8146 } 8147 if (!geneve_v) 8148 return; 8149 if (!geneve_m) 8150 geneve_m = &rte_flow_item_geneve_mask; 8151 size = sizeof(geneve_m->vni); 8152 vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni); 8153 vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni); 8154 memcpy(vni_m, geneve_m->vni, size); 8155 for (i = 0; i < size; ++i) 8156 vni_v[i] = vni_m[i] & geneve_v->vni[i]; 8157 MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type, 8158 rte_be_to_cpu_16(geneve_m->protocol)); 8159 MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type, 8160 rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol)); 8161 gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0); 8162 gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0); 8163 MLX5_SET(fte_match_set_misc, misc_m, geneve_oam, 8164 MLX5_GENEVE_OAMF_VAL(gbhdr_m)); 8165 MLX5_SET(fte_match_set_misc, misc_v, geneve_oam, 8166 MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m)); 8167 MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len, 8168 MLX5_GENEVE_OPTLEN_VAL(gbhdr_m)); 8169 MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len, 8170 MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) & 8171 MLX5_GENEVE_OPTLEN_VAL(gbhdr_m)); 8172 } 8173 8174 /** 8175 * Create Geneve TLV option resource. 8176 * 8177 * @param dev[in, out] 8178 * Pointer to rte_eth_dev structure. 8179 * @param[in, out] tag_be24 8180 * Tag value in big endian then R-shift 8. 8181 * @parm[in, out] dev_flow 8182 * Pointer to the dev_flow. 8183 * @param[out] error 8184 * pointer to error structure. 8185 * 8186 * @return 8187 * 0 on success otherwise -errno and errno is set. 
8188 */ 8189 8190 int 8191 flow_dev_geneve_tlv_option_resource_register(struct rte_eth_dev *dev, 8192 const struct rte_flow_item *item, 8193 struct rte_flow_error *error) 8194 { 8195 struct mlx5_priv *priv = dev->data->dev_private; 8196 struct mlx5_dev_ctx_shared *sh = priv->sh; 8197 struct mlx5_geneve_tlv_option_resource *geneve_opt_resource = 8198 sh->geneve_tlv_option_resource; 8199 struct mlx5_devx_obj *obj; 8200 const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec; 8201 int ret = 0; 8202 8203 if (!geneve_opt_v) 8204 return -1; 8205 rte_spinlock_lock(&sh->geneve_tlv_opt_sl); 8206 if (geneve_opt_resource != NULL) { 8207 if (geneve_opt_resource->option_class == 8208 geneve_opt_v->option_class && 8209 geneve_opt_resource->option_type == 8210 geneve_opt_v->option_type && 8211 geneve_opt_resource->length == 8212 geneve_opt_v->option_len) { 8213 /* We already have GENVE TLV option obj allocated. */ 8214 __atomic_fetch_add(&geneve_opt_resource->refcnt, 1, 8215 __ATOMIC_RELAXED); 8216 } else { 8217 ret = rte_flow_error_set(error, ENOMEM, 8218 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 8219 "Only one GENEVE TLV option supported"); 8220 goto exit; 8221 } 8222 } else { 8223 /* Create a GENEVE TLV object and resource. */ 8224 obj = mlx5_devx_cmd_create_geneve_tlv_option(sh->ctx, 8225 geneve_opt_v->option_class, 8226 geneve_opt_v->option_type, 8227 geneve_opt_v->option_len); 8228 if (!obj) { 8229 ret = rte_flow_error_set(error, ENODATA, 8230 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 8231 "Failed to create GENEVE TLV Devx object"); 8232 goto exit; 8233 } 8234 sh->geneve_tlv_option_resource = 8235 mlx5_malloc(MLX5_MEM_ZERO, 8236 sizeof(*geneve_opt_resource), 8237 0, SOCKET_ID_ANY); 8238 if (!sh->geneve_tlv_option_resource) { 8239 claim_zero(mlx5_devx_cmd_destroy(obj)); 8240 ret = rte_flow_error_set(error, ENOMEM, 8241 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 8242 "GENEVE TLV object memory allocation failed"); 8243 goto exit; 8244 } 8245 geneve_opt_resource = sh->geneve_tlv_option_resource; 8246 geneve_opt_resource->obj = obj; 8247 geneve_opt_resource->option_class = geneve_opt_v->option_class; 8248 geneve_opt_resource->option_type = geneve_opt_v->option_type; 8249 geneve_opt_resource->length = geneve_opt_v->option_len; 8250 __atomic_store_n(&geneve_opt_resource->refcnt, 1, 8251 __ATOMIC_RELAXED); 8252 } 8253 exit: 8254 rte_spinlock_unlock(&sh->geneve_tlv_opt_sl); 8255 return ret; 8256 } 8257 8258 /** 8259 * Add Geneve TLV option item to matcher. 8260 * 8261 * @param[in, out] dev 8262 * Pointer to rte_eth_dev structure. 8263 * @param[in, out] matcher 8264 * Flow matcher. 8265 * @param[in, out] key 8266 * Flow matcher value. 8267 * @param[in] item 8268 * Flow pattern to translate. 8269 * @param[out] error 8270 * Pointer to error structure. 
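 *
 * Note that only the first dword of the option data is used for
 * matching below: opt_data_key/opt_data_mask are single big-endian
 * words copied into the misc3 geneve_tlv_option_0_data field.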
8271 */ 8272 static int 8273 flow_dv_translate_item_geneve_opt(struct rte_eth_dev *dev, void *matcher, 8274 void *key, const struct rte_flow_item *item, 8275 struct rte_flow_error *error) 8276 { 8277 const struct rte_flow_item_geneve_opt *geneve_opt_m = item->mask; 8278 const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec; 8279 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters); 8280 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters); 8281 void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher, 8282 misc_parameters_3); 8283 void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3); 8284 rte_be32_t opt_data_key = 0, opt_data_mask = 0; 8285 int ret = 0; 8286 8287 if (!geneve_opt_v) 8288 return -1; 8289 if (!geneve_opt_m) 8290 geneve_opt_m = &rte_flow_item_geneve_opt_mask; 8291 ret = flow_dev_geneve_tlv_option_resource_register(dev, item, 8292 error); 8293 if (ret) { 8294 DRV_LOG(ERR, "Failed to create geneve_tlv_obj"); 8295 return ret; 8296 } 8297 /* 8298 * Set the option length in GENEVE header if not requested. 8299 * The GENEVE TLV option length is expressed by the option length field 8300 * in the GENEVE header. 8301 * If the option length was not requested but the GENEVE TLV option item 8302 * is present we set the option length field implicitly. 8303 */ 8304 if (!MLX5_GET16(fte_match_set_misc, misc_m, geneve_opt_len)) { 8305 MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len, 8306 MLX5_GENEVE_OPTLEN_MASK); 8307 MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len, 8308 geneve_opt_v->option_len + 1); 8309 } 8310 /* Set the data. */ 8311 if (geneve_opt_v->data) { 8312 memcpy(&opt_data_key, geneve_opt_v->data, 8313 RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4), 8314 sizeof(opt_data_key))); 8315 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <= 8316 sizeof(opt_data_key)); 8317 memcpy(&opt_data_mask, geneve_opt_m->data, 8318 RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4), 8319 sizeof(opt_data_mask))); 8320 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <= 8321 sizeof(opt_data_mask)); 8322 MLX5_SET(fte_match_set_misc3, misc3_m, 8323 geneve_tlv_option_0_data, 8324 rte_be_to_cpu_32(opt_data_mask)); 8325 MLX5_SET(fte_match_set_misc3, misc3_v, 8326 geneve_tlv_option_0_data, 8327 rte_be_to_cpu_32(opt_data_key & opt_data_mask)); 8328 } 8329 return ret; 8330 } 8331 8332 /** 8333 * Add MPLS item to matcher and to the value. 8334 * 8335 * @param[in, out] matcher 8336 * Flow matcher. 8337 * @param[in, out] key 8338 * Flow matcher value. 8339 * @param[in] item 8340 * Flow pattern to translate. 8341 * @param[in] prev_layer 8342 * The protocol layer indicated in previous item. 8343 * @param[in] inner 8344 * Item is inner pattern. 
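 *
 * The MPLS item is matched below as one big-endian 32-bit label stack
 * entry (label:20, TC:3, S:1, TTL:8); e.g. an illustrative label of 16
 * with S = 1 and TTL 64 is the word 0x00010140.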
8345 */ 8346 static void 8347 flow_dv_translate_item_mpls(void *matcher, void *key, 8348 const struct rte_flow_item *item, 8349 uint64_t prev_layer, 8350 int inner) 8351 { 8352 const uint32_t *in_mpls_m = item->mask; 8353 const uint32_t *in_mpls_v = item->spec; 8354 uint32_t *out_mpls_m = 0; 8355 uint32_t *out_mpls_v = 0; 8356 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters); 8357 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters); 8358 void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher, 8359 misc_parameters_2); 8360 void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2); 8361 void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers); 8362 void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); 8363 8364 switch (prev_layer) { 8365 case MLX5_FLOW_LAYER_OUTER_L4_UDP: 8366 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff); 8367 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, 8368 MLX5_UDP_PORT_MPLS); 8369 break; 8370 case MLX5_FLOW_LAYER_GRE: 8371 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff); 8372 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol, 8373 RTE_ETHER_TYPE_MPLS); 8374 break; 8375 default: 8376 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff); 8377 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, 8378 IPPROTO_MPLS); 8379 break; 8380 } 8381 if (!in_mpls_v) 8382 return; 8383 if (!in_mpls_m) 8384 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask; 8385 switch (prev_layer) { 8386 case MLX5_FLOW_LAYER_OUTER_L4_UDP: 8387 out_mpls_m = 8388 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m, 8389 outer_first_mpls_over_udp); 8390 out_mpls_v = 8391 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v, 8392 outer_first_mpls_over_udp); 8393 break; 8394 case MLX5_FLOW_LAYER_GRE: 8395 out_mpls_m = 8396 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m, 8397 outer_first_mpls_over_gre); 8398 out_mpls_v = 8399 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v, 8400 outer_first_mpls_over_gre); 8401 break; 8402 default: 8403 /* Inner MPLS not over GRE is not supported. */ 8404 if (!inner) { 8405 out_mpls_m = 8406 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, 8407 misc2_m, 8408 outer_first_mpls); 8409 out_mpls_v = 8410 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, 8411 misc2_v, 8412 outer_first_mpls); 8413 } 8414 break; 8415 } 8416 if (out_mpls_m && out_mpls_v) { 8417 *out_mpls_m = *in_mpls_m; 8418 *out_mpls_v = *in_mpls_v & *in_mpls_m; 8419 } 8420 } 8421 8422 /** 8423 * Add metadata register item to matcher 8424 * 8425 * @param[in, out] matcher 8426 * Flow matcher. 8427 * @param[in, out] key 8428 * Flow matcher value. 
8429 * @param[in] reg_type 8430 * Type of device metadata register 8431 * @param[in] value 8432 * Register value 8433 * @param[in] mask 8434 * Register mask 8435 */ 8436 static void 8437 flow_dv_match_meta_reg(void *matcher, void *key, 8438 enum modify_reg reg_type, 8439 uint32_t data, uint32_t mask) 8440 { 8441 void *misc2_m = 8442 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2); 8443 void *misc2_v = 8444 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2); 8445 uint32_t temp; 8446 8447 data &= mask; 8448 switch (reg_type) { 8449 case REG_A: 8450 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, mask); 8451 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, data); 8452 break; 8453 case REG_B: 8454 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b, mask); 8455 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data); 8456 break; 8457 case REG_C_0: 8458 /* 8459 * The metadata register C0 field might be divided into 8460 * source vport index and META item value, we should set 8461 * this field according to specified mask, not as whole one. 8462 */ 8463 temp = MLX5_GET(fte_match_set_misc2, misc2_m, metadata_reg_c_0); 8464 temp |= mask; 8465 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, temp); 8466 temp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0); 8467 temp &= ~mask; 8468 temp |= data; 8469 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, temp); 8470 break; 8471 case REG_C_1: 8472 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask); 8473 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1, data); 8474 break; 8475 case REG_C_2: 8476 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2, mask); 8477 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2, data); 8478 break; 8479 case REG_C_3: 8480 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3, mask); 8481 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3, data); 8482 break; 8483 case REG_C_4: 8484 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4, mask); 8485 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4, data); 8486 break; 8487 case REG_C_5: 8488 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5, mask); 8489 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5, data); 8490 break; 8491 case REG_C_6: 8492 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6, mask); 8493 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6, data); 8494 break; 8495 case REG_C_7: 8496 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7, mask); 8497 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data); 8498 break; 8499 default: 8500 MLX5_ASSERT(false); 8501 break; 8502 } 8503 } 8504 8505 /** 8506 * Add MARK item to matcher 8507 * 8508 * @param[in] dev 8509 * The device to configure through. 8510 * @param[in, out] matcher 8511 * Flow matcher. 8512 * @param[in, out] key 8513 * Flow matcher value. 8514 * @param[in] item 8515 * Flow pattern to translate. 8516 */ 8517 static void 8518 flow_dv_translate_item_mark(struct rte_eth_dev *dev, 8519 void *matcher, void *key, 8520 const struct rte_flow_item *item) 8521 { 8522 struct mlx5_priv *priv = dev->data->dev_private; 8523 const struct rte_flow_item_mark *mark; 8524 uint32_t value; 8525 uint32_t mask; 8526 8527 mark = item->mask ? 
(const void *)item->mask :
8528 &rte_flow_item_mark_mask;
8529 mask = mark->id & priv->sh->dv_mark_mask;
8530 mark = (const void *)item->spec;
8531 MLX5_ASSERT(mark);
8532 value = mark->id & priv->sh->dv_mark_mask & mask;
8533 if (mask) {
8534 enum modify_reg reg;
8535
8536 /* Get the metadata register index for the mark. */
8537 reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
8538 MLX5_ASSERT(reg > 0);
8539 if (reg == REG_C_0) {
8540 struct mlx5_priv *priv = dev->data->dev_private;
8541 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
8542 uint32_t shl_c0 = rte_bsf32(msk_c0);
8543
8544 mask &= msk_c0;
8545 mask <<= shl_c0;
8546 value <<= shl_c0;
8547 }
8548 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
8549 }
8550 }
8551
8552 /**
8553 * Add META item to matcher
8554 *
8555 * @param[in] dev
8556 * The device to configure through.
8557 * @param[in, out] matcher
8558 * Flow matcher.
8559 * @param[in, out] key
8560 * Flow matcher value.
8561 * @param[in] attr
8562 * Attributes of flow that includes this item.
8563 * @param[in] item
8564 * Flow pattern to translate.
8565 */
8566 static void
8567 flow_dv_translate_item_meta(struct rte_eth_dev *dev,
8568 void *matcher, void *key,
8569 const struct rte_flow_attr *attr,
8570 const struct rte_flow_item *item)
8571 {
8572 const struct rte_flow_item_meta *meta_m;
8573 const struct rte_flow_item_meta *meta_v;
8574
8575 meta_m = (const void *)item->mask;
8576 if (!meta_m)
8577 meta_m = &rte_flow_item_meta_mask;
8578 meta_v = (const void *)item->spec;
8579 if (meta_v) {
8580 int reg;
8581 uint32_t value = meta_v->data;
8582 uint32_t mask = meta_m->data;
8583
8584 reg = flow_dv_get_metadata_reg(dev, attr, NULL);
8585 if (reg < 0)
8586 return;
8587 MLX5_ASSERT(reg != REG_NON);
8588 /*
8589 * In datapath code there is no endianness
8590 * conversion for performance reasons, all
8591 * pattern conversions are done in rte_flow.
8592 */
8593 value = rte_cpu_to_be_32(value);
8594 mask = rte_cpu_to_be_32(mask);
8595 if (reg == REG_C_0) {
8596 struct mlx5_priv *priv = dev->data->dev_private;
8597 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
8598 uint32_t shl_c0 = rte_bsf32(msk_c0);
8599 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
8600 uint32_t shr_c0 = __builtin_clz(priv->sh->dv_meta_mask);
8601
8602 value >>= shr_c0;
8603 mask >>= shr_c0;
8604 #endif
8605 value <<= shl_c0;
8606 mask <<= shl_c0;
8607 MLX5_ASSERT(msk_c0);
8608 MLX5_ASSERT(!(~msk_c0 & mask));
8609 }
8610 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
8611 }
8612 }
8613
8614 /**
8615 * Add vport metadata Reg C0 item to matcher
8616 *
8617 * @param[in, out] matcher
8618 * Flow matcher.
8619 * @param[in, out] key
8620 * Flow matcher value.
8621 * @param[in] value
8622 * Register C0 value to match; the mask argument selects the bits compared.
8623 */
8624 static void
8625 flow_dv_translate_item_meta_vport(void *matcher, void *key,
8626 uint32_t value, uint32_t mask)
8627 {
8628 flow_dv_match_meta_reg(matcher, key, REG_C_0, value, mask);
8629 }
8630
8631 /**
8632 * Add tag item to matcher
8633 *
8634 * @param[in] dev
8635 * The device to configure through.
8636 * @param[in, out] matcher
8637 * Flow matcher.
8638 * @param[in, out] key
8639 * Flow matcher value.
8640 * @param[in] item
8641 * Flow pattern to translate.
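 *
 * When the tag maps to REG_C_0 only part of the register is available;
 * e.g. with an illustrative dv_regc0_mask of 0xffff0000, rte_bsf32()
 * yields a shift of 16, so value and mask are moved into the upper half
 * of the register before matching.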
8642 */ 8643 static void 8644 flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev, 8645 void *matcher, void *key, 8646 const struct rte_flow_item *item) 8647 { 8648 const struct mlx5_rte_flow_item_tag *tag_v = item->spec; 8649 const struct mlx5_rte_flow_item_tag *tag_m = item->mask; 8650 uint32_t mask, value; 8651 8652 MLX5_ASSERT(tag_v); 8653 value = tag_v->data; 8654 mask = tag_m ? tag_m->data : UINT32_MAX; 8655 if (tag_v->id == REG_C_0) { 8656 struct mlx5_priv *priv = dev->data->dev_private; 8657 uint32_t msk_c0 = priv->sh->dv_regc0_mask; 8658 uint32_t shl_c0 = rte_bsf32(msk_c0); 8659 8660 mask &= msk_c0; 8661 mask <<= shl_c0; 8662 value <<= shl_c0; 8663 } 8664 flow_dv_match_meta_reg(matcher, key, tag_v->id, value, mask); 8665 } 8666 8667 /** 8668 * Add TAG item to matcher 8669 * 8670 * @param[in] dev 8671 * The devich to configure through. 8672 * @param[in, out] matcher 8673 * Flow matcher. 8674 * @param[in, out] key 8675 * Flow matcher value. 8676 * @param[in] item 8677 * Flow pattern to translate. 8678 */ 8679 static void 8680 flow_dv_translate_item_tag(struct rte_eth_dev *dev, 8681 void *matcher, void *key, 8682 const struct rte_flow_item *item) 8683 { 8684 const struct rte_flow_item_tag *tag_v = item->spec; 8685 const struct rte_flow_item_tag *tag_m = item->mask; 8686 enum modify_reg reg; 8687 8688 MLX5_ASSERT(tag_v); 8689 tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask; 8690 /* Get the metadata register index for the tag. */ 8691 reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL); 8692 MLX5_ASSERT(reg > 0); 8693 flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data); 8694 } 8695 8696 /** 8697 * Add source vport match to the specified matcher. 8698 * 8699 * @param[in, out] matcher 8700 * Flow matcher. 8701 * @param[in, out] key 8702 * Flow matcher value. 8703 * @param[in] port 8704 * Source vport value to match 8705 * @param[in] mask 8706 * Mask 8707 */ 8708 static void 8709 flow_dv_translate_item_source_vport(void *matcher, void *key, 8710 int16_t port, uint16_t mask) 8711 { 8712 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters); 8713 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters); 8714 8715 MLX5_SET(fte_match_set_misc, misc_m, source_port, mask); 8716 MLX5_SET(fte_match_set_misc, misc_v, source_port, port); 8717 } 8718 8719 /** 8720 * Translate port-id item to eswitch match on port-id. 8721 * 8722 * @param[in] dev 8723 * The devich to configure through. 8724 * @param[in, out] matcher 8725 * Flow matcher. 8726 * @param[in, out] key 8727 * Flow matcher value. 8728 * @param[in] item 8729 * Flow pattern to translate. 8730 * @param[in] 8731 * Flow attributes. 8732 * 8733 * @return 8734 * 0 on success, a negative errno value otherwise. 8735 */ 8736 static int 8737 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher, 8738 void *key, const struct rte_flow_item *item, 8739 const struct rte_flow_attr *attr) 8740 { 8741 const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL; 8742 const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL; 8743 struct mlx5_priv *priv; 8744 uint16_t mask, id; 8745 8746 mask = pid_m ? pid_m->id : 0xffff; 8747 id = pid_v ? pid_v->id : dev->data->port_id; 8748 priv = mlx5_port_to_eswitch_info(id, item == NULL); 8749 if (!priv) 8750 return -rte_errno; 8751 /* 8752 * Translate to vport field or to metadata, depending on mode. 8753 * Kernel can use either misc.source_port or half of C0 metadata 8754 * register. 
8755 */ 8756 if (priv->vport_meta_mask) { 8757 /* 8758 * Provide the hint for SW steering library 8759 * to insert the flow into ingress domain and 8760 * save the extra vport match. 8761 */ 8762 if (mask == 0xffff && priv->vport_id == 0xffff && 8763 priv->pf_bond < 0 && attr->transfer) 8764 flow_dv_translate_item_source_vport 8765 (matcher, key, priv->vport_id, mask); 8766 /* 8767 * We should always set the vport metadata register, 8768 * otherwise the SW steering library can drop 8769 * the rule if wire vport metadata value is not zero, 8770 * it depends on kernel configuration. 8771 */ 8772 flow_dv_translate_item_meta_vport(matcher, key, 8773 priv->vport_meta_tag, 8774 priv->vport_meta_mask); 8775 } else { 8776 flow_dv_translate_item_source_vport(matcher, key, 8777 priv->vport_id, mask); 8778 } 8779 return 0; 8780 } 8781 8782 /** 8783 * Add ICMP6 item to matcher and to the value. 8784 * 8785 * @param[in, out] matcher 8786 * Flow matcher. 8787 * @param[in, out] key 8788 * Flow matcher value. 8789 * @param[in] item 8790 * Flow pattern to translate. 8791 * @param[in] inner 8792 * Item is inner pattern. 8793 */ 8794 static void 8795 flow_dv_translate_item_icmp6(void *matcher, void *key, 8796 const struct rte_flow_item *item, 8797 int inner) 8798 { 8799 const struct rte_flow_item_icmp6 *icmp6_m = item->mask; 8800 const struct rte_flow_item_icmp6 *icmp6_v = item->spec; 8801 void *headers_m; 8802 void *headers_v; 8803 void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher, 8804 misc_parameters_3); 8805 void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3); 8806 if (inner) { 8807 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 8808 inner_headers); 8809 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); 8810 } else { 8811 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 8812 outer_headers); 8813 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); 8814 } 8815 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF); 8816 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6); 8817 if (!icmp6_v) 8818 return; 8819 if (!icmp6_m) 8820 icmp6_m = &rte_flow_item_icmp6_mask; 8821 MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type); 8822 MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type, 8823 icmp6_v->type & icmp6_m->type); 8824 MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code); 8825 MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code, 8826 icmp6_v->code & icmp6_m->code); 8827 } 8828 8829 /** 8830 * Add ICMP item to matcher and to the value. 8831 * 8832 * @param[in, out] matcher 8833 * Flow matcher. 8834 * @param[in, out] key 8835 * Flow matcher value. 8836 * @param[in] item 8837 * Flow pattern to translate. 8838 * @param[in] inner 8839 * Item is inner pattern. 
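 *
 * The identifier and sequence number are packed below into the single
 * icmp_header_data field as (ident << 16) | seq_nb; e.g. illustrative
 * ident 0x1234 and seq 0x0001 match as header data 0x12340001.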
8840 */ 8841 static void 8842 flow_dv_translate_item_icmp(void *matcher, void *key, 8843 const struct rte_flow_item *item, 8844 int inner) 8845 { 8846 const struct rte_flow_item_icmp *icmp_m = item->mask; 8847 const struct rte_flow_item_icmp *icmp_v = item->spec; 8848 uint32_t icmp_header_data_m = 0; 8849 uint32_t icmp_header_data_v = 0; 8850 void *headers_m; 8851 void *headers_v; 8852 void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher, 8853 misc_parameters_3); 8854 void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3); 8855 if (inner) { 8856 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 8857 inner_headers); 8858 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); 8859 } else { 8860 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 8861 outer_headers); 8862 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); 8863 } 8864 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF); 8865 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP); 8866 if (!icmp_v) 8867 return; 8868 if (!icmp_m) 8869 icmp_m = &rte_flow_item_icmp_mask; 8870 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type, 8871 icmp_m->hdr.icmp_type); 8872 MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type, 8873 icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type); 8874 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code, 8875 icmp_m->hdr.icmp_code); 8876 MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code, 8877 icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code); 8878 icmp_header_data_m = rte_be_to_cpu_16(icmp_m->hdr.icmp_seq_nb); 8879 icmp_header_data_m |= rte_be_to_cpu_16(icmp_m->hdr.icmp_ident) << 16; 8880 if (icmp_header_data_m) { 8881 icmp_header_data_v = rte_be_to_cpu_16(icmp_v->hdr.icmp_seq_nb); 8882 icmp_header_data_v |= 8883 rte_be_to_cpu_16(icmp_v->hdr.icmp_ident) << 16; 8884 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_header_data, 8885 icmp_header_data_m); 8886 MLX5_SET(fte_match_set_misc3, misc3_v, icmp_header_data, 8887 icmp_header_data_v & icmp_header_data_m); 8888 } 8889 } 8890 8891 /** 8892 * Add GTP item to matcher and to the value. 8893 * 8894 * @param[in, out] matcher 8895 * Flow matcher. 8896 * @param[in, out] key 8897 * Flow matcher value. 8898 * @param[in] item 8899 * Flow pattern to translate. 8900 * @param[in] inner 8901 * Item is inner pattern. 
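 *
 * A minimal sketch of an item this function consumes (the TEID value is
 * illustrative only):
 *
 *	struct rte_flow_item_gtp spec = { .teid = RTE_BE32(1234) };
 *	struct rte_flow_item_gtp mask = { .teid = RTE_BE32(0xffffffff) };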
8902 */ 8903 static void 8904 flow_dv_translate_item_gtp(void *matcher, void *key, 8905 const struct rte_flow_item *item, int inner) 8906 { 8907 const struct rte_flow_item_gtp *gtp_m = item->mask; 8908 const struct rte_flow_item_gtp *gtp_v = item->spec; 8909 void *headers_m; 8910 void *headers_v; 8911 void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher, 8912 misc_parameters_3); 8913 void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3); 8914 uint16_t dport = RTE_GTPU_UDP_PORT; 8915 8916 if (inner) { 8917 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 8918 inner_headers); 8919 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); 8920 } else { 8921 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 8922 outer_headers); 8923 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); 8924 } 8925 if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) { 8926 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF); 8927 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport); 8928 } 8929 if (!gtp_v) 8930 return; 8931 if (!gtp_m) 8932 gtp_m = &rte_flow_item_gtp_mask; 8933 MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags, 8934 gtp_m->v_pt_rsv_flags); 8935 MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags, 8936 gtp_v->v_pt_rsv_flags & gtp_m->v_pt_rsv_flags); 8937 MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type); 8938 MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type, 8939 gtp_v->msg_type & gtp_m->msg_type); 8940 MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_teid, 8941 rte_be_to_cpu_32(gtp_m->teid)); 8942 MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_teid, 8943 rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid)); 8944 } 8945 8946 /** 8947 * Add GTP PSC item to matcher. 8948 * 8949 * @param[in, out] matcher 8950 * Flow matcher. 8951 * @param[in, out] key 8952 * Flow matcher value. 8953 * @param[in] item 8954 * Flow pattern to translate. 8955 */ 8956 static int 8957 flow_dv_translate_item_gtp_psc(void *matcher, void *key, 8958 const struct rte_flow_item *item) 8959 { 8960 const struct rte_flow_item_gtp_psc *gtp_psc_m = item->mask; 8961 const struct rte_flow_item_gtp_psc *gtp_psc_v = item->spec; 8962 void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher, 8963 misc_parameters_3); 8964 void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3); 8965 union { 8966 uint32_t w32; 8967 struct { 8968 uint16_t seq_num; 8969 uint8_t npdu_num; 8970 uint8_t next_ext_header_type; 8971 }; 8972 } dw_2; 8973 uint8_t gtp_flags; 8974 8975 /* Always set E-flag match on one, regardless of GTP item settings. */ 8976 gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_m, gtpu_msg_flags); 8977 gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG; 8978 MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags, gtp_flags); 8979 gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_v, gtpu_msg_flags); 8980 gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG; 8981 MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags, gtp_flags); 8982 /*Set next extension header type. 
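 * The value 0x85 used below is the GTP-U next-extension-header type of
 * the PDU session container (3GPP TS 29.281), which carries the PSC
 * fields matched by this function.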
*/ 8983 dw_2.seq_num = 0; 8984 dw_2.npdu_num = 0; 8985 dw_2.next_ext_header_type = 0xff; 8986 MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_dw_2, 8987 rte_cpu_to_be_32(dw_2.w32)); 8988 dw_2.seq_num = 0; 8989 dw_2.npdu_num = 0; 8990 dw_2.next_ext_header_type = 0x85; 8991 MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_dw_2, 8992 rte_cpu_to_be_32(dw_2.w32)); 8993 if (gtp_psc_v) { 8994 union { 8995 uint32_t w32; 8996 struct { 8997 uint8_t len; 8998 uint8_t type_flags; 8999 uint8_t qfi; 9000 uint8_t reserved; 9001 }; 9002 } dw_0; 9003 9004 /*Set extension header PDU type and Qos. */ 9005 if (!gtp_psc_m) 9006 gtp_psc_m = &rte_flow_item_gtp_psc_mask; 9007 dw_0.w32 = 0; 9008 dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_m->pdu_type); 9009 dw_0.qfi = gtp_psc_m->qfi; 9010 MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_first_ext_dw_0, 9011 rte_cpu_to_be_32(dw_0.w32)); 9012 dw_0.w32 = 0; 9013 dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_v->pdu_type & 9014 gtp_psc_m->pdu_type); 9015 dw_0.qfi = gtp_psc_v->qfi & gtp_psc_m->qfi; 9016 MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_first_ext_dw_0, 9017 rte_cpu_to_be_32(dw_0.w32)); 9018 } 9019 return 0; 9020 } 9021 9022 /** 9023 * Add eCPRI item to matcher and to the value. 9024 * 9025 * @param[in] dev 9026 * The devich to configure through. 9027 * @param[in, out] matcher 9028 * Flow matcher. 9029 * @param[in, out] key 9030 * Flow matcher value. 9031 * @param[in] item 9032 * Flow pattern to translate. 9033 * @param[in] samples 9034 * Sample IDs to be used in the matching. 9035 */ 9036 static void 9037 flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher, 9038 void *key, const struct rte_flow_item *item) 9039 { 9040 struct mlx5_priv *priv = dev->data->dev_private; 9041 const struct rte_flow_item_ecpri *ecpri_m = item->mask; 9042 const struct rte_flow_item_ecpri *ecpri_v = item->spec; 9043 struct rte_ecpri_common_hdr common; 9044 void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher, 9045 misc_parameters_4); 9046 void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4); 9047 uint32_t *samples; 9048 void *dw_m; 9049 void *dw_v; 9050 9051 if (!ecpri_v) 9052 return; 9053 if (!ecpri_m) 9054 ecpri_m = &rte_flow_item_ecpri_mask; 9055 /* 9056 * Maximal four DW samples are supported in a single matching now. 9057 * Two are used now for a eCPRI matching: 9058 * 1. Type: one byte, mask should be 0x00ff0000 in network order 9059 * 2. ID of a message: one or two bytes, mask 0xffff0000 or 0xff000000 9060 * if any. 9061 */ 9062 if (!ecpri_m->hdr.common.u32) 9063 return; 9064 samples = priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0].ids; 9065 /* Need to take the whole DW as the mask to fill the entry. */ 9066 dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m, 9067 prog_sample_field_value_0); 9068 dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v, 9069 prog_sample_field_value_0); 9070 /* Already big endian (network order) in the header. */ 9071 *(uint32_t *)dw_m = ecpri_m->hdr.common.u32; 9072 *(uint32_t *)dw_v = ecpri_v->hdr.common.u32 & ecpri_m->hdr.common.u32; 9073 /* Sample#0, used for matching type, offset 0. */ 9074 MLX5_SET(fte_match_set_misc4, misc4_m, 9075 prog_sample_field_id_0, samples[0]); 9076 /* It makes no sense to set the sample ID in the mask field. */ 9077 MLX5_SET(fte_match_set_misc4, misc4_v, 9078 prog_sample_field_id_0, samples[0]); 9079 /* 9080 * Checking if message body part needs to be matched. 9081 * Some wildcard rules only matching type field should be supported. 
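 * When the body is matched, hdr.dummy[0] below carries the first 32
 * bits of the eCPRI payload (e.g. PC_ID plus SEQ_ID for IQ data
 * messages) and is compared through programmable sample #1.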
9082 */ 9083 if (ecpri_m->hdr.dummy[0]) { 9084 common.u32 = rte_be_to_cpu_32(ecpri_v->hdr.common.u32); 9085 switch (common.type) { 9086 case RTE_ECPRI_MSG_TYPE_IQ_DATA: 9087 case RTE_ECPRI_MSG_TYPE_RTC_CTRL: 9088 case RTE_ECPRI_MSG_TYPE_DLY_MSR: 9089 dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m, 9090 prog_sample_field_value_1); 9091 dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v, 9092 prog_sample_field_value_1); 9093 *(uint32_t *)dw_m = ecpri_m->hdr.dummy[0]; 9094 *(uint32_t *)dw_v = ecpri_v->hdr.dummy[0] & 9095 ecpri_m->hdr.dummy[0]; 9096 /* Sample#1, to match message body, offset 4. */ 9097 MLX5_SET(fte_match_set_misc4, misc4_m, 9098 prog_sample_field_id_1, samples[1]); 9099 MLX5_SET(fte_match_set_misc4, misc4_v, 9100 prog_sample_field_id_1, samples[1]); 9101 break; 9102 default: 9103 /* Others, do not match any sample ID. */ 9104 break; 9105 } 9106 } 9107 } 9108 9109 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 }; 9110 9111 #define HEADER_IS_ZERO(match_criteria, headers) \ 9112 !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \ 9113 matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \ 9114 9115 /** 9116 * Calculate flow matcher enable bitmap. 9117 * 9118 * @param match_criteria 9119 * Pointer to flow matcher criteria. 9120 * 9121 * @return 9122 * Bitmap of enabled fields. 9123 */ 9124 static uint8_t 9125 flow_dv_matcher_enable(uint32_t *match_criteria) 9126 { 9127 uint8_t match_criteria_enable; 9128 9129 match_criteria_enable = 9130 (!HEADER_IS_ZERO(match_criteria, outer_headers)) << 9131 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT; 9132 match_criteria_enable |= 9133 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) << 9134 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT; 9135 match_criteria_enable |= 9136 (!HEADER_IS_ZERO(match_criteria, inner_headers)) << 9137 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT; 9138 match_criteria_enable |= 9139 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) << 9140 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT; 9141 match_criteria_enable |= 9142 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) << 9143 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT; 9144 match_criteria_enable |= 9145 (!HEADER_IS_ZERO(match_criteria, misc_parameters_4)) << 9146 MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT; 9147 return match_criteria_enable; 9148 } 9149 9150 struct mlx5_hlist_entry * 9151 flow_dv_tbl_create_cb(struct mlx5_hlist *list, uint64_t key64, void *cb_ctx) 9152 { 9153 struct mlx5_dev_ctx_shared *sh = list->ctx; 9154 struct mlx5_flow_cb_ctx *ctx = cb_ctx; 9155 struct rte_eth_dev *dev = ctx->dev; 9156 struct mlx5_flow_tbl_data_entry *tbl_data; 9157 struct mlx5_flow_tbl_tunnel_prm *tt_prm = ctx->data; 9158 struct rte_flow_error *error = ctx->error; 9159 union mlx5_flow_tbl_key key = { .v64 = key64 }; 9160 struct mlx5_flow_tbl_resource *tbl; 9161 void *domain; 9162 uint32_t idx = 0; 9163 int ret; 9164 9165 tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx); 9166 if (!tbl_data) { 9167 rte_flow_error_set(error, ENOMEM, 9168 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 9169 NULL, 9170 "cannot allocate flow table data entry"); 9171 return NULL; 9172 } 9173 tbl_data->idx = idx; 9174 tbl_data->tunnel = tt_prm->tunnel; 9175 tbl_data->group_id = tt_prm->group_id; 9176 tbl_data->external = !!tt_prm->external; 9177 tbl_data->tunnel_offload = is_tunnel_offload_active(dev); 9178 tbl_data->is_egress = !!key.direction; 9179 tbl_data->is_transfer = !!key.domain; 9180 tbl_data->dummy = !!key.dummy; 9181 tbl_data->table_id = key.table_id; 9182 tbl = &tbl_data->tbl; 9183 if 
(key.dummy) 9184 return &tbl_data->entry; 9185 if (key.domain) 9186 domain = sh->fdb_domain; 9187 else if (key.direction) 9188 domain = sh->tx_domain; 9189 else 9190 domain = sh->rx_domain; 9191 ret = mlx5_flow_os_create_flow_tbl(domain, key.table_id, &tbl->obj); 9192 if (ret) { 9193 rte_flow_error_set(error, ENOMEM, 9194 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 9195 NULL, "cannot create flow table object"); 9196 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx); 9197 return NULL; 9198 } 9199 if (key.table_id) { 9200 ret = mlx5_flow_os_create_flow_action_dest_flow_tbl 9201 (tbl->obj, &tbl_data->jump.action); 9202 if (ret) { 9203 rte_flow_error_set(error, ENOMEM, 9204 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 9205 NULL, 9206 "cannot create flow jump action"); 9207 mlx5_flow_os_destroy_flow_tbl(tbl->obj); 9208 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx); 9209 return NULL; 9210 } 9211 } 9212 MKSTR(matcher_name, "%s_%s_%u_matcher_cache", 9213 key.domain ? "FDB" : "NIC", key.direction ? "egress" : "ingress", 9214 key.table_id); 9215 mlx5_cache_list_init(&tbl_data->matchers, matcher_name, 0, sh, 9216 flow_dv_matcher_create_cb, 9217 flow_dv_matcher_match_cb, 9218 flow_dv_matcher_remove_cb); 9219 return &tbl_data->entry; 9220 } 9221 9222 int 9223 flow_dv_tbl_match_cb(struct mlx5_hlist *list __rte_unused, 9224 struct mlx5_hlist_entry *entry, uint64_t key64, 9225 void *cb_ctx __rte_unused) 9226 { 9227 struct mlx5_flow_tbl_data_entry *tbl_data = 9228 container_of(entry, struct mlx5_flow_tbl_data_entry, entry); 9229 union mlx5_flow_tbl_key key = { .v64 = key64 }; 9230 9231 return tbl_data->table_id != key.table_id || 9232 tbl_data->dummy != key.dummy || 9233 tbl_data->is_transfer != key.domain || 9234 tbl_data->is_egress != key.direction; 9235 } 9236 9237 /** 9238 * Get a flow table. 9239 * 9240 * @param[in, out] dev 9241 * Pointer to rte_eth_dev structure. 9242 * @param[in] table_id 9243 * Table id to use. 9244 * @param[in] egress 9245 * Direction of the table. 9246 * @param[in] transfer 9247 * E-Switch or NIC flow. 9248 * @param[in] dummy 9249 * Dummy entry for dv API. 9250 * @param[out] error 9251 * pointer to error structure. 9252 * 9253 * @return 9254 * Returns tables resource based on the index, NULL in case of failed. 9255 */ 9256 struct mlx5_flow_tbl_resource * 9257 flow_dv_tbl_resource_get(struct rte_eth_dev *dev, 9258 uint32_t table_id, uint8_t egress, 9259 uint8_t transfer, 9260 bool external, 9261 const struct mlx5_flow_tunnel *tunnel, 9262 uint32_t group_id, uint8_t dummy, 9263 struct rte_flow_error *error) 9264 { 9265 struct mlx5_priv *priv = dev->data->dev_private; 9266 union mlx5_flow_tbl_key table_key = { 9267 { 9268 .table_id = table_id, 9269 .dummy = dummy, 9270 .domain = !!transfer, 9271 .direction = !!egress, 9272 } 9273 }; 9274 struct mlx5_flow_tbl_tunnel_prm tt_prm = { 9275 .tunnel = tunnel, 9276 .group_id = group_id, 9277 .external = external, 9278 }; 9279 struct mlx5_flow_cb_ctx ctx = { 9280 .dev = dev, 9281 .error = error, 9282 .data = &tt_prm, 9283 }; 9284 struct mlx5_hlist_entry *entry; 9285 struct mlx5_flow_tbl_data_entry *tbl_data; 9286 9287 entry = mlx5_hlist_register(priv->sh->flow_tbls, table_key.v64, &ctx); 9288 if (!entry) { 9289 rte_flow_error_set(error, ENOMEM, 9290 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 9291 "cannot get table"); 9292 return NULL; 9293 } 9294 DRV_LOG(DEBUG, "Table_id %u tunnel %u group %u registered.", 9295 table_id, tunnel ? 
tunnel->tunnel_id : 0, group_id); 9296 tbl_data = container_of(entry, struct mlx5_flow_tbl_data_entry, entry); 9297 return &tbl_data->tbl; 9298 } 9299 9300 void 9301 flow_dv_tbl_remove_cb(struct mlx5_hlist *list, 9302 struct mlx5_hlist_entry *entry) 9303 { 9304 struct mlx5_dev_ctx_shared *sh = list->ctx; 9305 struct mlx5_flow_tbl_data_entry *tbl_data = 9306 container_of(entry, struct mlx5_flow_tbl_data_entry, entry); 9307 9308 MLX5_ASSERT(entry && sh); 9309 if (tbl_data->jump.action) 9310 mlx5_flow_os_destroy_flow_action(tbl_data->jump.action); 9311 if (tbl_data->tbl.obj) 9312 mlx5_flow_os_destroy_flow_tbl(tbl_data->tbl.obj); 9313 if (tbl_data->tunnel_offload && tbl_data->external) { 9314 struct mlx5_hlist_entry *he; 9315 struct mlx5_hlist *tunnel_grp_hash; 9316 struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub; 9317 union tunnel_tbl_key tunnel_key = { 9318 .tunnel_id = tbl_data->tunnel ? 9319 tbl_data->tunnel->tunnel_id : 0, 9320 .group = tbl_data->group_id 9321 }; 9322 uint32_t table_id = tbl_data->table_id; 9323 9324 tunnel_grp_hash = tbl_data->tunnel ? 9325 tbl_data->tunnel->groups : 9326 thub->groups; 9327 he = mlx5_hlist_lookup(tunnel_grp_hash, tunnel_key.val, NULL); 9328 if (he) 9329 mlx5_hlist_unregister(tunnel_grp_hash, he); 9330 DRV_LOG(DEBUG, 9331 "Table_id %u tunnel %u group %u released.", 9332 table_id, 9333 tbl_data->tunnel ? 9334 tbl_data->tunnel->tunnel_id : 0, 9335 tbl_data->group_id); 9336 } 9337 mlx5_cache_list_destroy(&tbl_data->matchers); 9338 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx); 9339 } 9340 9341 /** 9342 * Release a flow table. 9343 * 9344 * @param[in] sh 9345 * Pointer to device shared structure. 9346 * @param[in] tbl 9347 * Table resource to be released. 9348 * 9349 * @return 9350 * Returns 0 if table was released, else return 1; 9351 */ 9352 static int 9353 flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh, 9354 struct mlx5_flow_tbl_resource *tbl) 9355 { 9356 struct mlx5_flow_tbl_data_entry *tbl_data = 9357 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl); 9358 9359 if (!tbl) 9360 return 0; 9361 return mlx5_hlist_unregister(sh->flow_tbls, &tbl_data->entry); 9362 } 9363 9364 int 9365 flow_dv_matcher_match_cb(struct mlx5_cache_list *list __rte_unused, 9366 struct mlx5_cache_entry *entry, void *cb_ctx) 9367 { 9368 struct mlx5_flow_cb_ctx *ctx = cb_ctx; 9369 struct mlx5_flow_dv_matcher *ref = ctx->data; 9370 struct mlx5_flow_dv_matcher *cur = container_of(entry, typeof(*cur), 9371 entry); 9372 9373 return cur->crc != ref->crc || 9374 cur->priority != ref->priority || 9375 memcmp((const void *)cur->mask.buf, 9376 (const void *)ref->mask.buf, ref->mask.size); 9377 } 9378 9379 struct mlx5_cache_entry * 9380 flow_dv_matcher_create_cb(struct mlx5_cache_list *list, 9381 struct mlx5_cache_entry *entry __rte_unused, 9382 void *cb_ctx) 9383 { 9384 struct mlx5_dev_ctx_shared *sh = list->ctx; 9385 struct mlx5_flow_cb_ctx *ctx = cb_ctx; 9386 struct mlx5_flow_dv_matcher *ref = ctx->data; 9387 struct mlx5_flow_dv_matcher *cache; 9388 struct mlx5dv_flow_matcher_attr dv_attr = { 9389 .type = IBV_FLOW_ATTR_NORMAL, 9390 .match_mask = (void *)&ref->mask, 9391 }; 9392 struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl, 9393 typeof(*tbl), tbl); 9394 int ret; 9395 9396 cache = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*cache), 0, SOCKET_ID_ANY); 9397 if (!cache) { 9398 rte_flow_error_set(ctx->error, ENOMEM, 9399 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 9400 "cannot create matcher"); 9401 return NULL; 9402 } 9403 *cache = *ref; 9404 
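/* The criteria enable bitmap is derived from the non-zero header groups of the mask. */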
dv_attr.match_criteria_enable = 9405 flow_dv_matcher_enable(cache->mask.buf); 9406 dv_attr.priority = ref->priority; 9407 if (tbl->is_egress) 9408 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS; 9409 ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->tbl.obj, 9410 &cache->matcher_object); 9411 if (ret) { 9412 mlx5_free(cache); 9413 rte_flow_error_set(ctx->error, ENOMEM, 9414 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 9415 "cannot create matcher"); 9416 return NULL; 9417 } 9418 return &cache->entry; 9419 } 9420 9421 /** 9422 * Register the flow matcher. 9423 * 9424 * @param[in, out] dev 9425 * Pointer to rte_eth_dev structure. 9426 * @param[in, out] matcher 9427 * Pointer to flow matcher. 9428 * @param[in, out] key 9429 * Pointer to flow table key. 9430 * @parm[in, out] dev_flow 9431 * Pointer to the dev_flow. 9432 * @param[out] error 9433 * pointer to error structure. 9434 * 9435 * @return 9436 * 0 on success otherwise -errno and errno is set. 9437 */ 9438 static int 9439 flow_dv_matcher_register(struct rte_eth_dev *dev, 9440 struct mlx5_flow_dv_matcher *ref, 9441 union mlx5_flow_tbl_key *key, 9442 struct mlx5_flow *dev_flow, 9443 const struct mlx5_flow_tunnel *tunnel, 9444 uint32_t group_id, 9445 struct rte_flow_error *error) 9446 { 9447 struct mlx5_cache_entry *entry; 9448 struct mlx5_flow_dv_matcher *cache; 9449 struct mlx5_flow_tbl_resource *tbl; 9450 struct mlx5_flow_tbl_data_entry *tbl_data; 9451 struct mlx5_flow_cb_ctx ctx = { 9452 .error = error, 9453 .data = ref, 9454 }; 9455 9456 /** 9457 * tunnel offload API requires this registration for cases when 9458 * tunnel match rule was inserted before tunnel set rule. 9459 */ 9460 tbl = flow_dv_tbl_resource_get(dev, key->table_id, 9461 key->direction, key->domain, 9462 dev_flow->external, tunnel, 9463 group_id, 0, error); 9464 if (!tbl) 9465 return -rte_errno; /* No need to refill the error info */ 9466 tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl); 9467 ref->tbl = tbl; 9468 entry = mlx5_cache_register(&tbl_data->matchers, &ctx); 9469 if (!entry) { 9470 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl); 9471 return rte_flow_error_set(error, ENOMEM, 9472 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 9473 "cannot allocate ref memory"); 9474 } 9475 cache = container_of(entry, typeof(*cache), entry); 9476 dev_flow->handle->dvh.matcher = cache; 9477 return 0; 9478 } 9479 9480 struct mlx5_hlist_entry * 9481 flow_dv_tag_create_cb(struct mlx5_hlist *list, uint64_t key, void *ctx) 9482 { 9483 struct mlx5_dev_ctx_shared *sh = list->ctx; 9484 struct rte_flow_error *error = ctx; 9485 struct mlx5_flow_dv_tag_resource *entry; 9486 uint32_t idx = 0; 9487 int ret; 9488 9489 entry = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG], &idx); 9490 if (!entry) { 9491 rte_flow_error_set(error, ENOMEM, 9492 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 9493 "cannot allocate resource memory"); 9494 return NULL; 9495 } 9496 entry->idx = idx; 9497 entry->tag_id = key; 9498 ret = mlx5_flow_os_create_flow_action_tag(key, 9499 &entry->action); 9500 if (ret) { 9501 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], idx); 9502 rte_flow_error_set(error, ENOMEM, 9503 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 9504 NULL, "cannot create action"); 9505 return NULL; 9506 } 9507 return &entry->entry; 9508 } 9509 9510 int 9511 flow_dv_tag_match_cb(struct mlx5_hlist *list __rte_unused, 9512 struct mlx5_hlist_entry *entry, uint64_t key, 9513 void *cb_ctx __rte_unused) 9514 { 9515 struct mlx5_flow_dv_tag_resource *tag = 9516 container_of(entry, struct mlx5_flow_dv_tag_resource, entry); 9517 
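/* Hash list match callback: return 0 on an exact tag ID match, non-zero otherwise. */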
9518 return key != tag->tag_id; 9519 } 9520 9521 /** 9522 * Find existing tag resource or create and register a new one. 9523 * 9524 * @param dev[in, out] 9525 * Pointer to rte_eth_dev structure. 9526 * @param[in, out] tag_be24 9527 * Tag value in big endian then R-shift 8. 9528 * @parm[in, out] dev_flow 9529 * Pointer to the dev_flow. 9530 * @param[out] error 9531 * pointer to error structure. 9532 * 9533 * @return 9534 * 0 on success otherwise -errno and errno is set. 9535 */ 9536 static int 9537 flow_dv_tag_resource_register 9538 (struct rte_eth_dev *dev, 9539 uint32_t tag_be24, 9540 struct mlx5_flow *dev_flow, 9541 struct rte_flow_error *error) 9542 { 9543 struct mlx5_priv *priv = dev->data->dev_private; 9544 struct mlx5_flow_dv_tag_resource *cache_resource; 9545 struct mlx5_hlist_entry *entry; 9546 9547 entry = mlx5_hlist_register(priv->sh->tag_table, tag_be24, error); 9548 if (entry) { 9549 cache_resource = container_of 9550 (entry, struct mlx5_flow_dv_tag_resource, entry); 9551 dev_flow->handle->dvh.rix_tag = cache_resource->idx; 9552 dev_flow->dv.tag_resource = cache_resource; 9553 return 0; 9554 } 9555 return -rte_errno; 9556 } 9557 9558 void 9559 flow_dv_tag_remove_cb(struct mlx5_hlist *list, 9560 struct mlx5_hlist_entry *entry) 9561 { 9562 struct mlx5_dev_ctx_shared *sh = list->ctx; 9563 struct mlx5_flow_dv_tag_resource *tag = 9564 container_of(entry, struct mlx5_flow_dv_tag_resource, entry); 9565 9566 MLX5_ASSERT(tag && sh && tag->action); 9567 claim_zero(mlx5_flow_os_destroy_flow_action(tag->action)); 9568 DRV_LOG(DEBUG, "Tag %p: removed.", (void *)tag); 9569 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx); 9570 } 9571 9572 /** 9573 * Release the tag. 9574 * 9575 * @param dev 9576 * Pointer to Ethernet device. 9577 * @param tag_idx 9578 * Tag index. 9579 * 9580 * @return 9581 * 1 while a reference on it exists, 0 when freed. 9582 */ 9583 static int 9584 flow_dv_tag_release(struct rte_eth_dev *dev, 9585 uint32_t tag_idx) 9586 { 9587 struct mlx5_priv *priv = dev->data->dev_private; 9588 struct mlx5_flow_dv_tag_resource *tag; 9589 9590 tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx); 9591 if (!tag) 9592 return 0; 9593 DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--", 9594 dev->data->port_id, (void *)tag, tag->entry.ref_cnt); 9595 return mlx5_hlist_unregister(priv->sh->tag_table, &tag->entry); 9596 } 9597 9598 /** 9599 * Translate port ID action to vport. 9600 * 9601 * @param[in] dev 9602 * Pointer to rte_eth_dev structure. 9603 * @param[in] action 9604 * Pointer to the port ID action. 9605 * @param[out] dst_port_id 9606 * The target port ID. 9607 * @param[out] error 9608 * Pointer to the error structure. 9609 * 9610 * @return 9611 * 0 on success, a negative errno value otherwise and rte_errno is set. 9612 */ 9613 static int 9614 flow_dv_translate_action_port_id(struct rte_eth_dev *dev, 9615 const struct rte_flow_action *action, 9616 uint32_t *dst_port_id, 9617 struct rte_flow_error *error) 9618 { 9619 uint32_t port; 9620 struct mlx5_priv *priv; 9621 const struct rte_flow_action_port_id *conf = 9622 (const struct rte_flow_action_port_id *)action->conf; 9623 9624 port = conf->original ? dev->data->port_id : conf->id; 9625 priv = mlx5_port_to_eswitch_info(port, false); 9626 if (!priv) 9627 return rte_flow_error_set(error, -rte_errno, 9628 RTE_FLOW_ERROR_TYPE_ACTION, 9629 NULL, 9630 "No eswitch info was found for port"); 9631 #ifdef HAVE_MLX5DV_DR_DEVX_PORT 9632 /* 9633 * This parameter is transferred to 9634 * mlx5dv_dr_action_create_dest_ib_port(). 
9635 */ 9636 *dst_port_id = priv->dev_port; 9637 #else 9638 /* 9639 * Legacy mode, no LAG configurations is supported. 9640 * This parameter is transferred to 9641 * mlx5dv_dr_action_create_dest_vport(). 9642 */ 9643 *dst_port_id = priv->vport_id; 9644 #endif 9645 return 0; 9646 } 9647 9648 /** 9649 * Create a counter with aging configuration. 9650 * 9651 * @param[in] dev 9652 * Pointer to rte_eth_dev structure. 9653 * @param[out] count 9654 * Pointer to the counter action configuration. 9655 * @param[in] age 9656 * Pointer to the aging action configuration. 9657 * 9658 * @return 9659 * Index to flow counter on success, 0 otherwise. 9660 */ 9661 static uint32_t 9662 flow_dv_translate_create_counter(struct rte_eth_dev *dev, 9663 struct mlx5_flow *dev_flow, 9664 const struct rte_flow_action_count *count, 9665 const struct rte_flow_action_age *age) 9666 { 9667 uint32_t counter; 9668 struct mlx5_age_param *age_param; 9669 9670 if (count && count->shared) 9671 counter = flow_dv_counter_get_shared(dev, count->id); 9672 else 9673 counter = flow_dv_counter_alloc(dev, !!age); 9674 if (!counter || age == NULL) 9675 return counter; 9676 age_param = flow_dv_counter_idx_get_age(dev, counter); 9677 age_param->context = age->context ? age->context : 9678 (void *)(uintptr_t)(dev_flow->flow_idx); 9679 age_param->timeout = age->timeout; 9680 age_param->port_id = dev->data->port_id; 9681 __atomic_store_n(&age_param->sec_since_last_hit, 0, __ATOMIC_RELAXED); 9682 __atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED); 9683 return counter; 9684 } 9685 9686 /** 9687 * Add Tx queue matcher 9688 * 9689 * @param[in] dev 9690 * Pointer to the dev struct. 9691 * @param[in, out] matcher 9692 * Flow matcher. 9693 * @param[in, out] key 9694 * Flow matcher value. 9695 * @param[in] item 9696 * Flow pattern to translate. 9697 * @param[in] inner 9698 * Item is inner pattern. 9699 */ 9700 static void 9701 flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev, 9702 void *matcher, void *key, 9703 const struct rte_flow_item *item) 9704 { 9705 const struct mlx5_rte_flow_item_tx_queue *queue_m; 9706 const struct mlx5_rte_flow_item_tx_queue *queue_v; 9707 void *misc_m = 9708 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters); 9709 void *misc_v = 9710 MLX5_ADDR_OF(fte_match_param, key, misc_parameters); 9711 struct mlx5_txq_ctrl *txq; 9712 uint32_t queue; 9713 9714 9715 queue_m = (const void *)item->mask; 9716 if (!queue_m) 9717 return; 9718 queue_v = (const void *)item->spec; 9719 if (!queue_v) 9720 return; 9721 txq = mlx5_txq_get(dev, queue_v->queue); 9722 if (!txq) 9723 return; 9724 queue = txq->obj->sq->id; 9725 MLX5_SET(fte_match_set_misc, misc_m, source_sqn, queue_m->queue); 9726 MLX5_SET(fte_match_set_misc, misc_v, source_sqn, 9727 queue & queue_m->queue); 9728 mlx5_txq_release(dev, queue_v->queue); 9729 } 9730 9731 /** 9732 * Set the hash fields according to the @p flow information. 9733 * 9734 * @param[in] dev_flow 9735 * Pointer to the mlx5_flow. 9736 * @param[in] rss_desc 9737 * Pointer to the mlx5_flow_rss_desc. 
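 *
 * The selected IBV_RX_HASH_* bits are accumulated into dev_flow->hash_fields.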
9738 */ 9739 static void 9740 flow_dv_hashfields_set(struct mlx5_flow *dev_flow, 9741 struct mlx5_flow_rss_desc *rss_desc) 9742 { 9743 uint64_t items = dev_flow->handle->layers; 9744 int rss_inner = 0; 9745 uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types); 9746 9747 dev_flow->hash_fields = 0; 9748 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT 9749 if (rss_desc->level >= 2) { 9750 dev_flow->hash_fields |= IBV_RX_HASH_INNER; 9751 rss_inner = 1; 9752 } 9753 #endif 9754 if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) || 9755 (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) { 9756 if (rss_types & MLX5_IPV4_LAYER_TYPES) { 9757 if (rss_types & ETH_RSS_L3_SRC_ONLY) 9758 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4; 9759 else if (rss_types & ETH_RSS_L3_DST_ONLY) 9760 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4; 9761 else 9762 dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH; 9763 } 9764 } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) || 9765 (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) { 9766 if (rss_types & MLX5_IPV6_LAYER_TYPES) { 9767 if (rss_types & ETH_RSS_L3_SRC_ONLY) 9768 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6; 9769 else if (rss_types & ETH_RSS_L3_DST_ONLY) 9770 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6; 9771 else 9772 dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH; 9773 } 9774 } 9775 if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) || 9776 (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) { 9777 if (rss_types & ETH_RSS_UDP) { 9778 if (rss_types & ETH_RSS_L4_SRC_ONLY) 9779 dev_flow->hash_fields |= 9780 IBV_RX_HASH_SRC_PORT_UDP; 9781 else if (rss_types & ETH_RSS_L4_DST_ONLY) 9782 dev_flow->hash_fields |= 9783 IBV_RX_HASH_DST_PORT_UDP; 9784 else 9785 dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH; 9786 } 9787 } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) || 9788 (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) { 9789 if (rss_types & ETH_RSS_TCP) { 9790 if (rss_types & ETH_RSS_L4_SRC_ONLY) 9791 dev_flow->hash_fields |= 9792 IBV_RX_HASH_SRC_PORT_TCP; 9793 else if (rss_types & ETH_RSS_L4_DST_ONLY) 9794 dev_flow->hash_fields |= 9795 IBV_RX_HASH_DST_PORT_TCP; 9796 else 9797 dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH; 9798 } 9799 } 9800 } 9801 9802 /** 9803 * Prepare an Rx Hash queue. 9804 * 9805 * @param dev 9806 * Pointer to Ethernet device. 9807 * @param[in] dev_flow 9808 * Pointer to the mlx5_flow. 9809 * @param[in] rss_desc 9810 * Pointer to the mlx5_flow_rss_desc. 9811 * @param[out] hrxq_idx 9812 * Hash Rx queue index. 9813 * 9814 * @return 9815 * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set. 9816 */ 9817 static struct mlx5_hrxq * 9818 flow_dv_hrxq_prepare(struct rte_eth_dev *dev, 9819 struct mlx5_flow *dev_flow, 9820 struct mlx5_flow_rss_desc *rss_desc, 9821 uint32_t *hrxq_idx) 9822 { 9823 struct mlx5_priv *priv = dev->data->dev_private; 9824 struct mlx5_flow_handle *dh = dev_flow->handle; 9825 struct mlx5_hrxq *hrxq; 9826 9827 MLX5_ASSERT(rss_desc->queue_num); 9828 rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN; 9829 rss_desc->hash_fields = dev_flow->hash_fields; 9830 rss_desc->tunnel = !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL); 9831 rss_desc->shared_rss = 0; 9832 *hrxq_idx = mlx5_hrxq_get(dev, rss_desc); 9833 if (!*hrxq_idx) 9834 return NULL; 9835 hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], 9836 *hrxq_idx); 9837 return hrxq; 9838 } 9839 9840 /** 9841 * Release sample sub action resource. 
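 * Each held sub-action reference (Rx hash queue, encap/decap, port ID, tag, jump) is released and its index cleared.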
9842 * 9843 * @param[in, out] dev 9844 * Pointer to rte_eth_dev structure. 9845 * @param[in] act_res 9846 * Pointer to sample sub action resource. 9847 */ 9848 static void 9849 flow_dv_sample_sub_actions_release(struct rte_eth_dev *dev, 9850 struct mlx5_flow_sub_actions_idx *act_res) 9851 { 9852 if (act_res->rix_hrxq) { 9853 mlx5_hrxq_release(dev, act_res->rix_hrxq); 9854 act_res->rix_hrxq = 0; 9855 } 9856 if (act_res->rix_encap_decap) { 9857 flow_dv_encap_decap_resource_release(dev, 9858 act_res->rix_encap_decap); 9859 act_res->rix_encap_decap = 0; 9860 } 9861 if (act_res->rix_port_id_action) { 9862 flow_dv_port_id_action_resource_release(dev, 9863 act_res->rix_port_id_action); 9864 act_res->rix_port_id_action = 0; 9865 } 9866 if (act_res->rix_tag) { 9867 flow_dv_tag_release(dev, act_res->rix_tag); 9868 act_res->rix_tag = 0; 9869 } 9870 if (act_res->rix_jump) { 9871 flow_dv_jump_tbl_resource_release(dev, act_res->rix_jump); 9872 act_res->rix_jump = 0; 9873 } 9874 } 9875 9876 int 9877 flow_dv_sample_match_cb(struct mlx5_cache_list *list __rte_unused, 9878 struct mlx5_cache_entry *entry, void *cb_ctx) 9879 { 9880 struct mlx5_flow_cb_ctx *ctx = cb_ctx; 9881 struct rte_eth_dev *dev = ctx->dev; 9882 struct mlx5_flow_dv_sample_resource *resource = ctx->data; 9883 struct mlx5_flow_dv_sample_resource *cache_resource = 9884 container_of(entry, typeof(*cache_resource), entry); 9885 9886 if (resource->ratio == cache_resource->ratio && 9887 resource->ft_type == cache_resource->ft_type && 9888 resource->ft_id == cache_resource->ft_id && 9889 resource->set_action == cache_resource->set_action && 9890 !memcmp((void *)&resource->sample_act, 9891 (void *)&cache_resource->sample_act, 9892 sizeof(struct mlx5_flow_sub_actions_list))) { 9893 /* 9894 * Existing sample action should release the prepared 9895 * sub-actions reference counter. 9896 */ 9897 flow_dv_sample_sub_actions_release(dev, 9898 &resource->sample_idx); 9899 return 0; 9900 } 9901 return 1; 9902 } 9903 9904 struct mlx5_cache_entry * 9905 flow_dv_sample_create_cb(struct mlx5_cache_list *list __rte_unused, 9906 struct mlx5_cache_entry *entry __rte_unused, 9907 void *cb_ctx) 9908 { 9909 struct mlx5_flow_cb_ctx *ctx = cb_ctx; 9910 struct rte_eth_dev *dev = ctx->dev; 9911 struct mlx5_flow_dv_sample_resource *resource = ctx->data; 9912 void **sample_dv_actions = resource->sub_actions; 9913 struct mlx5_flow_dv_sample_resource *cache_resource; 9914 struct mlx5dv_dr_flow_sampler_attr sampler_attr; 9915 struct mlx5_priv *priv = dev->data->dev_private; 9916 struct mlx5_dev_ctx_shared *sh = priv->sh; 9917 struct mlx5_flow_tbl_resource *tbl; 9918 uint32_t idx = 0; 9919 const uint32_t next_ft_step = 1; 9920 uint32_t next_ft_id = resource->ft_id + next_ft_step; 9921 uint8_t is_egress = 0; 9922 uint8_t is_transfer = 0; 9923 struct rte_flow_error *error = ctx->error; 9924 9925 /* Register new sample resource. 
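The resource is allocated from the sample indexed pool; the index is released again on any failure below.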
*/ 9926 cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx); 9927 if (!cache_resource) { 9928 rte_flow_error_set(error, ENOMEM, 9929 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 9930 NULL, 9931 "cannot allocate resource memory"); 9932 return NULL; 9933 } 9934 *cache_resource = *resource; 9935 /* Create normal path table level */ 9936 if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) 9937 is_transfer = 1; 9938 else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX) 9939 is_egress = 1; 9940 tbl = flow_dv_tbl_resource_get(dev, next_ft_id, 9941 is_egress, is_transfer, 9942 true, NULL, 0, 0, error); 9943 if (!tbl) { 9944 rte_flow_error_set(error, ENOMEM, 9945 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 9946 NULL, 9947 "fail to create normal path table " 9948 "for sample"); 9949 goto error; 9950 } 9951 cache_resource->normal_path_tbl = tbl; 9952 if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) { 9953 if (!sh->default_miss_action) { 9954 rte_flow_error_set(error, ENOMEM, 9955 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 9956 NULL, 9957 "default miss action was not " 9958 "created"); 9959 goto error; 9960 } 9961 sample_dv_actions[resource->sample_act.actions_num++] = 9962 sh->default_miss_action; 9963 } 9964 /* Create a DR sample action */ 9965 sampler_attr.sample_ratio = cache_resource->ratio; 9966 sampler_attr.default_next_table = tbl->obj; 9967 sampler_attr.num_sample_actions = resource->sample_act.actions_num; 9968 sampler_attr.sample_actions = (struct mlx5dv_dr_action **) 9969 &sample_dv_actions[0]; 9970 sampler_attr.action = cache_resource->set_action; 9971 if (mlx5_os_flow_dr_create_flow_action_sampler 9972 (&sampler_attr, &cache_resource->verbs_action)) { 9973 rte_flow_error_set(error, ENOMEM, 9974 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 9975 NULL, "cannot create sample action"); 9976 goto error; 9977 } 9978 cache_resource->idx = idx; 9979 cache_resource->dev = dev; 9980 return &cache_resource->entry; 9981 error: 9982 if (cache_resource->ft_type != MLX5DV_FLOW_TABLE_TYPE_FDB) 9983 flow_dv_sample_sub_actions_release(dev, 9984 &cache_resource->sample_idx); 9985 if (cache_resource->normal_path_tbl) 9986 flow_dv_tbl_resource_release(MLX5_SH(dev), 9987 cache_resource->normal_path_tbl); 9988 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_SAMPLE], idx); 9989 return NULL; 9990 9991 } 9992 9993 /** 9994 * Find existing sample resource or create and register a new one. 9995 * 9996 * @param[in, out] dev 9997 * Pointer to rte_eth_dev structure. 9998 * @param[in] resource 9999 * Pointer to sample resource. 10000 * @parm[in, out] dev_flow 10001 * Pointer to the dev_flow. 10002 * @param[out] error 10003 * pointer to error structure. 10004 * 10005 * @return 10006 * 0 on success otherwise -errno and errno is set. 
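 *
 * Illustrative sketch only (field values are assumptions, not taken from a
 * real flow):
 *
 *   struct mlx5_flow_dv_sample_resource res = { .ratio = 2 };
 *
 *   res.ft_id = dev_flow->dv.group;
 *   if (flow_dv_sample_resource_register(dev, &res, dev_flow, error))
 *           return -rte_errno;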
10007 */ 10008 static int 10009 flow_dv_sample_resource_register(struct rte_eth_dev *dev, 10010 struct mlx5_flow_dv_sample_resource *resource, 10011 struct mlx5_flow *dev_flow, 10012 struct rte_flow_error *error) 10013 { 10014 struct mlx5_flow_dv_sample_resource *cache_resource; 10015 struct mlx5_cache_entry *entry; 10016 struct mlx5_priv *priv = dev->data->dev_private; 10017 struct mlx5_flow_cb_ctx ctx = { 10018 .dev = dev, 10019 .error = error, 10020 .data = resource, 10021 }; 10022 10023 entry = mlx5_cache_register(&priv->sh->sample_action_list, &ctx); 10024 if (!entry) 10025 return -rte_errno; 10026 cache_resource = container_of(entry, typeof(*cache_resource), entry); 10027 dev_flow->handle->dvh.rix_sample = cache_resource->idx; 10028 dev_flow->dv.sample_res = cache_resource; 10029 return 0; 10030 } 10031 10032 int 10033 flow_dv_dest_array_match_cb(struct mlx5_cache_list *list __rte_unused, 10034 struct mlx5_cache_entry *entry, void *cb_ctx) 10035 { 10036 struct mlx5_flow_cb_ctx *ctx = cb_ctx; 10037 struct mlx5_flow_dv_dest_array_resource *resource = ctx->data; 10038 struct rte_eth_dev *dev = ctx->dev; 10039 struct mlx5_flow_dv_dest_array_resource *cache_resource = 10040 container_of(entry, typeof(*cache_resource), entry); 10041 uint32_t idx = 0; 10042 10043 if (resource->num_of_dest == cache_resource->num_of_dest && 10044 resource->ft_type == cache_resource->ft_type && 10045 !memcmp((void *)cache_resource->sample_act, 10046 (void *)resource->sample_act, 10047 (resource->num_of_dest * 10048 sizeof(struct mlx5_flow_sub_actions_list)))) { 10049 /* 10050 * Existing sample action should release the prepared 10051 * sub-actions reference counter. 10052 */ 10053 for (idx = 0; idx < resource->num_of_dest; idx++) 10054 flow_dv_sample_sub_actions_release(dev, 10055 &resource->sample_idx[idx]); 10056 return 0; 10057 } 10058 return 1; 10059 } 10060 10061 struct mlx5_cache_entry * 10062 flow_dv_dest_array_create_cb(struct mlx5_cache_list *list __rte_unused, 10063 struct mlx5_cache_entry *entry __rte_unused, 10064 void *cb_ctx) 10065 { 10066 struct mlx5_flow_cb_ctx *ctx = cb_ctx; 10067 struct rte_eth_dev *dev = ctx->dev; 10068 struct mlx5_flow_dv_dest_array_resource *cache_resource; 10069 struct mlx5_flow_dv_dest_array_resource *resource = ctx->data; 10070 struct mlx5dv_dr_action_dest_attr *dest_attr[MLX5_MAX_DEST_NUM] = { 0 }; 10071 struct mlx5dv_dr_action_dest_reformat dest_reformat[MLX5_MAX_DEST_NUM]; 10072 struct mlx5_priv *priv = dev->data->dev_private; 10073 struct mlx5_dev_ctx_shared *sh = priv->sh; 10074 struct mlx5_flow_sub_actions_list *sample_act; 10075 struct mlx5dv_dr_domain *domain; 10076 uint32_t idx = 0, res_idx = 0; 10077 struct rte_flow_error *error = ctx->error; 10078 uint64_t action_flags; 10079 int ret; 10080 10081 /* Register new destination array resource. 
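Allocated from the destination-array indexed pool; the index is released on any failure below.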
*/ 10082 cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY], 10083 &res_idx); 10084 if (!cache_resource) { 10085 rte_flow_error_set(error, ENOMEM, 10086 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 10087 NULL, 10088 "cannot allocate resource memory"); 10089 return NULL; 10090 } 10091 *cache_resource = *resource; 10092 if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) 10093 domain = sh->fdb_domain; 10094 else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX) 10095 domain = sh->rx_domain; 10096 else 10097 domain = sh->tx_domain; 10098 for (idx = 0; idx < resource->num_of_dest; idx++) { 10099 dest_attr[idx] = (struct mlx5dv_dr_action_dest_attr *) 10100 mlx5_malloc(MLX5_MEM_ZERO, 10101 sizeof(struct mlx5dv_dr_action_dest_attr), 10102 0, SOCKET_ID_ANY); 10103 if (!dest_attr[idx]) { 10104 rte_flow_error_set(error, ENOMEM, 10105 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 10106 NULL, 10107 "cannot allocate resource memory"); 10108 goto error; 10109 } 10110 dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST; 10111 sample_act = &resource->sample_act[idx]; 10112 action_flags = sample_act->action_flags; 10113 switch (action_flags) { 10114 case MLX5_FLOW_ACTION_QUEUE: 10115 dest_attr[idx]->dest = sample_act->dr_queue_action; 10116 break; 10117 case (MLX5_FLOW_ACTION_PORT_ID | MLX5_FLOW_ACTION_ENCAP): 10118 dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST_REFORMAT; 10119 dest_attr[idx]->dest_reformat = &dest_reformat[idx]; 10120 dest_attr[idx]->dest_reformat->reformat = 10121 sample_act->dr_encap_action; 10122 dest_attr[idx]->dest_reformat->dest = 10123 sample_act->dr_port_id_action; 10124 break; 10125 case MLX5_FLOW_ACTION_PORT_ID: 10126 dest_attr[idx]->dest = sample_act->dr_port_id_action; 10127 break; 10128 case MLX5_FLOW_ACTION_JUMP: 10129 dest_attr[idx]->dest = sample_act->dr_jump_action; 10130 break; 10131 default: 10132 rte_flow_error_set(error, EINVAL, 10133 RTE_FLOW_ERROR_TYPE_ACTION, 10134 NULL, 10135 "unsupported actions type"); 10136 goto error; 10137 } 10138 } 10139 /* create a dest array actioin */ 10140 ret = mlx5_os_flow_dr_create_flow_action_dest_array 10141 (domain, 10142 cache_resource->num_of_dest, 10143 dest_attr, 10144 &cache_resource->action); 10145 if (ret) { 10146 rte_flow_error_set(error, ENOMEM, 10147 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 10148 NULL, 10149 "cannot create destination array action"); 10150 goto error; 10151 } 10152 cache_resource->idx = res_idx; 10153 cache_resource->dev = dev; 10154 for (idx = 0; idx < resource->num_of_dest; idx++) 10155 mlx5_free(dest_attr[idx]); 10156 return &cache_resource->entry; 10157 error: 10158 for (idx = 0; idx < resource->num_of_dest; idx++) { 10159 flow_dv_sample_sub_actions_release(dev, 10160 &cache_resource->sample_idx[idx]); 10161 if (dest_attr[idx]) 10162 mlx5_free(dest_attr[idx]); 10163 } 10164 10165 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DEST_ARRAY], res_idx); 10166 return NULL; 10167 } 10168 10169 /** 10170 * Find existing destination array resource or create and register a new one. 10171 * 10172 * @param[in, out] dev 10173 * Pointer to rte_eth_dev structure. 10174 * @param[in] resource 10175 * Pointer to destination array resource. 10176 * @parm[in, out] dev_flow 10177 * Pointer to the dev_flow. 10178 * @param[out] error 10179 * pointer to error structure. 10180 * 10181 * @return 10182 * 0 on success otherwise -errno and errno is set. 
10183 */ 10184 static int 10185 flow_dv_dest_array_resource_register(struct rte_eth_dev *dev, 10186 struct mlx5_flow_dv_dest_array_resource *resource, 10187 struct mlx5_flow *dev_flow, 10188 struct rte_flow_error *error) 10189 { 10190 struct mlx5_flow_dv_dest_array_resource *cache_resource; 10191 struct mlx5_priv *priv = dev->data->dev_private; 10192 struct mlx5_cache_entry *entry; 10193 struct mlx5_flow_cb_ctx ctx = { 10194 .dev = dev, 10195 .error = error, 10196 .data = resource, 10197 }; 10198 10199 entry = mlx5_cache_register(&priv->sh->dest_array_list, &ctx); 10200 if (!entry) 10201 return -rte_errno; 10202 cache_resource = container_of(entry, typeof(*cache_resource), entry); 10203 dev_flow->handle->dvh.rix_dest_array = cache_resource->idx; 10204 dev_flow->dv.dest_array_res = cache_resource; 10205 return 0; 10206 } 10207 10208 /** 10209 * Convert Sample action to DV specification. 10210 * 10211 * @param[in] dev 10212 * Pointer to rte_eth_dev structure. 10213 * @param[in] action 10214 * Pointer to sample action structure. 10215 * @param[in, out] dev_flow 10216 * Pointer to the mlx5_flow. 10217 * @param[in] attr 10218 * Pointer to the flow attributes. 10219 * @param[in, out] num_of_dest 10220 * Pointer to the num of destination. 10221 * @param[in, out] sample_actions 10222 * Pointer to sample actions list. 10223 * @param[in, out] res 10224 * Pointer to sample resource. 10225 * @param[out] error 10226 * Pointer to the error structure. 10227 * 10228 * @return 10229 * 0 on success, a negative errno value otherwise and rte_errno is set. 10230 */ 10231 static int 10232 flow_dv_translate_action_sample(struct rte_eth_dev *dev, 10233 const struct rte_flow_action_sample *action, 10234 struct mlx5_flow *dev_flow, 10235 const struct rte_flow_attr *attr, 10236 uint32_t *num_of_dest, 10237 void **sample_actions, 10238 struct mlx5_flow_dv_sample_resource *res, 10239 struct rte_flow_error *error) 10240 { 10241 struct mlx5_priv *priv = dev->data->dev_private; 10242 const struct rte_flow_action *sub_actions; 10243 struct mlx5_flow_sub_actions_list *sample_act; 10244 struct mlx5_flow_sub_actions_idx *sample_idx; 10245 struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace(); 10246 struct rte_flow *flow = dev_flow->flow; 10247 struct mlx5_flow_rss_desc *rss_desc; 10248 uint64_t action_flags = 0; 10249 10250 MLX5_ASSERT(wks); 10251 rss_desc = &wks->rss_desc; 10252 sample_act = &res->sample_act; 10253 sample_idx = &res->sample_idx; 10254 res->ratio = action->ratio; 10255 sub_actions = action->actions; 10256 for (; sub_actions->type != RTE_FLOW_ACTION_TYPE_END; sub_actions++) { 10257 int type = sub_actions->type; 10258 uint32_t pre_rix = 0; 10259 void *pre_r; 10260 switch (type) { 10261 case RTE_FLOW_ACTION_TYPE_QUEUE: 10262 { 10263 const struct rte_flow_action_queue *queue; 10264 struct mlx5_hrxq *hrxq; 10265 uint32_t hrxq_idx; 10266 10267 queue = sub_actions->conf; 10268 rss_desc->queue_num = 1; 10269 rss_desc->queue[0] = queue->index; 10270 hrxq = flow_dv_hrxq_prepare(dev, dev_flow, 10271 rss_desc, &hrxq_idx); 10272 if (!hrxq) 10273 return rte_flow_error_set 10274 (error, rte_errno, 10275 RTE_FLOW_ERROR_TYPE_ACTION, 10276 NULL, 10277 "cannot create fate queue"); 10278 sample_act->dr_queue_action = hrxq->action; 10279 sample_idx->rix_hrxq = hrxq_idx; 10280 sample_actions[sample_act->actions_num++] = 10281 hrxq->action; 10282 (*num_of_dest)++; 10283 action_flags |= MLX5_FLOW_ACTION_QUEUE; 10284 if (action_flags & MLX5_FLOW_ACTION_MARK) 10285 dev_flow->handle->rix_hrxq = hrxq_idx; 10286 
dev_flow->handle->fate_action = 10287 MLX5_FLOW_FATE_QUEUE; 10288 break; 10289 } 10290 case RTE_FLOW_ACTION_TYPE_RSS: 10291 { 10292 struct mlx5_hrxq *hrxq; 10293 uint32_t hrxq_idx; 10294 const struct rte_flow_action_rss *rss; 10295 const uint8_t *rss_key; 10296 10297 rss = sub_actions->conf; 10298 memcpy(rss_desc->queue, rss->queue, 10299 rss->queue_num * sizeof(uint16_t)); 10300 rss_desc->queue_num = rss->queue_num; 10301 /* NULL RSS key indicates default RSS key. */ 10302 rss_key = !rss->key ? rss_hash_default_key : rss->key; 10303 memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN); 10304 /* 10305 * rss->level and rss.types should be set in advance 10306 * when expanding items for RSS. 10307 */ 10308 flow_dv_hashfields_set(dev_flow, rss_desc); 10309 hrxq = flow_dv_hrxq_prepare(dev, dev_flow, 10310 rss_desc, &hrxq_idx); 10311 if (!hrxq) 10312 return rte_flow_error_set 10313 (error, rte_errno, 10314 RTE_FLOW_ERROR_TYPE_ACTION, 10315 NULL, 10316 "cannot create fate queue"); 10317 sample_act->dr_queue_action = hrxq->action; 10318 sample_idx->rix_hrxq = hrxq_idx; 10319 sample_actions[sample_act->actions_num++] = 10320 hrxq->action; 10321 (*num_of_dest)++; 10322 action_flags |= MLX5_FLOW_ACTION_RSS; 10323 if (action_flags & MLX5_FLOW_ACTION_MARK) 10324 dev_flow->handle->rix_hrxq = hrxq_idx; 10325 dev_flow->handle->fate_action = 10326 MLX5_FLOW_FATE_QUEUE; 10327 break; 10328 } 10329 case RTE_FLOW_ACTION_TYPE_MARK: 10330 { 10331 uint32_t tag_be = mlx5_flow_mark_set 10332 (((const struct rte_flow_action_mark *) 10333 (sub_actions->conf))->id); 10334 10335 dev_flow->handle->mark = 1; 10336 pre_rix = dev_flow->handle->dvh.rix_tag; 10337 /* Save the mark resource before sample */ 10338 pre_r = dev_flow->dv.tag_resource; 10339 if (flow_dv_tag_resource_register(dev, tag_be, 10340 dev_flow, error)) 10341 return -rte_errno; 10342 MLX5_ASSERT(dev_flow->dv.tag_resource); 10343 sample_act->dr_tag_action = 10344 dev_flow->dv.tag_resource->action; 10345 sample_idx->rix_tag = 10346 dev_flow->handle->dvh.rix_tag; 10347 sample_actions[sample_act->actions_num++] = 10348 sample_act->dr_tag_action; 10349 /* Recover the mark resource after sample */ 10350 dev_flow->dv.tag_resource = pre_r; 10351 dev_flow->handle->dvh.rix_tag = pre_rix; 10352 action_flags |= MLX5_FLOW_ACTION_MARK; 10353 break; 10354 } 10355 case RTE_FLOW_ACTION_TYPE_COUNT: 10356 { 10357 if (!flow->counter) { 10358 flow->counter = 10359 flow_dv_translate_create_counter(dev, 10360 dev_flow, sub_actions->conf, 10361 0); 10362 if (!flow->counter) 10363 return rte_flow_error_set 10364 (error, rte_errno, 10365 RTE_FLOW_ERROR_TYPE_ACTION, 10366 NULL, 10367 "cannot create counter" 10368 " object."); 10369 } 10370 sample_act->dr_cnt_action = 10371 (flow_dv_counter_get_by_idx(dev, 10372 flow->counter, NULL))->action; 10373 sample_actions[sample_act->actions_num++] = 10374 sample_act->dr_cnt_action; 10375 action_flags |= MLX5_FLOW_ACTION_COUNT; 10376 break; 10377 } 10378 case RTE_FLOW_ACTION_TYPE_PORT_ID: 10379 { 10380 struct mlx5_flow_dv_port_id_action_resource 10381 port_id_resource; 10382 uint32_t port_id = 0; 10383 10384 memset(&port_id_resource, 0, sizeof(port_id_resource)); 10385 /* Save the port id resource before sample */ 10386 pre_rix = dev_flow->handle->rix_port_id_action; 10387 pre_r = dev_flow->dv.port_id_action; 10388 if (flow_dv_translate_action_port_id(dev, sub_actions, 10389 &port_id, error)) 10390 return -rte_errno; 10391 port_id_resource.port_id = port_id; 10392 if (flow_dv_port_id_action_resource_register 10393 (dev, &port_id_resource, 
dev_flow, error)) 10394 return -rte_errno; 10395 sample_act->dr_port_id_action = 10396 dev_flow->dv.port_id_action->action; 10397 sample_idx->rix_port_id_action = 10398 dev_flow->handle->rix_port_id_action; 10399 sample_actions[sample_act->actions_num++] = 10400 sample_act->dr_port_id_action; 10401 /* Recover the port id resource after sample */ 10402 dev_flow->dv.port_id_action = pre_r; 10403 dev_flow->handle->rix_port_id_action = pre_rix; 10404 (*num_of_dest)++; 10405 action_flags |= MLX5_FLOW_ACTION_PORT_ID; 10406 break; 10407 } 10408 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: 10409 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: 10410 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: 10411 /* Save the encap resource before sample */ 10412 pre_rix = dev_flow->handle->dvh.rix_encap_decap; 10413 pre_r = dev_flow->dv.encap_decap; 10414 if (flow_dv_create_action_l2_encap(dev, sub_actions, 10415 dev_flow, 10416 attr->transfer, 10417 error)) 10418 return -rte_errno; 10419 sample_act->dr_encap_action = 10420 dev_flow->dv.encap_decap->action; 10421 sample_idx->rix_encap_decap = 10422 dev_flow->handle->dvh.rix_encap_decap; 10423 sample_actions[sample_act->actions_num++] = 10424 sample_act->dr_encap_action; 10425 /* Recover the encap resource after sample */ 10426 dev_flow->dv.encap_decap = pre_r; 10427 dev_flow->handle->dvh.rix_encap_decap = pre_rix; 10428 action_flags |= MLX5_FLOW_ACTION_ENCAP; 10429 break; 10430 default: 10431 return rte_flow_error_set(error, EINVAL, 10432 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 10433 NULL, 10434 "Not support for sampler action"); 10435 } 10436 } 10437 sample_act->action_flags = action_flags; 10438 res->ft_id = dev_flow->dv.group; 10439 if (attr->transfer) { 10440 union { 10441 uint32_t action_in[MLX5_ST_SZ_DW(set_action_in)]; 10442 uint64_t set_action; 10443 } action_ctx = { .set_action = 0 }; 10444 10445 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB; 10446 MLX5_SET(set_action_in, action_ctx.action_in, action_type, 10447 MLX5_MODIFICATION_TYPE_SET); 10448 MLX5_SET(set_action_in, action_ctx.action_in, field, 10449 MLX5_MODI_META_REG_C_0); 10450 MLX5_SET(set_action_in, action_ctx.action_in, data, 10451 priv->vport_meta_tag); 10452 res->set_action = action_ctx.set_action; 10453 } else if (attr->ingress) { 10454 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX; 10455 } else { 10456 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX; 10457 } 10458 return 0; 10459 } 10460 10461 /** 10462 * Convert Sample action to DV specification. 10463 * 10464 * @param[in] dev 10465 * Pointer to rte_eth_dev structure. 10466 * @param[in, out] dev_flow 10467 * Pointer to the mlx5_flow. 10468 * @param[in] num_of_dest 10469 * The num of destination. 10470 * @param[in, out] res 10471 * Pointer to sample resource. 10472 * @param[in, out] mdest_res 10473 * Pointer to destination array resource. 10474 * @param[in] sample_actions 10475 * Pointer to sample path actions list. 10476 * @param[in] action_flags 10477 * Holds the actions detected until now. 10478 * @param[out] error 10479 * Pointer to the error structure. 10480 * 10481 * @return 10482 * 0 on success, a negative errno value otherwise and rte_errno is set. 
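 *
 * When num_of_dest is greater than 1 a destination array resource is
 * registered for mirroring; otherwise a single sample resource is registered.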
10483 */ 10484 static int 10485 flow_dv_create_action_sample(struct rte_eth_dev *dev, 10486 struct mlx5_flow *dev_flow, 10487 uint32_t num_of_dest, 10488 struct mlx5_flow_dv_sample_resource *res, 10489 struct mlx5_flow_dv_dest_array_resource *mdest_res, 10490 void **sample_actions, 10491 uint64_t action_flags, 10492 struct rte_flow_error *error) 10493 { 10494 /* update normal path action resource into last index of array */ 10495 uint32_t dest_index = MLX5_MAX_DEST_NUM - 1; 10496 struct mlx5_flow_sub_actions_list *sample_act = 10497 &mdest_res->sample_act[dest_index]; 10498 struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace(); 10499 struct mlx5_flow_rss_desc *rss_desc; 10500 uint32_t normal_idx = 0; 10501 struct mlx5_hrxq *hrxq; 10502 uint32_t hrxq_idx; 10503 10504 MLX5_ASSERT(wks); 10505 rss_desc = &wks->rss_desc; 10506 if (num_of_dest > 1) { 10507 if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) { 10508 /* Handle QP action for mirroring */ 10509 hrxq = flow_dv_hrxq_prepare(dev, dev_flow, 10510 rss_desc, &hrxq_idx); 10511 if (!hrxq) 10512 return rte_flow_error_set 10513 (error, rte_errno, 10514 RTE_FLOW_ERROR_TYPE_ACTION, 10515 NULL, 10516 "cannot create rx queue"); 10517 normal_idx++; 10518 mdest_res->sample_idx[dest_index].rix_hrxq = hrxq_idx; 10519 sample_act->dr_queue_action = hrxq->action; 10520 if (action_flags & MLX5_FLOW_ACTION_MARK) 10521 dev_flow->handle->rix_hrxq = hrxq_idx; 10522 dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE; 10523 } 10524 if (sample_act->action_flags & MLX5_FLOW_ACTION_ENCAP) { 10525 normal_idx++; 10526 mdest_res->sample_idx[dest_index].rix_encap_decap = 10527 dev_flow->handle->dvh.rix_encap_decap; 10528 sample_act->dr_encap_action = 10529 dev_flow->dv.encap_decap->action; 10530 dev_flow->handle->dvh.rix_encap_decap = 0; 10531 } 10532 if (sample_act->action_flags & MLX5_FLOW_ACTION_PORT_ID) { 10533 normal_idx++; 10534 mdest_res->sample_idx[dest_index].rix_port_id_action = 10535 dev_flow->handle->rix_port_id_action; 10536 sample_act->dr_port_id_action = 10537 dev_flow->dv.port_id_action->action; 10538 dev_flow->handle->rix_port_id_action = 0; 10539 } 10540 if (sample_act->action_flags & MLX5_FLOW_ACTION_JUMP) { 10541 normal_idx++; 10542 mdest_res->sample_idx[dest_index].rix_jump = 10543 dev_flow->handle->rix_jump; 10544 sample_act->dr_jump_action = 10545 dev_flow->dv.jump->action; 10546 dev_flow->handle->rix_jump = 0; 10547 } 10548 sample_act->actions_num = normal_idx; 10549 /* update sample action resource into first index of array */ 10550 mdest_res->ft_type = res->ft_type; 10551 memcpy(&mdest_res->sample_idx[0], &res->sample_idx, 10552 sizeof(struct mlx5_flow_sub_actions_idx)); 10553 memcpy(&mdest_res->sample_act[0], &res->sample_act, 10554 sizeof(struct mlx5_flow_sub_actions_list)); 10555 mdest_res->num_of_dest = num_of_dest; 10556 if (flow_dv_dest_array_resource_register(dev, mdest_res, 10557 dev_flow, error)) 10558 return rte_flow_error_set(error, EINVAL, 10559 RTE_FLOW_ERROR_TYPE_ACTION, 10560 NULL, "can't create sample " 10561 "action"); 10562 } else { 10563 res->sub_actions = sample_actions; 10564 if (flow_dv_sample_resource_register(dev, res, dev_flow, error)) 10565 return rte_flow_error_set(error, EINVAL, 10566 RTE_FLOW_ERROR_TYPE_ACTION, 10567 NULL, 10568 "can't create sample action"); 10569 } 10570 return 0; 10571 } 10572 10573 /** 10574 * Remove an ASO age action from age actions list. 10575 * 10576 * @param[in] dev 10577 * Pointer to the Ethernet device structure. 
10578 * @param[in] age 10579 * Pointer to the aso age action handler. 10580 */ 10581 static void 10582 flow_dv_aso_age_remove_from_age(struct rte_eth_dev *dev, 10583 struct mlx5_aso_age_action *age) 10584 { 10585 struct mlx5_age_info *age_info; 10586 struct mlx5_age_param *age_param = &age->age_params; 10587 struct mlx5_priv *priv = dev->data->dev_private; 10588 uint16_t expected = AGE_CANDIDATE; 10589 10590 age_info = GET_PORT_AGE_INFO(priv); 10591 if (!__atomic_compare_exchange_n(&age_param->state, &expected, 10592 AGE_FREE, false, __ATOMIC_RELAXED, 10593 __ATOMIC_RELAXED)) { 10594 /** 10595 * We need the lock even it is age timeout, 10596 * since age action may still in process. 10597 */ 10598 rte_spinlock_lock(&age_info->aged_sl); 10599 LIST_REMOVE(age, next); 10600 rte_spinlock_unlock(&age_info->aged_sl); 10601 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED); 10602 } 10603 } 10604 10605 /** 10606 * Release an ASO age action. 10607 * 10608 * @param[in] dev 10609 * Pointer to the Ethernet device structure. 10610 * @param[in] age_idx 10611 * Index of ASO age action to release. 10612 * @param[in] flow 10613 * True if the release operation is during flow destroy operation. 10614 * False if the release operation is during action destroy operation. 10615 * 10616 * @return 10617 * 0 when age action was removed, otherwise the number of references. 10618 */ 10619 static int 10620 flow_dv_aso_age_release(struct rte_eth_dev *dev, uint32_t age_idx) 10621 { 10622 struct mlx5_priv *priv = dev->data->dev_private; 10623 struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng; 10624 struct mlx5_aso_age_action *age = flow_aso_age_get_by_idx(dev, age_idx); 10625 uint32_t ret = __atomic_sub_fetch(&age->refcnt, 1, __ATOMIC_RELAXED); 10626 10627 if (!ret) { 10628 flow_dv_aso_age_remove_from_age(dev, age); 10629 rte_spinlock_lock(&mng->free_sl); 10630 LIST_INSERT_HEAD(&mng->free, age, next); 10631 rte_spinlock_unlock(&mng->free_sl); 10632 } 10633 return ret; 10634 } 10635 10636 /** 10637 * Resize the ASO age pools array by MLX5_CNT_CONTAINER_RESIZE pools. 10638 * 10639 * @param[in] dev 10640 * Pointer to the Ethernet device structure. 10641 * 10642 * @return 10643 * 0 on success, otherwise negative errno value and rte_errno is set. 10644 */ 10645 static int 10646 flow_dv_aso_age_pools_resize(struct rte_eth_dev *dev) 10647 { 10648 struct mlx5_priv *priv = dev->data->dev_private; 10649 struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng; 10650 void *old_pools = mng->pools; 10651 uint32_t resize = mng->n + MLX5_CNT_CONTAINER_RESIZE; 10652 uint32_t mem_size = sizeof(struct mlx5_aso_age_pool *) * resize; 10653 void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY); 10654 10655 if (!pools) { 10656 rte_errno = ENOMEM; 10657 return -ENOMEM; 10658 } 10659 if (old_pools) { 10660 memcpy(pools, old_pools, 10661 mng->n * sizeof(struct mlx5_flow_counter_pool *)); 10662 mlx5_free(old_pools); 10663 } else { 10664 /* First ASO flow hit allocation - starting ASO data-path. */ 10665 int ret = mlx5_aso_queue_start(priv->sh); 10666 10667 if (ret) { 10668 mlx5_free(pools); 10669 return ret; 10670 } 10671 } 10672 mng->n = resize; 10673 mng->pools = pools; 10674 return 0; 10675 } 10676 10677 /** 10678 * Create and initialize a new ASO aging pool. 10679 * 10680 * @param[in] dev 10681 * Pointer to the Ethernet device structure. 10682 * @param[out] age_free 10683 * Where to put the pointer of a new age action. 
10684 * 10685 * @return 10686 * The age actions pool pointer and @p age_free is set on success, 10687 * NULL otherwise and rte_errno is set. 10688 */ 10689 static struct mlx5_aso_age_pool * 10690 flow_dv_age_pool_create(struct rte_eth_dev *dev, 10691 struct mlx5_aso_age_action **age_free) 10692 { 10693 struct mlx5_priv *priv = dev->data->dev_private; 10694 struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng; 10695 struct mlx5_aso_age_pool *pool = NULL; 10696 struct mlx5_devx_obj *obj = NULL; 10697 uint32_t i; 10698 10699 obj = mlx5_devx_cmd_create_flow_hit_aso_obj(priv->sh->ctx, 10700 priv->sh->pdn); 10701 if (!obj) { 10702 rte_errno = ENODATA; 10703 DRV_LOG(ERR, "Failed to create flow_hit_aso_obj using DevX."); 10704 return NULL; 10705 } 10706 pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY); 10707 if (!pool) { 10708 claim_zero(mlx5_devx_cmd_destroy(obj)); 10709 rte_errno = ENOMEM; 10710 return NULL; 10711 } 10712 pool->flow_hit_aso_obj = obj; 10713 pool->time_of_last_age_check = MLX5_CURR_TIME_SEC; 10714 rte_spinlock_lock(&mng->resize_sl); 10715 pool->index = mng->next; 10716 /* Resize pools array if there is no room for the new pool in it. */ 10717 if (pool->index == mng->n && flow_dv_aso_age_pools_resize(dev)) { 10718 claim_zero(mlx5_devx_cmd_destroy(obj)); 10719 mlx5_free(pool); 10720 rte_spinlock_unlock(&mng->resize_sl); 10721 return NULL; 10722 } 10723 mng->pools[pool->index] = pool; 10724 mng->next++; 10725 rte_spinlock_unlock(&mng->resize_sl); 10726 /* Assign the first action in the new pool, the rest go to free list. */ 10727 *age_free = &pool->actions[0]; 10728 for (i = 1; i < MLX5_ASO_AGE_ACTIONS_PER_POOL; i++) { 10729 pool->actions[i].offset = i; 10730 LIST_INSERT_HEAD(&mng->free, &pool->actions[i], next); 10731 } 10732 return pool; 10733 } 10734 10735 /** 10736 * Allocate a ASO aging bit. 10737 * 10738 * @param[in] dev 10739 * Pointer to the Ethernet device structure. 10740 * @param[out] error 10741 * Pointer to the error structure. 10742 * 10743 * @return 10744 * Index to ASO age action on success, 0 otherwise and rte_errno is set. 10745 */ 10746 static uint32_t 10747 flow_dv_aso_age_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error) 10748 { 10749 struct mlx5_priv *priv = dev->data->dev_private; 10750 const struct mlx5_aso_age_pool *pool; 10751 struct mlx5_aso_age_action *age_free = NULL; 10752 struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng; 10753 10754 MLX5_ASSERT(mng); 10755 /* Try to get the next free age action bit. */ 10756 rte_spinlock_lock(&mng->free_sl); 10757 age_free = LIST_FIRST(&mng->free); 10758 if (age_free) { 10759 LIST_REMOVE(age_free, next); 10760 } else if (!flow_dv_age_pool_create(dev, &age_free)) { 10761 rte_spinlock_unlock(&mng->free_sl); 10762 rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION, 10763 NULL, "failed to create ASO age pool"); 10764 return 0; /* 0 is an error. */ 10765 } 10766 rte_spinlock_unlock(&mng->free_sl); 10767 pool = container_of 10768 ((const struct mlx5_aso_age_action (*)[MLX5_ASO_AGE_ACTIONS_PER_POOL]) 10769 (age_free - age_free->offset), const struct mlx5_aso_age_pool, 10770 actions); 10771 if (!age_free->dr_action) { 10772 int reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_FLOW_HIT, 0, 10773 error); 10774 10775 if (reg_c < 0) { 10776 rte_flow_error_set(error, rte_errno, 10777 RTE_FLOW_ERROR_TYPE_ACTION, 10778 NULL, "failed to get reg_c " 10779 "for ASO flow hit"); 10780 return 0; /* 0 is an error. 
*/ 10781 } 10782 #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO 10783 age_free->dr_action = mlx5_glue->dv_create_flow_action_aso 10784 (priv->sh->rx_domain, 10785 pool->flow_hit_aso_obj->obj, age_free->offset, 10786 MLX5DV_DR_ACTION_FLAGS_ASO_FIRST_HIT_SET, 10787 (reg_c - REG_C_0)); 10788 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */ 10789 if (!age_free->dr_action) { 10790 rte_errno = errno; 10791 rte_spinlock_lock(&mng->free_sl); 10792 LIST_INSERT_HEAD(&mng->free, age_free, next); 10793 rte_spinlock_unlock(&mng->free_sl); 10794 rte_flow_error_set(error, rte_errno, 10795 RTE_FLOW_ERROR_TYPE_ACTION, 10796 NULL, "failed to create ASO " 10797 "flow hit action"); 10798 return 0; /* 0 is an error. */ 10799 } 10800 } 10801 __atomic_store_n(&age_free->refcnt, 1, __ATOMIC_RELAXED); 10802 return pool->index | ((age_free->offset + 1) << 16); 10803 } 10804 10805 /** 10806 * Create a age action using ASO mechanism. 10807 * 10808 * @param[in] dev 10809 * Pointer to rte_eth_dev structure. 10810 * @param[in] age 10811 * Pointer to the aging action configuration. 10812 * @param[out] error 10813 * Pointer to the error structure. 10814 * 10815 * @return 10816 * Index to flow counter on success, 0 otherwise. 10817 */ 10818 static uint32_t 10819 flow_dv_translate_create_aso_age(struct rte_eth_dev *dev, 10820 const struct rte_flow_action_age *age, 10821 struct rte_flow_error *error) 10822 { 10823 uint32_t age_idx = 0; 10824 struct mlx5_aso_age_action *aso_age; 10825 10826 age_idx = flow_dv_aso_age_alloc(dev, error); 10827 if (!age_idx) 10828 return 0; 10829 aso_age = flow_aso_age_get_by_idx(dev, age_idx); 10830 aso_age->age_params.context = age->context; 10831 aso_age->age_params.timeout = age->timeout; 10832 aso_age->age_params.port_id = dev->data->port_id; 10833 __atomic_store_n(&aso_age->age_params.sec_since_last_hit, 0, 10834 __ATOMIC_RELAXED); 10835 __atomic_store_n(&aso_age->age_params.state, AGE_CANDIDATE, 10836 __ATOMIC_RELAXED); 10837 return age_idx; 10838 } 10839 10840 /** 10841 * Fill the flow with DV spec, lock free 10842 * (mutex should be acquired by caller). 10843 * 10844 * @param[in] dev 10845 * Pointer to rte_eth_dev structure. 10846 * @param[in, out] dev_flow 10847 * Pointer to the sub flow. 10848 * @param[in] attr 10849 * Pointer to the flow attributes. 10850 * @param[in] items 10851 * Pointer to the list of items. 10852 * @param[in] actions 10853 * Pointer to the list of actions. 10854 * @param[out] error 10855 * Pointer to the error structure. 10856 * 10857 * @return 10858 * 0 on success, a negative errno value otherwise and rte_errno is set. 
10859 */ 10860 static int 10861 flow_dv_translate(struct rte_eth_dev *dev, 10862 struct mlx5_flow *dev_flow, 10863 const struct rte_flow_attr *attr, 10864 const struct rte_flow_item items[], 10865 const struct rte_flow_action actions[], 10866 struct rte_flow_error *error) 10867 { 10868 struct mlx5_priv *priv = dev->data->dev_private; 10869 struct mlx5_dev_config *dev_conf = &priv->config; 10870 struct rte_flow *flow = dev_flow->flow; 10871 struct mlx5_flow_handle *handle = dev_flow->handle; 10872 struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace(); 10873 struct mlx5_flow_rss_desc *rss_desc; 10874 uint64_t item_flags = 0; 10875 uint64_t last_item = 0; 10876 uint64_t action_flags = 0; 10877 struct mlx5_flow_dv_matcher matcher = { 10878 .mask = { 10879 .size = sizeof(matcher.mask.buf) - 10880 MLX5_ST_SZ_BYTES(fte_match_set_misc4), 10881 }, 10882 }; 10883 int actions_n = 0; 10884 bool actions_end = false; 10885 union { 10886 struct mlx5_flow_dv_modify_hdr_resource res; 10887 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) + 10888 sizeof(struct mlx5_modification_cmd) * 10889 (MLX5_MAX_MODIFY_NUM + 1)]; 10890 } mhdr_dummy; 10891 struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res; 10892 const struct rte_flow_action_count *count = NULL; 10893 const struct rte_flow_action_age *age = NULL; 10894 union flow_dv_attr flow_attr = { .attr = 0 }; 10895 uint32_t tag_be; 10896 union mlx5_flow_tbl_key tbl_key; 10897 uint32_t modify_action_position = UINT32_MAX; 10898 void *match_mask = matcher.mask.buf; 10899 void *match_value = dev_flow->dv.value.buf; 10900 uint8_t next_protocol = 0xff; 10901 struct rte_vlan_hdr vlan = { 0 }; 10902 struct mlx5_flow_dv_dest_array_resource mdest_res; 10903 struct mlx5_flow_dv_sample_resource sample_res; 10904 void *sample_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0}; 10905 const struct rte_flow_action_sample *sample = NULL; 10906 struct mlx5_flow_sub_actions_list *sample_act; 10907 uint32_t sample_act_pos = UINT32_MAX; 10908 uint32_t num_of_dest = 0; 10909 int tmp_actions_n = 0; 10910 uint32_t table; 10911 int ret = 0; 10912 const struct mlx5_flow_tunnel *tunnel; 10913 struct flow_grp_info grp_info = { 10914 .external = !!dev_flow->external, 10915 .transfer = !!attr->transfer, 10916 .fdb_def_rule = !!priv->fdb_def_rule, 10917 .skip_scale = dev_flow->skip_scale & 10918 (1 << MLX5_SCALE_FLOW_GROUP_BIT), 10919 }; 10920 10921 if (!wks) 10922 return rte_flow_error_set(error, ENOMEM, 10923 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 10924 NULL, 10925 "failed to push flow workspace"); 10926 rss_desc = &wks->rss_desc; 10927 memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource)); 10928 memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource)); 10929 mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX : 10930 MLX5DV_FLOW_TABLE_TYPE_NIC_RX; 10931 /* update normal path action resource into last index of array */ 10932 sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1]; 10933 tunnel = is_flow_tunnel_match_rule(dev, attr, items, actions) ? 10934 flow_items_to_tunnel(items) : 10935 is_flow_tunnel_steer_rule(dev, attr, items, actions) ? 10936 flow_actions_to_tunnel(actions) : 10937 dev_flow->tunnel ? dev_flow->tunnel : NULL; 10938 mhdr_res->ft_type = attr->egress ? 
MLX5DV_FLOW_TABLE_TYPE_NIC_TX : 10939 MLX5DV_FLOW_TABLE_TYPE_NIC_RX; 10940 grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate 10941 (dev, tunnel, attr, items, actions); 10942 ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table, 10943 &grp_info, error); 10944 if (ret) 10945 return ret; 10946 dev_flow->dv.group = table; 10947 if (attr->transfer) 10948 mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB; 10949 /* number of actions must be set to 0 in case of dirty stack. */ 10950 mhdr_res->actions_num = 0; 10951 if (is_flow_tunnel_match_rule(dev, attr, items, actions)) { 10952 /* 10953 * do not add decap action if match rule drops packet 10954 * HW rejects rules with decap & drop 10955 * 10956 * if tunnel match rule was inserted before matching tunnel set 10957 * rule flow table used in the match rule must be registered. 10958 * current implementation handles that in the 10959 * flow_dv_match_register() at the function end. 10960 */ 10961 bool add_decap = true; 10962 const struct rte_flow_action *ptr = actions; 10963 10964 for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) { 10965 if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) { 10966 add_decap = false; 10967 break; 10968 } 10969 } 10970 if (add_decap) { 10971 if (flow_dv_create_action_l2_decap(dev, dev_flow, 10972 attr->transfer, 10973 error)) 10974 return -rte_errno; 10975 dev_flow->dv.actions[actions_n++] = 10976 dev_flow->dv.encap_decap->action; 10977 action_flags |= MLX5_FLOW_ACTION_DECAP; 10978 } 10979 } 10980 for (; !actions_end ; actions++) { 10981 const struct rte_flow_action_queue *queue; 10982 const struct rte_flow_action_rss *rss; 10983 const struct rte_flow_action *action = actions; 10984 const uint8_t *rss_key; 10985 const struct rte_flow_action_meter *mtr; 10986 struct mlx5_flow_tbl_resource *tbl; 10987 struct mlx5_aso_age_action *age_act; 10988 uint32_t port_id = 0; 10989 struct mlx5_flow_dv_port_id_action_resource port_id_resource; 10990 int action_type = actions->type; 10991 const struct rte_flow_action *found_action = NULL; 10992 struct mlx5_flow_meter *fm = NULL; 10993 uint32_t jump_group = 0; 10994 10995 if (!mlx5_flow_os_action_supported(action_type)) 10996 return rte_flow_error_set(error, ENOTSUP, 10997 RTE_FLOW_ERROR_TYPE_ACTION, 10998 actions, 10999 "action not supported"); 11000 switch (action_type) { 11001 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET: 11002 action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET; 11003 break; 11004 case RTE_FLOW_ACTION_TYPE_VOID: 11005 break; 11006 case RTE_FLOW_ACTION_TYPE_PORT_ID: 11007 if (flow_dv_translate_action_port_id(dev, action, 11008 &port_id, error)) 11009 return -rte_errno; 11010 port_id_resource.port_id = port_id; 11011 MLX5_ASSERT(!handle->rix_port_id_action); 11012 if (flow_dv_port_id_action_resource_register 11013 (dev, &port_id_resource, dev_flow, error)) 11014 return -rte_errno; 11015 dev_flow->dv.actions[actions_n++] = 11016 dev_flow->dv.port_id_action->action; 11017 action_flags |= MLX5_FLOW_ACTION_PORT_ID; 11018 dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID; 11019 sample_act->action_flags |= MLX5_FLOW_ACTION_PORT_ID; 11020 num_of_dest++; 11021 break; 11022 case RTE_FLOW_ACTION_TYPE_FLAG: 11023 action_flags |= MLX5_FLOW_ACTION_FLAG; 11024 dev_flow->handle->mark = 1; 11025 if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) { 11026 struct rte_flow_action_mark mark = { 11027 .id = MLX5_FLOW_MARK_DEFAULT, 11028 }; 11029 11030 if (flow_dv_convert_action_mark(dev, &mark, 11031 mhdr_res, 11032 error)) 11033 return -rte_errno; 11034 action_flags |= 
MLX5_FLOW_ACTION_MARK_EXT; 11035 break; 11036 } 11037 tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT); 11038 /* 11039 * Only one FLAG or MARK is supported per device flow 11040 * right now. So the pointer to the tag resource must be 11041 * zero before the register process. 11042 */ 11043 MLX5_ASSERT(!handle->dvh.rix_tag); 11044 if (flow_dv_tag_resource_register(dev, tag_be, 11045 dev_flow, error)) 11046 return -rte_errno; 11047 MLX5_ASSERT(dev_flow->dv.tag_resource); 11048 dev_flow->dv.actions[actions_n++] = 11049 dev_flow->dv.tag_resource->action; 11050 break; 11051 case RTE_FLOW_ACTION_TYPE_MARK: 11052 action_flags |= MLX5_FLOW_ACTION_MARK; 11053 dev_flow->handle->mark = 1; 11054 if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) { 11055 const struct rte_flow_action_mark *mark = 11056 (const struct rte_flow_action_mark *) 11057 actions->conf; 11058 11059 if (flow_dv_convert_action_mark(dev, mark, 11060 mhdr_res, 11061 error)) 11062 return -rte_errno; 11063 action_flags |= MLX5_FLOW_ACTION_MARK_EXT; 11064 break; 11065 } 11066 /* Fall-through */ 11067 case MLX5_RTE_FLOW_ACTION_TYPE_MARK: 11068 /* Legacy (non-extensive) MARK action. */ 11069 tag_be = mlx5_flow_mark_set 11070 (((const struct rte_flow_action_mark *) 11071 (actions->conf))->id); 11072 MLX5_ASSERT(!handle->dvh.rix_tag); 11073 if (flow_dv_tag_resource_register(dev, tag_be, 11074 dev_flow, error)) 11075 return -rte_errno; 11076 MLX5_ASSERT(dev_flow->dv.tag_resource); 11077 dev_flow->dv.actions[actions_n++] = 11078 dev_flow->dv.tag_resource->action; 11079 break; 11080 case RTE_FLOW_ACTION_TYPE_SET_META: 11081 if (flow_dv_convert_action_set_meta 11082 (dev, mhdr_res, attr, 11083 (const struct rte_flow_action_set_meta *) 11084 actions->conf, error)) 11085 return -rte_errno; 11086 action_flags |= MLX5_FLOW_ACTION_SET_META; 11087 break; 11088 case RTE_FLOW_ACTION_TYPE_SET_TAG: 11089 if (flow_dv_convert_action_set_tag 11090 (dev, mhdr_res, 11091 (const struct rte_flow_action_set_tag *) 11092 actions->conf, error)) 11093 return -rte_errno; 11094 action_flags |= MLX5_FLOW_ACTION_SET_TAG; 11095 break; 11096 case RTE_FLOW_ACTION_TYPE_DROP: 11097 action_flags |= MLX5_FLOW_ACTION_DROP; 11098 dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP; 11099 break; 11100 case RTE_FLOW_ACTION_TYPE_QUEUE: 11101 queue = actions->conf; 11102 rss_desc->queue_num = 1; 11103 rss_desc->queue[0] = queue->index; 11104 action_flags |= MLX5_FLOW_ACTION_QUEUE; 11105 dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE; 11106 sample_act->action_flags |= MLX5_FLOW_ACTION_QUEUE; 11107 num_of_dest++; 11108 break; 11109 case RTE_FLOW_ACTION_TYPE_RSS: 11110 rss = actions->conf; 11111 memcpy(rss_desc->queue, rss->queue, 11112 rss->queue_num * sizeof(uint16_t)); 11113 rss_desc->queue_num = rss->queue_num; 11114 /* NULL RSS key indicates default RSS key. */ 11115 rss_key = !rss->key ? rss_hash_default_key : rss->key; 11116 memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN); 11117 /* 11118 * rss->level and rss.types should be set in advance 11119 * when expanding items for RSS. 11120 */ 11121 action_flags |= MLX5_FLOW_ACTION_RSS; 11122 dev_flow->handle->fate_action = rss_desc->shared_rss ? 
11123 MLX5_FLOW_FATE_SHARED_RSS : 11124 MLX5_FLOW_FATE_QUEUE; 11125 break; 11126 case MLX5_RTE_FLOW_ACTION_TYPE_AGE: 11127 flow->age = (uint32_t)(uintptr_t)(action->conf); 11128 age_act = flow_aso_age_get_by_idx(dev, flow->age); 11129 __atomic_fetch_add(&age_act->refcnt, 1, 11130 __ATOMIC_RELAXED); 11131 dev_flow->dv.actions[actions_n++] = age_act->dr_action; 11132 action_flags |= MLX5_FLOW_ACTION_AGE; 11133 break; 11134 case RTE_FLOW_ACTION_TYPE_AGE: 11135 if (priv->sh->flow_hit_aso_en && attr->group) { 11136 /* 11137 * Create one shared age action, to be used 11138 * by all sub-flows. 11139 */ 11140 if (!flow->age) { 11141 flow->age = 11142 flow_dv_translate_create_aso_age 11143 (dev, action->conf, 11144 error); 11145 if (!flow->age) 11146 return rte_flow_error_set 11147 (error, rte_errno, 11148 RTE_FLOW_ERROR_TYPE_ACTION, 11149 NULL, 11150 "can't create ASO age action"); 11151 } 11152 dev_flow->dv.actions[actions_n++] = 11153 (flow_aso_age_get_by_idx 11154 (dev, flow->age))->dr_action; 11155 action_flags |= MLX5_FLOW_ACTION_AGE; 11156 break; 11157 } 11158 /* Fall-through */ 11159 case RTE_FLOW_ACTION_TYPE_COUNT: 11160 if (!dev_conf->devx) { 11161 return rte_flow_error_set 11162 (error, ENOTSUP, 11163 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 11164 NULL, 11165 "count action not supported"); 11166 } 11167 /* Save information first, will apply later. */ 11168 if (actions->type == RTE_FLOW_ACTION_TYPE_COUNT) 11169 count = action->conf; 11170 else 11171 age = action->conf; 11172 action_flags |= MLX5_FLOW_ACTION_COUNT; 11173 break; 11174 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN: 11175 dev_flow->dv.actions[actions_n++] = 11176 priv->sh->pop_vlan_action; 11177 action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN; 11178 break; 11179 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: 11180 if (!(action_flags & 11181 MLX5_FLOW_ACTION_OF_SET_VLAN_VID)) 11182 flow_dev_get_vlan_info_from_items(items, &vlan); 11183 vlan.eth_proto = rte_be_to_cpu_16 11184 ((((const struct rte_flow_action_of_push_vlan *) 11185 actions->conf)->ethertype)); 11186 found_action = mlx5_flow_find_action 11187 (actions + 1, 11188 RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID); 11189 if (found_action) 11190 mlx5_update_vlan_vid_pcp(found_action, &vlan); 11191 found_action = mlx5_flow_find_action 11192 (actions + 1, 11193 RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP); 11194 if (found_action) 11195 mlx5_update_vlan_vid_pcp(found_action, &vlan); 11196 if (flow_dv_create_action_push_vlan 11197 (dev, attr, &vlan, dev_flow, error)) 11198 return -rte_errno; 11199 dev_flow->dv.actions[actions_n++] = 11200 dev_flow->dv.push_vlan_res->action; 11201 action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN; 11202 break; 11203 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP: 11204 /* of_vlan_push action handled this action */ 11205 MLX5_ASSERT(action_flags & 11206 MLX5_FLOW_ACTION_OF_PUSH_VLAN); 11207 break; 11208 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID: 11209 if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) 11210 break; 11211 flow_dev_get_vlan_info_from_items(items, &vlan); 11212 mlx5_update_vlan_vid_pcp(actions, &vlan); 11213 /* If no VLAN push - this is a modify header action */ 11214 if (flow_dv_convert_action_modify_vlan_vid 11215 (mhdr_res, actions, error)) 11216 return -rte_errno; 11217 action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID; 11218 break; 11219 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: 11220 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: 11221 if (flow_dv_create_action_l2_encap(dev, actions, 11222 dev_flow, 11223 attr->transfer, 11224 error)) 11225 return -rte_errno; 11226 
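			/*
			 * Append the created L2 encap action to the DV action
			 * list; if a sample action was already seen, mirror
			 * the ENCAP flag into its sub-action list as well.
			 */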
dev_flow->dv.actions[actions_n++] = 11227 dev_flow->dv.encap_decap->action; 11228 action_flags |= MLX5_FLOW_ACTION_ENCAP; 11229 if (action_flags & MLX5_FLOW_ACTION_SAMPLE) 11230 sample_act->action_flags |= 11231 MLX5_FLOW_ACTION_ENCAP; 11232 break; 11233 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP: 11234 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP: 11235 if (flow_dv_create_action_l2_decap(dev, dev_flow, 11236 attr->transfer, 11237 error)) 11238 return -rte_errno; 11239 dev_flow->dv.actions[actions_n++] = 11240 dev_flow->dv.encap_decap->action; 11241 action_flags |= MLX5_FLOW_ACTION_DECAP; 11242 break; 11243 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: 11244 /* Handle encap with preceding decap. */ 11245 if (action_flags & MLX5_FLOW_ACTION_DECAP) { 11246 if (flow_dv_create_action_raw_encap 11247 (dev, actions, dev_flow, attr, error)) 11248 return -rte_errno; 11249 dev_flow->dv.actions[actions_n++] = 11250 dev_flow->dv.encap_decap->action; 11251 } else { 11252 /* Handle encap without preceding decap. */ 11253 if (flow_dv_create_action_l2_encap 11254 (dev, actions, dev_flow, attr->transfer, 11255 error)) 11256 return -rte_errno; 11257 dev_flow->dv.actions[actions_n++] = 11258 dev_flow->dv.encap_decap->action; 11259 } 11260 action_flags |= MLX5_FLOW_ACTION_ENCAP; 11261 if (action_flags & MLX5_FLOW_ACTION_SAMPLE) 11262 sample_act->action_flags |= 11263 MLX5_FLOW_ACTION_ENCAP; 11264 break; 11265 case RTE_FLOW_ACTION_TYPE_RAW_DECAP: 11266 while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID) 11267 ; 11268 if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) { 11269 if (flow_dv_create_action_l2_decap 11270 (dev, dev_flow, attr->transfer, error)) 11271 return -rte_errno; 11272 dev_flow->dv.actions[actions_n++] = 11273 dev_flow->dv.encap_decap->action; 11274 } 11275 /* If decap is followed by encap, handle it at encap. */ 11276 action_flags |= MLX5_FLOW_ACTION_DECAP; 11277 break; 11278 case RTE_FLOW_ACTION_TYPE_JUMP: 11279 jump_group = ((const struct rte_flow_action_jump *) 11280 action->conf)->group; 11281 grp_info.std_tbl_fix = 0; 11282 if (dev_flow->skip_scale & 11283 (1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT)) 11284 grp_info.skip_scale = 1; 11285 else 11286 grp_info.skip_scale = 0; 11287 ret = mlx5_flow_group_to_table(dev, tunnel, 11288 jump_group, 11289 &table, 11290 &grp_info, error); 11291 if (ret) 11292 return ret; 11293 tbl = flow_dv_tbl_resource_get(dev, table, attr->egress, 11294 attr->transfer, 11295 !!dev_flow->external, 11296 tunnel, jump_group, 0, 11297 error); 11298 if (!tbl) 11299 return rte_flow_error_set 11300 (error, errno, 11301 RTE_FLOW_ERROR_TYPE_ACTION, 11302 NULL, 11303 "cannot create jump action."); 11304 if (flow_dv_jump_tbl_resource_register 11305 (dev, tbl, dev_flow, error)) { 11306 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl); 11307 return rte_flow_error_set 11308 (error, errno, 11309 RTE_FLOW_ERROR_TYPE_ACTION, 11310 NULL, 11311 "cannot create jump action."); 11312 } 11313 dev_flow->dv.actions[actions_n++] = 11314 dev_flow->dv.jump->action; 11315 action_flags |= MLX5_FLOW_ACTION_JUMP; 11316 dev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP; 11317 sample_act->action_flags |= MLX5_FLOW_ACTION_JUMP; 11318 num_of_dest++; 11319 break; 11320 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC: 11321 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST: 11322 if (flow_dv_convert_action_modify_mac 11323 (mhdr_res, actions, error)) 11324 return -rte_errno; 11325 action_flags |= actions->type == 11326 RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ? 
11327 MLX5_FLOW_ACTION_SET_MAC_SRC : 11328 MLX5_FLOW_ACTION_SET_MAC_DST; 11329 break; 11330 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC: 11331 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST: 11332 if (flow_dv_convert_action_modify_ipv4 11333 (mhdr_res, actions, error)) 11334 return -rte_errno; 11335 action_flags |= actions->type == 11336 RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ? 11337 MLX5_FLOW_ACTION_SET_IPV4_SRC : 11338 MLX5_FLOW_ACTION_SET_IPV4_DST; 11339 break; 11340 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC: 11341 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST: 11342 if (flow_dv_convert_action_modify_ipv6 11343 (mhdr_res, actions, error)) 11344 return -rte_errno; 11345 action_flags |= actions->type == 11346 RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ? 11347 MLX5_FLOW_ACTION_SET_IPV6_SRC : 11348 MLX5_FLOW_ACTION_SET_IPV6_DST; 11349 break; 11350 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC: 11351 case RTE_FLOW_ACTION_TYPE_SET_TP_DST: 11352 if (flow_dv_convert_action_modify_tp 11353 (mhdr_res, actions, items, 11354 &flow_attr, dev_flow, !!(action_flags & 11355 MLX5_FLOW_ACTION_DECAP), error)) 11356 return -rte_errno; 11357 action_flags |= actions->type == 11358 RTE_FLOW_ACTION_TYPE_SET_TP_SRC ? 11359 MLX5_FLOW_ACTION_SET_TP_SRC : 11360 MLX5_FLOW_ACTION_SET_TP_DST; 11361 break; 11362 case RTE_FLOW_ACTION_TYPE_DEC_TTL: 11363 if (flow_dv_convert_action_modify_dec_ttl 11364 (mhdr_res, items, &flow_attr, dev_flow, 11365 !!(action_flags & 11366 MLX5_FLOW_ACTION_DECAP), error)) 11367 return -rte_errno; 11368 action_flags |= MLX5_FLOW_ACTION_DEC_TTL; 11369 break; 11370 case RTE_FLOW_ACTION_TYPE_SET_TTL: 11371 if (flow_dv_convert_action_modify_ttl 11372 (mhdr_res, actions, items, &flow_attr, 11373 dev_flow, !!(action_flags & 11374 MLX5_FLOW_ACTION_DECAP), error)) 11375 return -rte_errno; 11376 action_flags |= MLX5_FLOW_ACTION_SET_TTL; 11377 break; 11378 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ: 11379 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ: 11380 if (flow_dv_convert_action_modify_tcp_seq 11381 (mhdr_res, actions, error)) 11382 return -rte_errno; 11383 action_flags |= actions->type == 11384 RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ? 11385 MLX5_FLOW_ACTION_INC_TCP_SEQ : 11386 MLX5_FLOW_ACTION_DEC_TCP_SEQ; 11387 break; 11388 11389 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK: 11390 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK: 11391 if (flow_dv_convert_action_modify_tcp_ack 11392 (mhdr_res, actions, error)) 11393 return -rte_errno; 11394 action_flags |= actions->type == 11395 RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ? 
11396 MLX5_FLOW_ACTION_INC_TCP_ACK : 11397 MLX5_FLOW_ACTION_DEC_TCP_ACK; 11398 break; 11399 case MLX5_RTE_FLOW_ACTION_TYPE_TAG: 11400 if (flow_dv_convert_action_set_reg 11401 (mhdr_res, actions, error)) 11402 return -rte_errno; 11403 action_flags |= MLX5_FLOW_ACTION_SET_TAG; 11404 break; 11405 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG: 11406 if (flow_dv_convert_action_copy_mreg 11407 (dev, mhdr_res, actions, error)) 11408 return -rte_errno; 11409 action_flags |= MLX5_FLOW_ACTION_SET_TAG; 11410 break; 11411 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS: 11412 action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS; 11413 dev_flow->handle->fate_action = 11414 MLX5_FLOW_FATE_DEFAULT_MISS; 11415 break; 11416 case RTE_FLOW_ACTION_TYPE_METER: 11417 mtr = actions->conf; 11418 if (!flow->meter) { 11419 fm = mlx5_flow_meter_attach(priv, mtr->mtr_id, 11420 attr, error); 11421 if (!fm) 11422 return rte_flow_error_set(error, 11423 rte_errno, 11424 RTE_FLOW_ERROR_TYPE_ACTION, 11425 NULL, 11426 "meter not found " 11427 "or invalid parameters"); 11428 flow->meter = fm->idx; 11429 } 11430 /* Set the meter action. */ 11431 if (!fm) { 11432 fm = mlx5_ipool_get(priv->sh->ipool 11433 [MLX5_IPOOL_MTR], flow->meter); 11434 if (!fm) 11435 return rte_flow_error_set(error, 11436 rte_errno, 11437 RTE_FLOW_ERROR_TYPE_ACTION, 11438 NULL, 11439 "meter not found " 11440 "or invalid parameters"); 11441 } 11442 dev_flow->dv.actions[actions_n++] = 11443 fm->mfts->meter_action; 11444 action_flags |= MLX5_FLOW_ACTION_METER; 11445 break; 11446 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP: 11447 if (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res, 11448 actions, error)) 11449 return -rte_errno; 11450 action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP; 11451 break; 11452 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP: 11453 if (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res, 11454 actions, error)) 11455 return -rte_errno; 11456 action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP; 11457 break; 11458 case RTE_FLOW_ACTION_TYPE_SAMPLE: 11459 sample_act_pos = actions_n; 11460 sample = (const struct rte_flow_action_sample *) 11461 action->conf; 11462 actions_n++; 11463 action_flags |= MLX5_FLOW_ACTION_SAMPLE; 11464 /* put encap action into group if work with port id */ 11465 if ((action_flags & MLX5_FLOW_ACTION_ENCAP) && 11466 (action_flags & MLX5_FLOW_ACTION_PORT_ID)) 11467 sample_act->action_flags |= 11468 MLX5_FLOW_ACTION_ENCAP; 11469 break; 11470 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD: 11471 if (flow_dv_convert_action_modify_field 11472 (dev, mhdr_res, actions, attr, error)) 11473 return -rte_errno; 11474 action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD; 11475 break; 11476 case RTE_FLOW_ACTION_TYPE_END: 11477 actions_end = true; 11478 if (mhdr_res->actions_num) { 11479 /* create modify action if needed. */ 11480 if (flow_dv_modify_hdr_resource_register 11481 (dev, mhdr_res, dev_flow, error)) 11482 return -rte_errno; 11483 dev_flow->dv.actions[modify_action_position] = 11484 handle->dvh.modify_hdr->action; 11485 } 11486 if (action_flags & MLX5_FLOW_ACTION_COUNT) { 11487 /* 11488 * Create one count action, to be used 11489 * by all sub-flows. 
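				 * The counter index is kept in flow->counter
				 * so every sub-flow of the same rte_flow
				 * attaches to the same counter object.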
11490 */ 11491 if (!flow->counter) { 11492 flow->counter = 11493 flow_dv_translate_create_counter 11494 (dev, dev_flow, count, 11495 age); 11496 if (!flow->counter) 11497 return rte_flow_error_set 11498 (error, rte_errno, 11499 RTE_FLOW_ERROR_TYPE_ACTION, 11500 NULL, "cannot create counter" 11501 " object."); 11502 } 11503 dev_flow->dv.actions[actions_n] = 11504 (flow_dv_counter_get_by_idx(dev, 11505 flow->counter, NULL))->action; 11506 actions_n++; 11507 } 11508 default: 11509 break; 11510 } 11511 if (mhdr_res->actions_num && 11512 modify_action_position == UINT32_MAX) 11513 modify_action_position = actions_n++; 11514 } 11515 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { 11516 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 11517 int item_type = items->type; 11518 11519 if (!mlx5_flow_os_item_supported(item_type)) 11520 return rte_flow_error_set(error, ENOTSUP, 11521 RTE_FLOW_ERROR_TYPE_ITEM, 11522 NULL, "item not supported"); 11523 switch (item_type) { 11524 case RTE_FLOW_ITEM_TYPE_PORT_ID: 11525 flow_dv_translate_item_port_id 11526 (dev, match_mask, match_value, items, attr); 11527 last_item = MLX5_FLOW_ITEM_PORT_ID; 11528 break; 11529 case RTE_FLOW_ITEM_TYPE_ETH: 11530 flow_dv_translate_item_eth(match_mask, match_value, 11531 items, tunnel, 11532 dev_flow->dv.group); 11533 matcher.priority = action_flags & 11534 MLX5_FLOW_ACTION_DEFAULT_MISS && 11535 !dev_flow->external ? 11536 MLX5_PRIORITY_MAP_L3 : 11537 MLX5_PRIORITY_MAP_L2; 11538 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 : 11539 MLX5_FLOW_LAYER_OUTER_L2; 11540 break; 11541 case RTE_FLOW_ITEM_TYPE_VLAN: 11542 flow_dv_translate_item_vlan(dev_flow, 11543 match_mask, match_value, 11544 items, tunnel, 11545 dev_flow->dv.group); 11546 matcher.priority = MLX5_PRIORITY_MAP_L2; 11547 last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 | 11548 MLX5_FLOW_LAYER_INNER_VLAN) : 11549 (MLX5_FLOW_LAYER_OUTER_L2 | 11550 MLX5_FLOW_LAYER_OUTER_VLAN); 11551 break; 11552 case RTE_FLOW_ITEM_TYPE_IPV4: 11553 mlx5_flow_tunnel_ip_check(items, next_protocol, 11554 &item_flags, &tunnel); 11555 flow_dv_translate_item_ipv4(match_mask, match_value, 11556 items, tunnel, 11557 dev_flow->dv.group); 11558 matcher.priority = MLX5_PRIORITY_MAP_L3; 11559 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 : 11560 MLX5_FLOW_LAYER_OUTER_L3_IPV4; 11561 if (items->mask != NULL && 11562 ((const struct rte_flow_item_ipv4 *) 11563 items->mask)->hdr.next_proto_id) { 11564 next_protocol = 11565 ((const struct rte_flow_item_ipv4 *) 11566 (items->spec))->hdr.next_proto_id; 11567 next_protocol &= 11568 ((const struct rte_flow_item_ipv4 *) 11569 (items->mask))->hdr.next_proto_id; 11570 } else { 11571 /* Reset for inner layer. */ 11572 next_protocol = 0xff; 11573 } 11574 break; 11575 case RTE_FLOW_ITEM_TYPE_IPV6: 11576 mlx5_flow_tunnel_ip_check(items, next_protocol, 11577 &item_flags, &tunnel); 11578 flow_dv_translate_item_ipv6(match_mask, match_value, 11579 items, tunnel, 11580 dev_flow->dv.group); 11581 matcher.priority = MLX5_PRIORITY_MAP_L3; 11582 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 : 11583 MLX5_FLOW_LAYER_OUTER_L3_IPV6; 11584 if (items->mask != NULL && 11585 ((const struct rte_flow_item_ipv6 *) 11586 items->mask)->hdr.proto) { 11587 next_protocol = 11588 ((const struct rte_flow_item_ipv6 *) 11589 items->spec)->hdr.proto; 11590 next_protocol &= 11591 ((const struct rte_flow_item_ipv6 *) 11592 items->mask)->hdr.proto; 11593 } else { 11594 /* Reset for inner layer. 
*/ 11595 next_protocol = 0xff; 11596 } 11597 break; 11598 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT: 11599 flow_dv_translate_item_ipv6_frag_ext(match_mask, 11600 match_value, 11601 items, tunnel); 11602 last_item = tunnel ? 11603 MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT : 11604 MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT; 11605 if (items->mask != NULL && 11606 ((const struct rte_flow_item_ipv6_frag_ext *) 11607 items->mask)->hdr.next_header) { 11608 next_protocol = 11609 ((const struct rte_flow_item_ipv6_frag_ext *) 11610 items->spec)->hdr.next_header; 11611 next_protocol &= 11612 ((const struct rte_flow_item_ipv6_frag_ext *) 11613 items->mask)->hdr.next_header; 11614 } else { 11615 /* Reset for inner layer. */ 11616 next_protocol = 0xff; 11617 } 11618 break; 11619 case RTE_FLOW_ITEM_TYPE_TCP: 11620 flow_dv_translate_item_tcp(match_mask, match_value, 11621 items, tunnel); 11622 matcher.priority = MLX5_PRIORITY_MAP_L4; 11623 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP : 11624 MLX5_FLOW_LAYER_OUTER_L4_TCP; 11625 break; 11626 case RTE_FLOW_ITEM_TYPE_UDP: 11627 flow_dv_translate_item_udp(match_mask, match_value, 11628 items, tunnel); 11629 matcher.priority = MLX5_PRIORITY_MAP_L4; 11630 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP : 11631 MLX5_FLOW_LAYER_OUTER_L4_UDP; 11632 break; 11633 case RTE_FLOW_ITEM_TYPE_GRE: 11634 flow_dv_translate_item_gre(match_mask, match_value, 11635 items, tunnel); 11636 matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc); 11637 last_item = MLX5_FLOW_LAYER_GRE; 11638 break; 11639 case RTE_FLOW_ITEM_TYPE_GRE_KEY: 11640 flow_dv_translate_item_gre_key(match_mask, 11641 match_value, items); 11642 last_item = MLX5_FLOW_LAYER_GRE_KEY; 11643 break; 11644 case RTE_FLOW_ITEM_TYPE_NVGRE: 11645 flow_dv_translate_item_nvgre(match_mask, match_value, 11646 items, tunnel); 11647 matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc); 11648 last_item = MLX5_FLOW_LAYER_GRE; 11649 break; 11650 case RTE_FLOW_ITEM_TYPE_VXLAN: 11651 flow_dv_translate_item_vxlan(match_mask, match_value, 11652 items, tunnel); 11653 matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc); 11654 last_item = MLX5_FLOW_LAYER_VXLAN; 11655 break; 11656 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: 11657 flow_dv_translate_item_vxlan_gpe(match_mask, 11658 match_value, items, 11659 tunnel); 11660 matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc); 11661 last_item = MLX5_FLOW_LAYER_VXLAN_GPE; 11662 break; 11663 case RTE_FLOW_ITEM_TYPE_GENEVE: 11664 flow_dv_translate_item_geneve(match_mask, match_value, 11665 items, tunnel); 11666 matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc); 11667 last_item = MLX5_FLOW_LAYER_GENEVE; 11668 break; 11669 case RTE_FLOW_ITEM_TYPE_GENEVE_OPT: 11670 ret = flow_dv_translate_item_geneve_opt(dev, match_mask, 11671 match_value, 11672 items, error); 11673 if (ret) 11674 return rte_flow_error_set(error, -ret, 11675 RTE_FLOW_ERROR_TYPE_ITEM, NULL, 11676 "cannot create GENEVE TLV option"); 11677 flow->geneve_tlv_option = 1; 11678 last_item = MLX5_FLOW_LAYER_GENEVE_OPT; 11679 break; 11680 case RTE_FLOW_ITEM_TYPE_MPLS: 11681 flow_dv_translate_item_mpls(match_mask, match_value, 11682 items, last_item, tunnel); 11683 matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc); 11684 last_item = MLX5_FLOW_LAYER_MPLS; 11685 break; 11686 case RTE_FLOW_ITEM_TYPE_MARK: 11687 flow_dv_translate_item_mark(dev, match_mask, 11688 match_value, items); 11689 last_item = MLX5_FLOW_ITEM_MARK; 11690 break; 11691 case RTE_FLOW_ITEM_TYPE_META: 11692 flow_dv_translate_item_meta(dev, match_mask, 11693 match_value, attr, items); 11694 last_item = 
MLX5_FLOW_ITEM_METADATA; 11695 break; 11696 case RTE_FLOW_ITEM_TYPE_ICMP: 11697 flow_dv_translate_item_icmp(match_mask, match_value, 11698 items, tunnel); 11699 last_item = MLX5_FLOW_LAYER_ICMP; 11700 break; 11701 case RTE_FLOW_ITEM_TYPE_ICMP6: 11702 flow_dv_translate_item_icmp6(match_mask, match_value, 11703 items, tunnel); 11704 last_item = MLX5_FLOW_LAYER_ICMP6; 11705 break; 11706 case RTE_FLOW_ITEM_TYPE_TAG: 11707 flow_dv_translate_item_tag(dev, match_mask, 11708 match_value, items); 11709 last_item = MLX5_FLOW_ITEM_TAG; 11710 break; 11711 case MLX5_RTE_FLOW_ITEM_TYPE_TAG: 11712 flow_dv_translate_mlx5_item_tag(dev, match_mask, 11713 match_value, items); 11714 last_item = MLX5_FLOW_ITEM_TAG; 11715 break; 11716 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE: 11717 flow_dv_translate_item_tx_queue(dev, match_mask, 11718 match_value, 11719 items); 11720 last_item = MLX5_FLOW_ITEM_TX_QUEUE; 11721 break; 11722 case RTE_FLOW_ITEM_TYPE_GTP: 11723 flow_dv_translate_item_gtp(match_mask, match_value, 11724 items, tunnel); 11725 matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc); 11726 last_item = MLX5_FLOW_LAYER_GTP; 11727 break; 11728 case RTE_FLOW_ITEM_TYPE_GTP_PSC: 11729 ret = flow_dv_translate_item_gtp_psc(match_mask, 11730 match_value, 11731 items); 11732 if (ret) 11733 return rte_flow_error_set(error, -ret, 11734 RTE_FLOW_ERROR_TYPE_ITEM, NULL, 11735 "cannot create GTP PSC item"); 11736 last_item = MLX5_FLOW_LAYER_GTP_PSC; 11737 break; 11738 case RTE_FLOW_ITEM_TYPE_ECPRI: 11739 if (!mlx5_flex_parser_ecpri_exist(dev)) { 11740 /* Create it only the first time to be used. */ 11741 ret = mlx5_flex_parser_ecpri_alloc(dev); 11742 if (ret) 11743 return rte_flow_error_set 11744 (error, -ret, 11745 RTE_FLOW_ERROR_TYPE_ITEM, 11746 NULL, 11747 "cannot create eCPRI parser"); 11748 } 11749 /* Adjust the length matcher and device flow value. */ 11750 matcher.mask.size = MLX5_ST_SZ_BYTES(fte_match_param); 11751 dev_flow->dv.value.size = 11752 MLX5_ST_SZ_BYTES(fte_match_param); 11753 flow_dv_translate_item_ecpri(dev, match_mask, 11754 match_value, items); 11755 /* No other protocol should follow eCPRI layer. */ 11756 last_item = MLX5_FLOW_LAYER_ECPRI; 11757 break; 11758 default: 11759 break; 11760 } 11761 item_flags |= last_item; 11762 } 11763 /* 11764 * When E-Switch mode is enabled, we have two cases where we need to 11765 * set the source port manually. 11766 * The first one, is in case of Nic steering rule, and the second is 11767 * E-Switch rule where no port_id item was found. In both cases 11768 * the source port is set according the current port in use. 11769 */ 11770 if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) && 11771 (priv->representor || priv->master)) { 11772 if (flow_dv_translate_item_port_id(dev, match_mask, 11773 match_value, NULL, attr)) 11774 return -rte_errno; 11775 } 11776 #ifdef RTE_LIBRTE_MLX5_DEBUG 11777 MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf, 11778 dev_flow->dv.value.buf)); 11779 #endif 11780 /* 11781 * Layers may be already initialized from prefix flow if this dev_flow 11782 * is the suffix flow. 11783 */ 11784 handle->layers |= item_flags; 11785 if (action_flags & MLX5_FLOW_ACTION_RSS) 11786 flow_dv_hashfields_set(dev_flow, rss_desc); 11787 /* If has RSS action in the sample action, the Sample/Mirror resource 11788 * should be registered after the hash filed be update. 
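	 * In other words, flow_dv_hashfields_set() above must run before
	 * the sample translation below.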
11789 */ 11790 if (action_flags & MLX5_FLOW_ACTION_SAMPLE) { 11791 ret = flow_dv_translate_action_sample(dev, 11792 sample, 11793 dev_flow, attr, 11794 &num_of_dest, 11795 sample_actions, 11796 &sample_res, 11797 error); 11798 if (ret < 0) 11799 return ret; 11800 ret = flow_dv_create_action_sample(dev, 11801 dev_flow, 11802 num_of_dest, 11803 &sample_res, 11804 &mdest_res, 11805 sample_actions, 11806 action_flags, 11807 error); 11808 if (ret < 0) 11809 return rte_flow_error_set 11810 (error, rte_errno, 11811 RTE_FLOW_ERROR_TYPE_ACTION, 11812 NULL, 11813 "cannot create sample action"); 11814 if (num_of_dest > 1) { 11815 dev_flow->dv.actions[sample_act_pos] = 11816 dev_flow->dv.dest_array_res->action; 11817 } else { 11818 dev_flow->dv.actions[sample_act_pos] = 11819 dev_flow->dv.sample_res->verbs_action; 11820 } 11821 } 11822 /* 11823 * For multiple destination (sample action with ratio=1), the encap 11824 * action and port id action will be combined into group action. 11825 * So need remove the original these actions in the flow and only 11826 * use the sample action instead of. 11827 */ 11828 if (num_of_dest > 1 && 11829 (sample_act->dr_port_id_action || sample_act->dr_jump_action)) { 11830 int i; 11831 void *temp_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0}; 11832 11833 for (i = 0; i < actions_n; i++) { 11834 if ((sample_act->dr_encap_action && 11835 sample_act->dr_encap_action == 11836 dev_flow->dv.actions[i]) || 11837 (sample_act->dr_port_id_action && 11838 sample_act->dr_port_id_action == 11839 dev_flow->dv.actions[i]) || 11840 (sample_act->dr_jump_action && 11841 sample_act->dr_jump_action == 11842 dev_flow->dv.actions[i])) 11843 continue; 11844 temp_actions[tmp_actions_n++] = dev_flow->dv.actions[i]; 11845 } 11846 memcpy((void *)dev_flow->dv.actions, 11847 (void *)temp_actions, 11848 tmp_actions_n * sizeof(void *)); 11849 actions_n = tmp_actions_n; 11850 } 11851 dev_flow->dv.actions_n = actions_n; 11852 dev_flow->act_flags = action_flags; 11853 /* Register matcher. */ 11854 matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf, 11855 matcher.mask.size); 11856 matcher.priority = mlx5_get_matcher_priority(dev, attr, 11857 matcher.priority); 11858 /* reserved field no needs to be set to 0 here. */ 11859 tbl_key.domain = attr->transfer; 11860 tbl_key.direction = attr->egress; 11861 tbl_key.table_id = dev_flow->dv.group; 11862 if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow, 11863 tunnel, attr->group, error)) 11864 return -rte_errno; 11865 return 0; 11866 } 11867 11868 /** 11869 * Set hash RX queue by hash fields (see enum ibv_rx_hash_fields) 11870 * and tunnel. 11871 * 11872 * @param[in, out] action 11873 * Shred RSS action holding hash RX queue objects. 11874 * @param[in] hash_fields 11875 * Defines combination of packet fields to participate in RX hash. 11876 * @param[in] tunnel 11877 * Tunnel type 11878 * @param[in] hrxq_idx 11879 * Hash RX queue index to set. 11880 * 11881 * @return 11882 * 0 on success, otherwise negative errno value. 11883 */ 11884 static int 11885 __flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action, 11886 const uint64_t hash_fields, 11887 uint32_t hrxq_idx) 11888 { 11889 uint32_t *hrxqs = action->hrxq; 11890 11891 switch (hash_fields & ~IBV_RX_HASH_INNER) { 11892 case MLX5_RSS_HASH_IPV4: 11893 /* fall-through. */ 11894 case MLX5_RSS_HASH_IPV4_DST_ONLY: 11895 /* fall-through. */ 11896 case MLX5_RSS_HASH_IPV4_SRC_ONLY: 11897 hrxqs[0] = hrxq_idx; 11898 return 0; 11899 case MLX5_RSS_HASH_IPV4_TCP: 11900 /* fall-through. 
*/ 11901 case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY: 11902 /* fall-through. */ 11903 case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY: 11904 hrxqs[1] = hrxq_idx; 11905 return 0; 11906 case MLX5_RSS_HASH_IPV4_UDP: 11907 /* fall-through. */ 11908 case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY: 11909 /* fall-through. */ 11910 case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY: 11911 hrxqs[2] = hrxq_idx; 11912 return 0; 11913 case MLX5_RSS_HASH_IPV6: 11914 /* fall-through. */ 11915 case MLX5_RSS_HASH_IPV6_DST_ONLY: 11916 /* fall-through. */ 11917 case MLX5_RSS_HASH_IPV6_SRC_ONLY: 11918 hrxqs[3] = hrxq_idx; 11919 return 0; 11920 case MLX5_RSS_HASH_IPV6_TCP: 11921 /* fall-through. */ 11922 case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY: 11923 /* fall-through. */ 11924 case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY: 11925 hrxqs[4] = hrxq_idx; 11926 return 0; 11927 case MLX5_RSS_HASH_IPV6_UDP: 11928 /* fall-through. */ 11929 case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY: 11930 /* fall-through. */ 11931 case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY: 11932 hrxqs[5] = hrxq_idx; 11933 return 0; 11934 case MLX5_RSS_HASH_NONE: 11935 hrxqs[6] = hrxq_idx; 11936 return 0; 11937 default: 11938 return -1; 11939 } 11940 } 11941 11942 /** 11943 * Look up for hash RX queue by hash fields (see enum ibv_rx_hash_fields) 11944 * and tunnel. 11945 * 11946 * @param[in] dev 11947 * Pointer to the Ethernet device structure. 11948 * @param[in] idx 11949 * Shared RSS action ID holding hash RX queue objects. 11950 * @param[in] hash_fields 11951 * Defines combination of packet fields to participate in RX hash. 11952 * @param[in] tunnel 11953 * Tunnel type 11954 * 11955 * @return 11956 * Valid hash RX queue index, otherwise 0. 11957 */ 11958 static uint32_t 11959 __flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx, 11960 const uint64_t hash_fields) 11961 { 11962 struct mlx5_priv *priv = dev->data->dev_private; 11963 struct mlx5_shared_action_rss *shared_rss = 11964 mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx); 11965 const uint32_t *hrxqs = shared_rss->hrxq; 11966 11967 switch (hash_fields & ~IBV_RX_HASH_INNER) { 11968 case MLX5_RSS_HASH_IPV4: 11969 /* fall-through. */ 11970 case MLX5_RSS_HASH_IPV4_DST_ONLY: 11971 /* fall-through. */ 11972 case MLX5_RSS_HASH_IPV4_SRC_ONLY: 11973 return hrxqs[0]; 11974 case MLX5_RSS_HASH_IPV4_TCP: 11975 /* fall-through. */ 11976 case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY: 11977 /* fall-through. */ 11978 case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY: 11979 return hrxqs[1]; 11980 case MLX5_RSS_HASH_IPV4_UDP: 11981 /* fall-through. */ 11982 case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY: 11983 /* fall-through. */ 11984 case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY: 11985 return hrxqs[2]; 11986 case MLX5_RSS_HASH_IPV6: 11987 /* fall-through. */ 11988 case MLX5_RSS_HASH_IPV6_DST_ONLY: 11989 /* fall-through. */ 11990 case MLX5_RSS_HASH_IPV6_SRC_ONLY: 11991 return hrxqs[3]; 11992 case MLX5_RSS_HASH_IPV6_TCP: 11993 /* fall-through. */ 11994 case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY: 11995 /* fall-through. */ 11996 case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY: 11997 return hrxqs[4]; 11998 case MLX5_RSS_HASH_IPV6_UDP: 11999 /* fall-through. */ 12000 case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY: 12001 /* fall-through. */ 12002 case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY: 12003 return hrxqs[5]; 12004 case MLX5_RSS_HASH_NONE: 12005 return hrxqs[6]; 12006 default: 12007 return 0; 12008 } 12009 12010 } 12011 12012 /** 12013 * Apply the flow to the NIC, lock free, 12014 * (mutex should be acquired by caller). 12015 * 12016 * @param[in] dev 12017 * Pointer to the Ethernet device structure. 
12018 * @param[in, out] flow 12019 * Pointer to flow structure. 12020 * @param[out] error 12021 * Pointer to error structure. 12022 * 12023 * @return 12024 * 0 on success, a negative errno value otherwise and rte_errno is set. 12025 */ 12026 static int 12027 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow, 12028 struct rte_flow_error *error) 12029 { 12030 struct mlx5_flow_dv_workspace *dv; 12031 struct mlx5_flow_handle *dh; 12032 struct mlx5_flow_handle_dv *dv_h; 12033 struct mlx5_flow *dev_flow; 12034 struct mlx5_priv *priv = dev->data->dev_private; 12035 uint32_t handle_idx; 12036 int n; 12037 int err; 12038 int idx; 12039 struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace(); 12040 struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc; 12041 12042 MLX5_ASSERT(wks); 12043 for (idx = wks->flow_idx - 1; idx >= 0; idx--) { 12044 dev_flow = &wks->flows[idx]; 12045 dv = &dev_flow->dv; 12046 dh = dev_flow->handle; 12047 dv_h = &dh->dvh; 12048 n = dv->actions_n; 12049 if (dh->fate_action == MLX5_FLOW_FATE_DROP) { 12050 if (dv->transfer) { 12051 MLX5_ASSERT(priv->sh->dr_drop_action); 12052 dv->actions[n++] = priv->sh->dr_drop_action; 12053 } else { 12054 #ifdef HAVE_MLX5DV_DR 12055 /* DR supports drop action placeholder. */ 12056 MLX5_ASSERT(priv->sh->dr_drop_action); 12057 dv->actions[n++] = priv->sh->dr_drop_action; 12058 #else 12059 /* For DV we use the explicit drop queue. */ 12060 MLX5_ASSERT(priv->drop_queue.hrxq); 12061 dv->actions[n++] = 12062 priv->drop_queue.hrxq->action; 12063 #endif 12064 } 12065 } else if ((dh->fate_action == MLX5_FLOW_FATE_QUEUE && 12066 !dv_h->rix_sample && !dv_h->rix_dest_array)) { 12067 struct mlx5_hrxq *hrxq; 12068 uint32_t hrxq_idx; 12069 12070 hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc, 12071 &hrxq_idx); 12072 if (!hrxq) { 12073 rte_flow_error_set 12074 (error, rte_errno, 12075 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 12076 "cannot get hash queue"); 12077 goto error; 12078 } 12079 dh->rix_hrxq = hrxq_idx; 12080 dv->actions[n++] = hrxq->action; 12081 } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) { 12082 struct mlx5_hrxq *hrxq = NULL; 12083 uint32_t hrxq_idx; 12084 12085 hrxq_idx = __flow_dv_action_rss_hrxq_lookup(dev, 12086 rss_desc->shared_rss, 12087 dev_flow->hash_fields); 12088 if (hrxq_idx) 12089 hrxq = mlx5_ipool_get 12090 (priv->sh->ipool[MLX5_IPOOL_HRXQ], 12091 hrxq_idx); 12092 if (!hrxq) { 12093 rte_flow_error_set 12094 (error, rte_errno, 12095 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 12096 "cannot get hash queue"); 12097 goto error; 12098 } 12099 dh->rix_srss = rss_desc->shared_rss; 12100 dv->actions[n++] = hrxq->action; 12101 } else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) { 12102 if (!priv->sh->default_miss_action) { 12103 rte_flow_error_set 12104 (error, rte_errno, 12105 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 12106 "default miss action not be created."); 12107 goto error; 12108 } 12109 dv->actions[n++] = priv->sh->default_miss_action; 12110 } 12111 err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object, 12112 (void *)&dv->value, n, 12113 dv->actions, &dh->drv_flow); 12114 if (err) { 12115 rte_flow_error_set(error, errno, 12116 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 12117 NULL, 12118 "hardware refuses to create flow"); 12119 goto error; 12120 } 12121 if (priv->vmwa_context && 12122 dh->vf_vlan.tag && !dh->vf_vlan.created) { 12123 /* 12124 * The rule contains the VLAN pattern. 12125 * For VF we are going to create VLAN 12126 * interface to make hypervisor set correct 12127 * e-Switch vport context. 
12128 */ 12129 mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan); 12130 } 12131 } 12132 return 0; 12133 error: 12134 err = rte_errno; /* Save rte_errno before cleanup. */ 12135 SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles, 12136 handle_idx, dh, next) { 12137 /* hrxq is union, don't clear it if the flag is not set. */ 12138 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq) { 12139 mlx5_hrxq_release(dev, dh->rix_hrxq); 12140 dh->rix_hrxq = 0; 12141 } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) { 12142 dh->rix_srss = 0; 12143 } 12144 if (dh->vf_vlan.tag && dh->vf_vlan.created) 12145 mlx5_vlan_vmwa_release(dev, &dh->vf_vlan); 12146 } 12147 rte_errno = err; /* Restore rte_errno. */ 12148 return -rte_errno; 12149 } 12150 12151 void 12152 flow_dv_matcher_remove_cb(struct mlx5_cache_list *list __rte_unused, 12153 struct mlx5_cache_entry *entry) 12154 { 12155 struct mlx5_flow_dv_matcher *cache = container_of(entry, typeof(*cache), 12156 entry); 12157 12158 claim_zero(mlx5_flow_os_destroy_flow_matcher(cache->matcher_object)); 12159 mlx5_free(cache); 12160 } 12161 12162 /** 12163 * Release the flow matcher. 12164 * 12165 * @param dev 12166 * Pointer to Ethernet device. 12167 * @param port_id 12168 * Index to port ID action resource. 12169 * 12170 * @return 12171 * 1 while a reference on it exists, 0 when freed. 12172 */ 12173 static int 12174 flow_dv_matcher_release(struct rte_eth_dev *dev, 12175 struct mlx5_flow_handle *handle) 12176 { 12177 struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher; 12178 struct mlx5_flow_tbl_data_entry *tbl = container_of(matcher->tbl, 12179 typeof(*tbl), tbl); 12180 int ret; 12181 12182 MLX5_ASSERT(matcher->matcher_object); 12183 ret = mlx5_cache_unregister(&tbl->matchers, &matcher->entry); 12184 flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl->tbl); 12185 return ret; 12186 } 12187 12188 /** 12189 * Release encap_decap resource. 12190 * 12191 * @param list 12192 * Pointer to the hash list. 12193 * @param entry 12194 * Pointer to exist resource entry object. 12195 */ 12196 void 12197 flow_dv_encap_decap_remove_cb(struct mlx5_hlist *list, 12198 struct mlx5_hlist_entry *entry) 12199 { 12200 struct mlx5_dev_ctx_shared *sh = list->ctx; 12201 struct mlx5_flow_dv_encap_decap_resource *res = 12202 container_of(entry, typeof(*res), entry); 12203 12204 claim_zero(mlx5_flow_os_destroy_flow_action(res->action)); 12205 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx); 12206 } 12207 12208 /** 12209 * Release an encap/decap resource. 12210 * 12211 * @param dev 12212 * Pointer to Ethernet device. 12213 * @param encap_decap_idx 12214 * Index of encap decap resource. 12215 * 12216 * @return 12217 * 1 while a reference on it exists, 0 when freed. 12218 */ 12219 static int 12220 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev, 12221 uint32_t encap_decap_idx) 12222 { 12223 struct mlx5_priv *priv = dev->data->dev_private; 12224 struct mlx5_flow_dv_encap_decap_resource *cache_resource; 12225 12226 cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP], 12227 encap_decap_idx); 12228 if (!cache_resource) 12229 return 0; 12230 MLX5_ASSERT(cache_resource->action); 12231 return mlx5_hlist_unregister(priv->sh->encaps_decaps, 12232 &cache_resource->entry); 12233 } 12234 12235 /** 12236 * Release an jump to table action resource. 12237 * 12238 * @param dev 12239 * Pointer to Ethernet device. 12240 * @param rix_jump 12241 * Index to the jump action resource. 
12242 * 12243 * @return 12244 * 1 while a reference on it exists, 0 when freed. 12245 */ 12246 static int 12247 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev, 12248 uint32_t rix_jump) 12249 { 12250 struct mlx5_priv *priv = dev->data->dev_private; 12251 struct mlx5_flow_tbl_data_entry *tbl_data; 12252 12253 tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP], 12254 rix_jump); 12255 if (!tbl_data) 12256 return 0; 12257 return flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl_data->tbl); 12258 } 12259 12260 void 12261 flow_dv_modify_remove_cb(struct mlx5_hlist *list __rte_unused, 12262 struct mlx5_hlist_entry *entry) 12263 { 12264 struct mlx5_flow_dv_modify_hdr_resource *res = 12265 container_of(entry, typeof(*res), entry); 12266 12267 claim_zero(mlx5_flow_os_destroy_flow_action(res->action)); 12268 mlx5_free(entry); 12269 } 12270 12271 /** 12272 * Release a modify-header resource. 12273 * 12274 * @param dev 12275 * Pointer to Ethernet device. 12276 * @param handle 12277 * Pointer to mlx5_flow_handle. 12278 * 12279 * @return 12280 * 1 while a reference on it exists, 0 when freed. 12281 */ 12282 static int 12283 flow_dv_modify_hdr_resource_release(struct rte_eth_dev *dev, 12284 struct mlx5_flow_handle *handle) 12285 { 12286 struct mlx5_priv *priv = dev->data->dev_private; 12287 struct mlx5_flow_dv_modify_hdr_resource *entry = handle->dvh.modify_hdr; 12288 12289 MLX5_ASSERT(entry->action); 12290 return mlx5_hlist_unregister(priv->sh->modify_cmds, &entry->entry); 12291 } 12292 12293 void 12294 flow_dv_port_id_remove_cb(struct mlx5_cache_list *list, 12295 struct mlx5_cache_entry *entry) 12296 { 12297 struct mlx5_dev_ctx_shared *sh = list->ctx; 12298 struct mlx5_flow_dv_port_id_action_resource *cache = 12299 container_of(entry, typeof(*cache), entry); 12300 12301 claim_zero(mlx5_flow_os_destroy_flow_action(cache->action)); 12302 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], cache->idx); 12303 } 12304 12305 /** 12306 * Release port ID action resource. 12307 * 12308 * @param dev 12309 * Pointer to Ethernet device. 12310 * @param handle 12311 * Pointer to mlx5_flow_handle. 12312 * 12313 * @return 12314 * 1 while a reference on it exists, 0 when freed. 12315 */ 12316 static int 12317 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev, 12318 uint32_t port_id) 12319 { 12320 struct mlx5_priv *priv = dev->data->dev_private; 12321 struct mlx5_flow_dv_port_id_action_resource *cache; 12322 12323 cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID], port_id); 12324 if (!cache) 12325 return 0; 12326 MLX5_ASSERT(cache->action); 12327 return mlx5_cache_unregister(&priv->sh->port_id_action_list, 12328 &cache->entry); 12329 } 12330 12331 /** 12332 * Release shared RSS action resource. 12333 * 12334 * @param dev 12335 * Pointer to Ethernet device. 12336 * @param srss 12337 * Shared RSS action index. 
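 *
 * Only the reference taken by the flow is dropped here; the shared RSS
 * action object itself is not destroyed by this call.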
12338 */ 12339 static void 12340 flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss) 12341 { 12342 struct mlx5_priv *priv = dev->data->dev_private; 12343 struct mlx5_shared_action_rss *shared_rss; 12344 12345 shared_rss = mlx5_ipool_get 12346 (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], srss); 12347 __atomic_sub_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED); 12348 } 12349 12350 void 12351 flow_dv_push_vlan_remove_cb(struct mlx5_cache_list *list, 12352 struct mlx5_cache_entry *entry) 12353 { 12354 struct mlx5_dev_ctx_shared *sh = list->ctx; 12355 struct mlx5_flow_dv_push_vlan_action_resource *cache = 12356 container_of(entry, typeof(*cache), entry); 12357 12358 claim_zero(mlx5_flow_os_destroy_flow_action(cache->action)); 12359 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], cache->idx); 12360 } 12361 12362 /** 12363 * Release push vlan action resource. 12364 * 12365 * @param dev 12366 * Pointer to Ethernet device. 12367 * @param handle 12368 * Pointer to mlx5_flow_handle. 12369 * 12370 * @return 12371 * 1 while a reference on it exists, 0 when freed. 12372 */ 12373 static int 12374 flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev, 12375 struct mlx5_flow_handle *handle) 12376 { 12377 struct mlx5_priv *priv = dev->data->dev_private; 12378 struct mlx5_flow_dv_push_vlan_action_resource *cache; 12379 uint32_t idx = handle->dvh.rix_push_vlan; 12380 12381 cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx); 12382 if (!cache) 12383 return 0; 12384 MLX5_ASSERT(cache->action); 12385 return mlx5_cache_unregister(&priv->sh->push_vlan_action_list, 12386 &cache->entry); 12387 } 12388 12389 /** 12390 * Release the fate resource. 12391 * 12392 * @param dev 12393 * Pointer to Ethernet device. 12394 * @param handle 12395 * Pointer to mlx5_flow_handle. 12396 */ 12397 static void 12398 flow_dv_fate_resource_release(struct rte_eth_dev *dev, 12399 struct mlx5_flow_handle *handle) 12400 { 12401 if (!handle->rix_fate) 12402 return; 12403 switch (handle->fate_action) { 12404 case MLX5_FLOW_FATE_QUEUE: 12405 if (!handle->dvh.rix_sample && !handle->dvh.rix_dest_array) 12406 mlx5_hrxq_release(dev, handle->rix_hrxq); 12407 break; 12408 case MLX5_FLOW_FATE_JUMP: 12409 flow_dv_jump_tbl_resource_release(dev, handle->rix_jump); 12410 break; 12411 case MLX5_FLOW_FATE_PORT_ID: 12412 flow_dv_port_id_action_resource_release(dev, 12413 handle->rix_port_id_action); 12414 break; 12415 default: 12416 DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action); 12417 break; 12418 } 12419 handle->rix_fate = 0; 12420 } 12421 12422 void 12423 flow_dv_sample_remove_cb(struct mlx5_cache_list *list __rte_unused, 12424 struct mlx5_cache_entry *entry) 12425 { 12426 struct mlx5_flow_dv_sample_resource *cache_resource = 12427 container_of(entry, typeof(*cache_resource), entry); 12428 struct rte_eth_dev *dev = cache_resource->dev; 12429 struct mlx5_priv *priv = dev->data->dev_private; 12430 12431 if (cache_resource->verbs_action) 12432 claim_zero(mlx5_flow_os_destroy_flow_action 12433 (cache_resource->verbs_action)); 12434 if (cache_resource->normal_path_tbl) 12435 flow_dv_tbl_resource_release(MLX5_SH(dev), 12436 cache_resource->normal_path_tbl); 12437 flow_dv_sample_sub_actions_release(dev, 12438 &cache_resource->sample_idx); 12439 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE], 12440 cache_resource->idx); 12441 DRV_LOG(DEBUG, "sample resource %p: removed", 12442 (void *)cache_resource); 12443 } 12444 12445 /** 12446 * Release an sample resource. 
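 * The resource is reference counted in the sample action cache list; the
 * underlying objects are freed by flow_dv_sample_remove_cb() once the last
 * reference is released.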
12447 * 12448 * @param dev 12449 * Pointer to Ethernet device. 12450 * @param handle 12451 * Pointer to mlx5_flow_handle. 12452 * 12453 * @return 12454 * 1 while a reference on it exists, 0 when freed. 12455 */ 12456 static int 12457 flow_dv_sample_resource_release(struct rte_eth_dev *dev, 12458 struct mlx5_flow_handle *handle) 12459 { 12460 struct mlx5_priv *priv = dev->data->dev_private; 12461 struct mlx5_flow_dv_sample_resource *cache_resource; 12462 12463 cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_SAMPLE], 12464 handle->dvh.rix_sample); 12465 if (!cache_resource) 12466 return 0; 12467 MLX5_ASSERT(cache_resource->verbs_action); 12468 return mlx5_cache_unregister(&priv->sh->sample_action_list, 12469 &cache_resource->entry); 12470 } 12471 12472 void 12473 flow_dv_dest_array_remove_cb(struct mlx5_cache_list *list __rte_unused, 12474 struct mlx5_cache_entry *entry) 12475 { 12476 struct mlx5_flow_dv_dest_array_resource *cache_resource = 12477 container_of(entry, typeof(*cache_resource), entry); 12478 struct rte_eth_dev *dev = cache_resource->dev; 12479 struct mlx5_priv *priv = dev->data->dev_private; 12480 uint32_t i = 0; 12481 12482 MLX5_ASSERT(cache_resource->action); 12483 if (cache_resource->action) 12484 claim_zero(mlx5_flow_os_destroy_flow_action 12485 (cache_resource->action)); 12486 for (; i < cache_resource->num_of_dest; i++) 12487 flow_dv_sample_sub_actions_release(dev, 12488 &cache_resource->sample_idx[i]); 12489 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY], 12490 cache_resource->idx); 12491 DRV_LOG(DEBUG, "destination array resource %p: removed", 12492 (void *)cache_resource); 12493 } 12494 12495 /** 12496 * Release an destination array resource. 12497 * 12498 * @param dev 12499 * Pointer to Ethernet device. 12500 * @param handle 12501 * Pointer to mlx5_flow_handle. 12502 * 12503 * @return 12504 * 1 while a reference on it exists, 0 when freed. 12505 */ 12506 static int 12507 flow_dv_dest_array_resource_release(struct rte_eth_dev *dev, 12508 struct mlx5_flow_handle *handle) 12509 { 12510 struct mlx5_priv *priv = dev->data->dev_private; 12511 struct mlx5_flow_dv_dest_array_resource *cache; 12512 12513 cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY], 12514 handle->dvh.rix_dest_array); 12515 if (!cache) 12516 return 0; 12517 MLX5_ASSERT(cache->action); 12518 return mlx5_cache_unregister(&priv->sh->dest_array_list, 12519 &cache->entry); 12520 } 12521 12522 static void 12523 flow_dv_geneve_tlv_option_resource_release(struct rte_eth_dev *dev) 12524 { 12525 struct mlx5_priv *priv = dev->data->dev_private; 12526 struct mlx5_dev_ctx_shared *sh = priv->sh; 12527 struct mlx5_geneve_tlv_option_resource *geneve_opt_resource = 12528 sh->geneve_tlv_option_resource; 12529 rte_spinlock_lock(&sh->geneve_tlv_opt_sl); 12530 if (geneve_opt_resource) { 12531 if (!(__atomic_sub_fetch(&geneve_opt_resource->refcnt, 1, 12532 __ATOMIC_RELAXED))) { 12533 claim_zero(mlx5_devx_cmd_destroy 12534 (geneve_opt_resource->obj)); 12535 mlx5_free(sh->geneve_tlv_option_resource); 12536 sh->geneve_tlv_option_resource = NULL; 12537 } 12538 } 12539 rte_spinlock_unlock(&sh->geneve_tlv_opt_sl); 12540 } 12541 12542 /** 12543 * Remove the flow from the NIC but keeps it in memory. 12544 * Lock free, (mutex should be acquired by caller). 12545 * 12546 * @param[in] dev 12547 * Pointer to Ethernet device. 12548 * @param[in, out] flow 12549 * Pointer to flow structure. 
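 *
 * Hardware flow objects and queue fate resources are released, while the
 * flow handles themselves remain allocated.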
12550 */ 12551 static void 12552 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow) 12553 { 12554 struct mlx5_flow_handle *dh; 12555 uint32_t handle_idx; 12556 struct mlx5_priv *priv = dev->data->dev_private; 12557 12558 if (!flow) 12559 return; 12560 handle_idx = flow->dev_handles; 12561 while (handle_idx) { 12562 dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], 12563 handle_idx); 12564 if (!dh) 12565 return; 12566 if (dh->drv_flow) { 12567 claim_zero(mlx5_flow_os_destroy_flow(dh->drv_flow)); 12568 dh->drv_flow = NULL; 12569 } 12570 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE) 12571 flow_dv_fate_resource_release(dev, dh); 12572 if (dh->vf_vlan.tag && dh->vf_vlan.created) 12573 mlx5_vlan_vmwa_release(dev, &dh->vf_vlan); 12574 handle_idx = dh->next.next; 12575 } 12576 } 12577 12578 /** 12579 * Remove the flow from the NIC and the memory. 12580 * Lock free, (mutex should be acquired by caller). 12581 * 12582 * @param[in] dev 12583 * Pointer to the Ethernet device structure. 12584 * @param[in, out] flow 12585 * Pointer to flow structure. 12586 */ 12587 static void 12588 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) 12589 { 12590 struct mlx5_flow_handle *dev_handle; 12591 struct mlx5_priv *priv = dev->data->dev_private; 12592 uint32_t srss = 0; 12593 12594 if (!flow) 12595 return; 12596 flow_dv_remove(dev, flow); 12597 if (flow->counter) { 12598 flow_dv_counter_free(dev, flow->counter); 12599 flow->counter = 0; 12600 } 12601 if (flow->meter) { 12602 struct mlx5_flow_meter *fm; 12603 12604 fm = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MTR], 12605 flow->meter); 12606 if (fm) 12607 mlx5_flow_meter_detach(fm); 12608 flow->meter = 0; 12609 } 12610 if (flow->age) 12611 flow_dv_aso_age_release(dev, flow->age); 12612 if (flow->geneve_tlv_option) { 12613 flow_dv_geneve_tlv_option_resource_release(dev); 12614 flow->geneve_tlv_option = 0; 12615 } 12616 while (flow->dev_handles) { 12617 uint32_t tmp_idx = flow->dev_handles; 12618 12619 dev_handle = mlx5_ipool_get(priv->sh->ipool 12620 [MLX5_IPOOL_MLX5_FLOW], tmp_idx); 12621 if (!dev_handle) 12622 return; 12623 flow->dev_handles = dev_handle->next.next; 12624 if (dev_handle->dvh.matcher) 12625 flow_dv_matcher_release(dev, dev_handle); 12626 if (dev_handle->dvh.rix_sample) 12627 flow_dv_sample_resource_release(dev, dev_handle); 12628 if (dev_handle->dvh.rix_dest_array) 12629 flow_dv_dest_array_resource_release(dev, dev_handle); 12630 if (dev_handle->dvh.rix_encap_decap) 12631 flow_dv_encap_decap_resource_release(dev, 12632 dev_handle->dvh.rix_encap_decap); 12633 if (dev_handle->dvh.modify_hdr) 12634 flow_dv_modify_hdr_resource_release(dev, dev_handle); 12635 if (dev_handle->dvh.rix_push_vlan) 12636 flow_dv_push_vlan_action_resource_release(dev, 12637 dev_handle); 12638 if (dev_handle->dvh.rix_tag) 12639 flow_dv_tag_release(dev, 12640 dev_handle->dvh.rix_tag); 12641 if (dev_handle->fate_action != MLX5_FLOW_FATE_SHARED_RSS) 12642 flow_dv_fate_resource_release(dev, dev_handle); 12643 else if (!srss) 12644 srss = dev_handle->rix_srss; 12645 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], 12646 tmp_idx); 12647 } 12648 if (srss) 12649 flow_dv_shared_rss_action_release(dev, srss); 12650 } 12651 12652 /** 12653 * Release array of hash RX queue objects. 12654 * Helper function. 12655 * 12656 * @param[in] dev 12657 * Pointer to the Ethernet device structure. 12658 * @param[in, out] hrxqs 12659 * Array of hash RX queue objects. 
12660 * 12661 * @return 12662 * Total number of references to hash RX queue objects in *hrxqs* array 12663 * after this operation. 12664 */ 12665 static int 12666 __flow_dv_hrxqs_release(struct rte_eth_dev *dev, 12667 uint32_t (*hrxqs)[MLX5_RSS_HASH_FIELDS_LEN]) 12668 { 12669 size_t i; 12670 int remaining = 0; 12671 12672 for (i = 0; i < RTE_DIM(*hrxqs); i++) { 12673 int ret = mlx5_hrxq_release(dev, (*hrxqs)[i]); 12674 12675 if (!ret) 12676 (*hrxqs)[i] = 0; 12677 remaining += ret; 12678 } 12679 return remaining; 12680 } 12681 12682 /** 12683 * Release all hash RX queue objects representing shared RSS action. 12684 * 12685 * @param[in] dev 12686 * Pointer to the Ethernet device structure. 12687 * @param[in, out] action 12688 * Shared RSS action to remove hash RX queue objects from. 12689 * 12690 * @return 12691 * Total number of references to hash RX queue objects stored in *action* 12692 * after this operation. 12693 * Expected to be 0 if no external references held. 12694 */ 12695 static int 12696 __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev, 12697 struct mlx5_shared_action_rss *shared_rss) 12698 { 12699 return __flow_dv_hrxqs_release(dev, &shared_rss->hrxq); 12700 } 12701 12702 /** 12703 * Adjust L3/L4 hash value of pre-created shared RSS hrxq according to 12704 * user input. 12705 * 12706 * Only one hash value is available for one L3+L4 combination: 12707 * for example: 12708 * MLX5_RSS_HASH_IPV4, MLX5_RSS_HASH_IPV4_SRC_ONLY, and 12709 * MLX5_RSS_HASH_IPV4_DST_ONLY are mutually exclusive so they can share 12710 * same slot in mlx5_rss_hash_fields. 12711 * 12712 * @param[in] rss 12713 * Pointer to the shared action RSS conf. 12714 * @param[in, out] hash_field 12715 * hash_field variable needed to be adjusted. 12716 * 12717 * @return 12718 * void 12719 */ 12720 static void 12721 __flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss, 12722 uint64_t *hash_field) 12723 { 12724 uint64_t rss_types = rss->origin.types; 12725 12726 switch (*hash_field & ~IBV_RX_HASH_INNER) { 12727 case MLX5_RSS_HASH_IPV4: 12728 if (rss_types & MLX5_IPV4_LAYER_TYPES) { 12729 *hash_field &= ~MLX5_RSS_HASH_IPV4; 12730 if (rss_types & ETH_RSS_L3_DST_ONLY) 12731 *hash_field |= IBV_RX_HASH_DST_IPV4; 12732 else if (rss_types & ETH_RSS_L3_SRC_ONLY) 12733 *hash_field |= IBV_RX_HASH_SRC_IPV4; 12734 else 12735 *hash_field |= MLX5_RSS_HASH_IPV4; 12736 } 12737 return; 12738 case MLX5_RSS_HASH_IPV6: 12739 if (rss_types & MLX5_IPV6_LAYER_TYPES) { 12740 *hash_field &= ~MLX5_RSS_HASH_IPV6; 12741 if (rss_types & ETH_RSS_L3_DST_ONLY) 12742 *hash_field |= IBV_RX_HASH_DST_IPV6; 12743 else if (rss_types & ETH_RSS_L3_SRC_ONLY) 12744 *hash_field |= IBV_RX_HASH_SRC_IPV6; 12745 else 12746 *hash_field |= MLX5_RSS_HASH_IPV6; 12747 } 12748 return; 12749 case MLX5_RSS_HASH_IPV4_UDP: 12750 /* fall-through. */ 12751 case MLX5_RSS_HASH_IPV6_UDP: 12752 if (rss_types & ETH_RSS_UDP) { 12753 *hash_field &= ~MLX5_UDP_IBV_RX_HASH; 12754 if (rss_types & ETH_RSS_L4_DST_ONLY) 12755 *hash_field |= IBV_RX_HASH_DST_PORT_UDP; 12756 else if (rss_types & ETH_RSS_L4_SRC_ONLY) 12757 *hash_field |= IBV_RX_HASH_SRC_PORT_UDP; 12758 else 12759 *hash_field |= MLX5_UDP_IBV_RX_HASH; 12760 } 12761 return; 12762 case MLX5_RSS_HASH_IPV4_TCP: 12763 /* fall-through. 
*/ 12764 case MLX5_RSS_HASH_IPV6_TCP: 12765 if (rss_types & ETH_RSS_TCP) { 12766 *hash_field &= ~MLX5_TCP_IBV_RX_HASH; 12767 if (rss_types & ETH_RSS_L4_DST_ONLY) 12768 *hash_field |= IBV_RX_HASH_DST_PORT_TCP; 12769 else if (rss_types & ETH_RSS_L4_SRC_ONLY) 12770 *hash_field |= IBV_RX_HASH_SRC_PORT_TCP; 12771 else 12772 *hash_field |= MLX5_TCP_IBV_RX_HASH; 12773 } 12774 return; 12775 default: 12776 return; 12777 } 12778 } 12779 12780 /** 12781 * Setup shared RSS action. 12782 * Prepare set of hash RX queue objects sufficient to handle all valid 12783 * hash_fields combinations (see enum ibv_rx_hash_fields). 12784 * 12785 * @param[in] dev 12786 * Pointer to the Ethernet device structure. 12787 * @param[in] action_idx 12788 * Shared RSS action ipool index. 12789 * @param[in, out] action 12790 * Partially initialized shared RSS action. 12791 * @param[out] error 12792 * Perform verbose error reporting if not NULL. Initialized in case of 12793 * error only. 12794 * 12795 * @return 12796 * 0 on success, otherwise negative errno value. 12797 */ 12798 static int 12799 __flow_dv_action_rss_setup(struct rte_eth_dev *dev, 12800 uint32_t action_idx, 12801 struct mlx5_shared_action_rss *shared_rss, 12802 struct rte_flow_error *error) 12803 { 12804 struct mlx5_flow_rss_desc rss_desc = { 0 }; 12805 size_t i; 12806 int err; 12807 12808 if (mlx5_ind_table_obj_setup(dev, shared_rss->ind_tbl)) { 12809 return rte_flow_error_set(error, rte_errno, 12810 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 12811 "cannot setup indirection table"); 12812 } 12813 memcpy(rss_desc.key, shared_rss->origin.key, MLX5_RSS_HASH_KEY_LEN); 12814 rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN; 12815 rss_desc.const_q = shared_rss->origin.queue; 12816 rss_desc.queue_num = shared_rss->origin.queue_num; 12817 /* Set non-zero value to indicate a shared RSS. */ 12818 rss_desc.shared_rss = action_idx; 12819 rss_desc.ind_tbl = shared_rss->ind_tbl; 12820 for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) { 12821 uint32_t hrxq_idx; 12822 uint64_t hash_fields = mlx5_rss_hash_fields[i]; 12823 int tunnel = 0; 12824 12825 __flow_dv_action_rss_l34_hash_adjust(shared_rss, &hash_fields); 12826 if (shared_rss->origin.level > 1) { 12827 hash_fields |= IBV_RX_HASH_INNER; 12828 tunnel = 1; 12829 } 12830 rss_desc.tunnel = tunnel; 12831 rss_desc.hash_fields = hash_fields; 12832 hrxq_idx = mlx5_hrxq_get(dev, &rss_desc); 12833 if (!hrxq_idx) { 12834 rte_flow_error_set 12835 (error, rte_errno, 12836 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 12837 "cannot get hash queue"); 12838 goto error_hrxq_new; 12839 } 12840 err = __flow_dv_action_rss_hrxq_set 12841 (shared_rss, hash_fields, hrxq_idx); 12842 MLX5_ASSERT(!err); 12843 } 12844 return 0; 12845 error_hrxq_new: 12846 err = rte_errno; 12847 __flow_dv_action_rss_hrxqs_release(dev, shared_rss); 12848 if (!mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true)) 12849 shared_rss->ind_tbl = NULL; 12850 rte_errno = err; 12851 return -rte_errno; 12852 } 12853 12854 /** 12855 * Create shared RSS action. 12856 * 12857 * @param[in] dev 12858 * Pointer to the Ethernet device structure. 12859 * @param[in] conf 12860 * Shared action configuration. 12861 * @param[in] rss 12862 * RSS action specification used to create shared action. 12863 * @param[out] error 12864 * Perform verbose error reporting if not NULL. Initialized in case of 12865 * error only. 12866 * 12867 * @return 12868 * A valid shared action ID in case of success, 0 otherwise and 12869 * rte_errno is set. 
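 *
 * This is reached through the .action_create driver op. A minimal sketch of
 * the application-level call that ends up here (illustrative only, names are
 * placeholders, not code from this driver):
 *
 *     struct rte_flow_indir_action_conf conf = { .ingress = 1 };
 *     struct rte_flow_action action = {
 *         .type = RTE_FLOW_ACTION_TYPE_RSS,
 *         .conf = &rss_conf,            rss_conf is a rte_flow_action_rss
 *     };
 *     handle = rte_flow_action_handle_create(port_id, &conf, &action, &err);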
12870 */ 12871 static uint32_t 12872 __flow_dv_action_rss_create(struct rte_eth_dev *dev, 12873 const struct rte_flow_indir_action_conf *conf, 12874 const struct rte_flow_action_rss *rss, 12875 struct rte_flow_error *error) 12876 { 12877 struct mlx5_priv *priv = dev->data->dev_private; 12878 struct mlx5_shared_action_rss *shared_rss = NULL; 12879 void *queue = NULL; 12880 struct rte_flow_action_rss *origin; 12881 const uint8_t *rss_key; 12882 uint32_t queue_size = rss->queue_num * sizeof(uint16_t); 12883 uint32_t idx; 12884 12885 RTE_SET_USED(conf); 12886 queue = mlx5_malloc(0, RTE_ALIGN_CEIL(queue_size, sizeof(void *)), 12887 0, SOCKET_ID_ANY); 12888 shared_rss = mlx5_ipool_zmalloc 12889 (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], &idx); 12890 if (!shared_rss || !queue) { 12891 rte_flow_error_set(error, ENOMEM, 12892 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 12893 "cannot allocate resource memory"); 12894 goto error_rss_init; 12895 } 12896 if (idx > (1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET)) { 12897 rte_flow_error_set(error, E2BIG, 12898 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 12899 "rss action number out of range"); 12900 goto error_rss_init; 12901 } 12902 shared_rss->ind_tbl = mlx5_malloc(MLX5_MEM_ZERO, 12903 sizeof(*shared_rss->ind_tbl), 12904 0, SOCKET_ID_ANY); 12905 if (!shared_rss->ind_tbl) { 12906 rte_flow_error_set(error, ENOMEM, 12907 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 12908 "cannot allocate resource memory"); 12909 goto error_rss_init; 12910 } 12911 memcpy(queue, rss->queue, queue_size); 12912 shared_rss->ind_tbl->queues = queue; 12913 shared_rss->ind_tbl->queues_n = rss->queue_num; 12914 origin = &shared_rss->origin; 12915 origin->func = rss->func; 12916 origin->level = rss->level; 12917 /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */ 12918 origin->types = !rss->types ? ETH_RSS_IP : rss->types; 12919 /* NULL RSS key indicates default RSS key. */ 12920 rss_key = !rss->key ? rss_hash_default_key : rss->key; 12921 memcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN); 12922 origin->key = &shared_rss->key[0]; 12923 origin->key_len = MLX5_RSS_HASH_KEY_LEN; 12924 origin->queue = queue; 12925 origin->queue_num = rss->queue_num; 12926 if (__flow_dv_action_rss_setup(dev, idx, shared_rss, error)) 12927 goto error_rss_init; 12928 rte_spinlock_init(&shared_rss->action_rss_sl); 12929 __atomic_add_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED); 12930 rte_spinlock_lock(&priv->shared_act_sl); 12931 ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], 12932 &priv->rss_shared_actions, idx, shared_rss, next); 12933 rte_spinlock_unlock(&priv->shared_act_sl); 12934 return idx; 12935 error_rss_init: 12936 if (shared_rss) { 12937 if (shared_rss->ind_tbl) 12938 mlx5_free(shared_rss->ind_tbl); 12939 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], 12940 idx); 12941 } 12942 if (queue) 12943 mlx5_free(queue); 12944 return 0; 12945 } 12946 12947 /** 12948 * Destroy the shared RSS action. 12949 * Release related hash RX queue objects. 12950 * 12951 * @param[in] dev 12952 * Pointer to the Ethernet device structure. 12953 * @param[in] idx 12954 * The shared RSS action object ID to be removed. 12955 * @param[out] error 12956 * Perform verbose error reporting if not NULL. Initialized in case of 12957 * error only. 12958 * 12959 * @return 12960 * 0 on success, otherwise negative errno value. 
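 *
 * The release is refused with EBUSY while any hash Rx queue, flow rule or
 * the indirection table still holds a reference; only a refcnt of exactly 1
 * (the creation reference) can be atomically swapped to 0 here.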
12961 */ 12962 static int 12963 __flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx, 12964 struct rte_flow_error *error) 12965 { 12966 struct mlx5_priv *priv = dev->data->dev_private; 12967 struct mlx5_shared_action_rss *shared_rss = 12968 mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx); 12969 uint32_t old_refcnt = 1; 12970 int remaining; 12971 uint16_t *queue = NULL; 12972 12973 if (!shared_rss) 12974 return rte_flow_error_set(error, EINVAL, 12975 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 12976 "invalid shared action"); 12977 remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss); 12978 if (remaining) 12979 return rte_flow_error_set(error, EBUSY, 12980 RTE_FLOW_ERROR_TYPE_ACTION, 12981 NULL, 12982 "shared rss hrxq has references"); 12983 if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt, 12984 0, 0, __ATOMIC_ACQUIRE, 12985 __ATOMIC_RELAXED)) 12986 return rte_flow_error_set(error, EBUSY, 12987 RTE_FLOW_ERROR_TYPE_ACTION, 12988 NULL, 12989 "shared rss has references"); 12990 queue = shared_rss->ind_tbl->queues; 12991 remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true); 12992 if (remaining) 12993 return rte_flow_error_set(error, EBUSY, 12994 RTE_FLOW_ERROR_TYPE_ACTION, 12995 NULL, 12996 "shared rss indirection table has" 12997 " references"); 12998 mlx5_free(queue); 12999 rte_spinlock_lock(&priv->shared_act_sl); 13000 ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], 13001 &priv->rss_shared_actions, idx, shared_rss, next); 13002 rte_spinlock_unlock(&priv->shared_act_sl); 13003 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], 13004 idx); 13005 return 0; 13006 } 13007 13008 /** 13009 * Create indirect action, lock free, 13010 * (mutex should be acquired by caller). 13011 * Dispatcher for action type specific call. 13012 * 13013 * @param[in] dev 13014 * Pointer to the Ethernet device structure. 13015 * @param[in] conf 13016 * Shared action configuration. 13017 * @param[in] action 13018 * Action specification used to create indirect action. 13019 * @param[out] error 13020 * Perform verbose error reporting if not NULL. Initialized in case of 13021 * error only. 13022 * 13023 * @return 13024 * A valid shared action handle in case of success, NULL otherwise and 13025 * rte_errno is set. 13026 */ 13027 static struct rte_flow_action_handle * 13028 flow_dv_action_create(struct rte_eth_dev *dev, 13029 const struct rte_flow_indir_action_conf *conf, 13030 const struct rte_flow_action *action, 13031 struct rte_flow_error *err) 13032 { 13033 uint32_t idx = 0; 13034 uint32_t ret = 0; 13035 13036 switch (action->type) { 13037 case RTE_FLOW_ACTION_TYPE_RSS: 13038 ret = __flow_dv_action_rss_create(dev, conf, action->conf, err); 13039 idx = (MLX5_INDIRECT_ACTION_TYPE_RSS << 13040 MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret; 13041 break; 13042 case RTE_FLOW_ACTION_TYPE_AGE: 13043 ret = flow_dv_translate_create_aso_age(dev, action->conf, err); 13044 idx = (MLX5_INDIRECT_ACTION_TYPE_AGE << 13045 MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret; 13046 if (ret) { 13047 struct mlx5_aso_age_action *aso_age = 13048 flow_aso_age_get_by_idx(dev, ret); 13049 13050 if (!aso_age->age_params.context) 13051 aso_age->age_params.context = 13052 (void *)(uintptr_t)idx; 13053 } 13054 break; 13055 default: 13056 rte_flow_error_set(err, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, 13057 NULL, "action type not supported"); 13058 break; 13059 } 13060 return ret ? 
		(struct rte_flow_action_handle *)(uintptr_t)idx : NULL;
}

/**
 * Destroy the indirect action.
 * Release action related resources on the NIC and the memory.
 * Lock free, (mutex should be acquired by caller).
 * Dispatcher for action type specific call.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] handle
 *   The indirect action object handle to be removed.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. Initialized in case of
 *   error only.
 *
 * @return
 *   0 on success, otherwise negative errno value.
 */
static int
flow_dv_action_destroy(struct rte_eth_dev *dev,
		       struct rte_flow_action_handle *handle,
		       struct rte_flow_error *error)
{
	uint32_t act_idx = (uint32_t)(uintptr_t)handle;
	uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
	uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
	int ret;

	switch (type) {
	case MLX5_INDIRECT_ACTION_TYPE_RSS:
		return __flow_dv_action_rss_release(dev, idx, error);
	case MLX5_INDIRECT_ACTION_TYPE_AGE:
		ret = flow_dv_aso_age_release(dev, idx);
		if (ret)
			/*
			 * In this case, the last flow that still holds a
			 * reference will actually release the age action.
			 */
			DRV_LOG(DEBUG, "Indirect age action %" PRIu32 " was"
				" released with references %d.", idx, ret);
		return 0;
	default:
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL,
					  "action type not supported");
	}
}

/**
 * Update the shared RSS action configuration in place.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] idx
 *   The shared RSS action object ID to be updated.
 * @param[in] action_conf
 *   RSS action specification used to modify *shared_rss*.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. Initialized in case of
 *   error only.
 *
 * @return
 *   0 on success, otherwise negative errno value.
 * @note Currently only updating the RSS queues is supported.
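 *
 * Only the queue set is taken from *action_conf*; the key, hash types and
 * level are left untouched. A minimal sketch of the expected input
 * (illustrative only):
 *
 *     uint16_t new_queues[] = { 0, 1, 2, 3 };
 *     struct rte_flow_action_rss rss_upd = {
 *         .queue = new_queues,
 *         .queue_num = RTE_DIM(new_queues),
 *     };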
13127 */ 13128 static int 13129 __flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx, 13130 const struct rte_flow_action_rss *action_conf, 13131 struct rte_flow_error *error) 13132 { 13133 struct mlx5_priv *priv = dev->data->dev_private; 13134 struct mlx5_shared_action_rss *shared_rss = 13135 mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx); 13136 int ret = 0; 13137 void *queue = NULL; 13138 uint16_t *queue_old = NULL; 13139 uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t); 13140 13141 if (!shared_rss) 13142 return rte_flow_error_set(error, EINVAL, 13143 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 13144 "invalid shared action to update"); 13145 if (priv->obj_ops.ind_table_modify == NULL) 13146 return rte_flow_error_set(error, ENOTSUP, 13147 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 13148 "cannot modify indirection table"); 13149 queue = mlx5_malloc(MLX5_MEM_ZERO, 13150 RTE_ALIGN_CEIL(queue_size, sizeof(void *)), 13151 0, SOCKET_ID_ANY); 13152 if (!queue) 13153 return rte_flow_error_set(error, ENOMEM, 13154 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 13155 NULL, 13156 "cannot allocate resource memory"); 13157 memcpy(queue, action_conf->queue, queue_size); 13158 MLX5_ASSERT(shared_rss->ind_tbl); 13159 rte_spinlock_lock(&shared_rss->action_rss_sl); 13160 queue_old = shared_rss->ind_tbl->queues; 13161 ret = mlx5_ind_table_obj_modify(dev, shared_rss->ind_tbl, 13162 queue, action_conf->queue_num, true); 13163 if (ret) { 13164 mlx5_free(queue); 13165 ret = rte_flow_error_set(error, rte_errno, 13166 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 13167 "cannot update indirection table"); 13168 } else { 13169 mlx5_free(queue_old); 13170 shared_rss->origin.queue = queue; 13171 shared_rss->origin.queue_num = action_conf->queue_num; 13172 } 13173 rte_spinlock_unlock(&shared_rss->action_rss_sl); 13174 return ret; 13175 } 13176 13177 /** 13178 * Updates in place shared action configuration, lock free, 13179 * (mutex should be acquired by caller). 13180 * 13181 * @param[in] dev 13182 * Pointer to the Ethernet device structure. 13183 * @param[in] handle 13184 * The indirect action object handle to be updated. 13185 * @param[in] update 13186 * Action specification used to modify the action pointed by *handle*. 13187 * *update* could be of same type with the action pointed by the *handle* 13188 * handle argument, or some other structures like a wrapper, depending on 13189 * the indirect action type. 13190 * @param[out] error 13191 * Perform verbose error reporting if not NULL. Initialized in case of 13192 * error only. 13193 * 13194 * @return 13195 * 0 on success, otherwise negative errno value. 
13196 */ 13197 static int 13198 flow_dv_action_update(struct rte_eth_dev *dev, 13199 struct rte_flow_action_handle *handle, 13200 const void *update, 13201 struct rte_flow_error *err) 13202 { 13203 uint32_t act_idx = (uint32_t)(uintptr_t)handle; 13204 uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET; 13205 uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1); 13206 const void *action_conf; 13207 13208 switch (type) { 13209 case MLX5_INDIRECT_ACTION_TYPE_RSS: 13210 action_conf = ((const struct rte_flow_action *)update)->conf; 13211 return __flow_dv_action_rss_update(dev, idx, action_conf, err); 13212 default: 13213 return rte_flow_error_set(err, ENOTSUP, 13214 RTE_FLOW_ERROR_TYPE_ACTION, 13215 NULL, 13216 "action type update not supported"); 13217 } 13218 } 13219 13220 static int 13221 flow_dv_action_query(struct rte_eth_dev *dev, 13222 const struct rte_flow_action_handle *handle, void *data, 13223 struct rte_flow_error *error) 13224 { 13225 struct mlx5_age_param *age_param; 13226 struct rte_flow_query_age *resp; 13227 uint32_t act_idx = (uint32_t)(uintptr_t)handle; 13228 uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET; 13229 uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1); 13230 13231 switch (type) { 13232 case MLX5_INDIRECT_ACTION_TYPE_AGE: 13233 age_param = &flow_aso_age_get_by_idx(dev, idx)->age_params; 13234 resp = data; 13235 resp->aged = __atomic_load_n(&age_param->state, 13236 __ATOMIC_RELAXED) == AGE_TMOUT ? 13237 1 : 0; 13238 resp->sec_since_last_hit_valid = !resp->aged; 13239 if (resp->sec_since_last_hit_valid) 13240 resp->sec_since_last_hit = __atomic_load_n 13241 (&age_param->sec_since_last_hit, __ATOMIC_RELAXED); 13242 return 0; 13243 default: 13244 return rte_flow_error_set(error, ENOTSUP, 13245 RTE_FLOW_ERROR_TYPE_ACTION, 13246 NULL, 13247 "action type query not supported"); 13248 } 13249 } 13250 13251 /** 13252 * Query a dv flow rule for its statistics via devx. 13253 * 13254 * @param[in] dev 13255 * Pointer to Ethernet device. 13256 * @param[in] flow 13257 * Pointer to the sub flow. 13258 * @param[out] data 13259 * data retrieved by the query. 13260 * @param[out] error 13261 * Perform verbose error reporting if not NULL. 13262 * 13263 * @return 13264 * 0 on success, a negative errno value otherwise and rte_errno is set. 
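 *
 * The reported hits/bytes are deltas relative to the last reset of the
 * counter. A minimal application-level sketch that ends up here through
 * rte_flow_query() (illustrative only):
 *
 *     struct rte_flow_query_count qc = { .reset = 1 };
 *     struct rte_flow_action count = { .type = RTE_FLOW_ACTION_TYPE_COUNT };
 *     ret = rte_flow_query(port_id, flow, &count, &qc, &error);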
13265 */ 13266 static int 13267 flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow, 13268 void *data, struct rte_flow_error *error) 13269 { 13270 struct mlx5_priv *priv = dev->data->dev_private; 13271 struct rte_flow_query_count *qc = data; 13272 13273 if (!priv->config.devx) 13274 return rte_flow_error_set(error, ENOTSUP, 13275 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 13276 NULL, 13277 "counters are not supported"); 13278 if (flow->counter) { 13279 uint64_t pkts, bytes; 13280 struct mlx5_flow_counter *cnt; 13281 13282 cnt = flow_dv_counter_get_by_idx(dev, flow->counter, 13283 NULL); 13284 int err = _flow_dv_query_count(dev, flow->counter, &pkts, 13285 &bytes); 13286 13287 if (err) 13288 return rte_flow_error_set(error, -err, 13289 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 13290 NULL, "cannot read counters"); 13291 qc->hits_set = 1; 13292 qc->bytes_set = 1; 13293 qc->hits = pkts - cnt->hits; 13294 qc->bytes = bytes - cnt->bytes; 13295 if (qc->reset) { 13296 cnt->hits = pkts; 13297 cnt->bytes = bytes; 13298 } 13299 return 0; 13300 } 13301 return rte_flow_error_set(error, EINVAL, 13302 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 13303 NULL, 13304 "counters are not available"); 13305 } 13306 13307 /** 13308 * Query a flow rule AGE action for aging information. 13309 * 13310 * @param[in] dev 13311 * Pointer to Ethernet device. 13312 * @param[in] flow 13313 * Pointer to the sub flow. 13314 * @param[out] data 13315 * data retrieved by the query. 13316 * @param[out] error 13317 * Perform verbose error reporting if not NULL. 13318 * 13319 * @return 13320 * 0 on success, a negative errno value otherwise and rte_errno is set. 13321 */ 13322 static int 13323 flow_dv_query_age(struct rte_eth_dev *dev, struct rte_flow *flow, 13324 void *data, struct rte_flow_error *error) 13325 { 13326 struct rte_flow_query_age *resp = data; 13327 struct mlx5_age_param *age_param; 13328 13329 if (flow->age) { 13330 struct mlx5_aso_age_action *act = 13331 flow_aso_age_get_by_idx(dev, flow->age); 13332 13333 age_param = &act->age_params; 13334 } else if (flow->counter) { 13335 age_param = flow_dv_counter_idx_get_age(dev, flow->counter); 13336 13337 if (!age_param || !age_param->timeout) 13338 return rte_flow_error_set 13339 (error, EINVAL, 13340 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 13341 NULL, "cannot read age data"); 13342 } else { 13343 return rte_flow_error_set(error, EINVAL, 13344 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 13345 NULL, "age data not available"); 13346 } 13347 resp->aged = __atomic_load_n(&age_param->state, __ATOMIC_RELAXED) == 13348 AGE_TMOUT ? 1 : 0; 13349 resp->sec_since_last_hit_valid = !resp->aged; 13350 if (resp->sec_since_last_hit_valid) 13351 resp->sec_since_last_hit = __atomic_load_n 13352 (&age_param->sec_since_last_hit, __ATOMIC_RELAXED); 13353 return 0; 13354 } 13355 13356 /** 13357 * Query a flow. 
13358 * 13359 * @see rte_flow_query() 13360 * @see rte_flow_ops 13361 */ 13362 static int 13363 flow_dv_query(struct rte_eth_dev *dev, 13364 struct rte_flow *flow __rte_unused, 13365 const struct rte_flow_action *actions __rte_unused, 13366 void *data __rte_unused, 13367 struct rte_flow_error *error __rte_unused) 13368 { 13369 int ret = -EINVAL; 13370 13371 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 13372 switch (actions->type) { 13373 case RTE_FLOW_ACTION_TYPE_VOID: 13374 break; 13375 case RTE_FLOW_ACTION_TYPE_COUNT: 13376 ret = flow_dv_query_count(dev, flow, data, error); 13377 break; 13378 case RTE_FLOW_ACTION_TYPE_AGE: 13379 ret = flow_dv_query_age(dev, flow, data, error); 13380 break; 13381 default: 13382 return rte_flow_error_set(error, ENOTSUP, 13383 RTE_FLOW_ERROR_TYPE_ACTION, 13384 actions, 13385 "action not supported"); 13386 } 13387 } 13388 return ret; 13389 } 13390 13391 /** 13392 * Destroy the meter table set. 13393 * Lock free, (mutex should be acquired by caller). 13394 * 13395 * @param[in] dev 13396 * Pointer to Ethernet device. 13397 * @param[in] tbl 13398 * Pointer to the meter table set. 13399 * 13400 * @return 13401 * Always 0. 13402 */ 13403 static int 13404 flow_dv_destroy_mtr_tbl(struct rte_eth_dev *dev, 13405 struct mlx5_meter_domains_infos *tbl) 13406 { 13407 struct mlx5_priv *priv = dev->data->dev_private; 13408 struct mlx5_meter_domains_infos *mtd = 13409 (struct mlx5_meter_domains_infos *)tbl; 13410 13411 if (!mtd || !priv->config.dv_flow_en) 13412 return 0; 13413 if (mtd->ingress.policer_rules[RTE_MTR_DROPPED]) 13414 claim_zero(mlx5_flow_os_destroy_flow 13415 (mtd->ingress.policer_rules[RTE_MTR_DROPPED])); 13416 if (mtd->egress.policer_rules[RTE_MTR_DROPPED]) 13417 claim_zero(mlx5_flow_os_destroy_flow 13418 (mtd->egress.policer_rules[RTE_MTR_DROPPED])); 13419 if (mtd->transfer.policer_rules[RTE_MTR_DROPPED]) 13420 claim_zero(mlx5_flow_os_destroy_flow 13421 (mtd->transfer.policer_rules[RTE_MTR_DROPPED])); 13422 if (mtd->egress.color_matcher) 13423 claim_zero(mlx5_flow_os_destroy_flow_matcher 13424 (mtd->egress.color_matcher)); 13425 if (mtd->egress.any_matcher) 13426 claim_zero(mlx5_flow_os_destroy_flow_matcher 13427 (mtd->egress.any_matcher)); 13428 if (mtd->egress.tbl) 13429 flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->egress.tbl); 13430 if (mtd->egress.sfx_tbl) 13431 flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->egress.sfx_tbl); 13432 if (mtd->ingress.color_matcher) 13433 claim_zero(mlx5_flow_os_destroy_flow_matcher 13434 (mtd->ingress.color_matcher)); 13435 if (mtd->ingress.any_matcher) 13436 claim_zero(mlx5_flow_os_destroy_flow_matcher 13437 (mtd->ingress.any_matcher)); 13438 if (mtd->ingress.tbl) 13439 flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->ingress.tbl); 13440 if (mtd->ingress.sfx_tbl) 13441 flow_dv_tbl_resource_release(MLX5_SH(dev), 13442 mtd->ingress.sfx_tbl); 13443 if (mtd->transfer.color_matcher) 13444 claim_zero(mlx5_flow_os_destroy_flow_matcher 13445 (mtd->transfer.color_matcher)); 13446 if (mtd->transfer.any_matcher) 13447 claim_zero(mlx5_flow_os_destroy_flow_matcher 13448 (mtd->transfer.any_matcher)); 13449 if (mtd->transfer.tbl) 13450 flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->transfer.tbl); 13451 if (mtd->transfer.sfx_tbl) 13452 flow_dv_tbl_resource_release(MLX5_SH(dev), 13453 mtd->transfer.sfx_tbl); 13454 if (mtd->drop_actn) 13455 claim_zero(mlx5_flow_os_destroy_flow_action(mtd->drop_actn)); 13456 mlx5_free(mtd); 13457 return 0; 13458 } 13459 13460 /* Number of meter flow actions, count and jump or count 
and drop. */ 13461 #define METER_ACTIONS 2 13462 13463 /** 13464 * Create specify domain meter table and suffix table. 13465 * 13466 * @param[in] dev 13467 * Pointer to Ethernet device. 13468 * @param[in,out] mtb 13469 * Pointer to DV meter table set. 13470 * @param[in] egress 13471 * Table attribute. 13472 * @param[in] transfer 13473 * Table attribute. 13474 * @param[in] color_reg_c_idx 13475 * Reg C index for color match. 13476 * 13477 * @return 13478 * 0 on success, -1 otherwise and rte_errno is set. 13479 */ 13480 static int 13481 flow_dv_prepare_mtr_tables(struct rte_eth_dev *dev, 13482 struct mlx5_meter_domains_infos *mtb, 13483 uint8_t egress, uint8_t transfer, 13484 uint32_t color_reg_c_idx) 13485 { 13486 struct mlx5_priv *priv = dev->data->dev_private; 13487 struct mlx5_dev_ctx_shared *sh = priv->sh; 13488 struct mlx5_flow_dv_match_params mask = { 13489 .size = sizeof(mask.buf), 13490 }; 13491 struct mlx5_flow_dv_match_params value = { 13492 .size = sizeof(value.buf), 13493 }; 13494 struct mlx5dv_flow_matcher_attr dv_attr = { 13495 .type = IBV_FLOW_ATTR_NORMAL, 13496 .priority = 0, 13497 .match_criteria_enable = 0, 13498 .match_mask = (void *)&mask, 13499 }; 13500 void *actions[METER_ACTIONS]; 13501 struct mlx5_meter_domain_info *dtb; 13502 struct rte_flow_error error; 13503 int i = 0; 13504 int ret; 13505 13506 if (transfer) 13507 dtb = &mtb->transfer; 13508 else if (egress) 13509 dtb = &mtb->egress; 13510 else 13511 dtb = &mtb->ingress; 13512 /* Create the meter table with METER level. */ 13513 dtb->tbl = flow_dv_tbl_resource_get(dev, MLX5_FLOW_TABLE_LEVEL_METER, 13514 egress, transfer, false, NULL, 0, 13515 0, &error); 13516 if (!dtb->tbl) { 13517 DRV_LOG(ERR, "Failed to create meter policer table."); 13518 return -1; 13519 } 13520 /* Create the meter suffix table with SUFFIX level. */ 13521 dtb->sfx_tbl = flow_dv_tbl_resource_get(dev, 13522 MLX5_FLOW_TABLE_LEVEL_SUFFIX, 13523 egress, transfer, false, NULL, 0, 13524 0, &error); 13525 if (!dtb->sfx_tbl) { 13526 DRV_LOG(ERR, "Failed to create meter suffix table."); 13527 return -1; 13528 } 13529 /* Create matchers, Any and Color. */ 13530 dv_attr.priority = 3; 13531 dv_attr.match_criteria_enable = 0; 13532 ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj, 13533 &dtb->any_matcher); 13534 if (ret) { 13535 DRV_LOG(ERR, "Failed to create meter" 13536 " policer default matcher."); 13537 goto error_exit; 13538 } 13539 dv_attr.priority = 0; 13540 dv_attr.match_criteria_enable = 13541 1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT; 13542 flow_dv_match_meta_reg(mask.buf, value.buf, color_reg_c_idx, 13543 rte_col_2_mlx5_col(RTE_COLORS), UINT8_MAX); 13544 ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj, 13545 &dtb->color_matcher); 13546 if (ret) { 13547 DRV_LOG(ERR, "Failed to create meter policer color matcher."); 13548 goto error_exit; 13549 } 13550 if (mtb->count_actns[RTE_MTR_DROPPED]) 13551 actions[i++] = mtb->count_actns[RTE_MTR_DROPPED]; 13552 actions[i++] = mtb->drop_actn; 13553 /* Default rule: lowest priority, match any, actions: drop. */ 13554 ret = mlx5_flow_os_create_flow(dtb->any_matcher, (void *)&value, i, 13555 actions, 13556 &dtb->policer_rules[RTE_MTR_DROPPED]); 13557 if (ret) { 13558 DRV_LOG(ERR, "Failed to create meter policer drop rule."); 13559 goto error_exit; 13560 } 13561 return 0; 13562 error_exit: 13563 return -1; 13564 } 13565 13566 /** 13567 * Create the needed meter and suffix tables. 13568 * Lock free, (mutex should be acquired by caller). 
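 *
 * This picks up the per-color policer counter actions, creates the drop
 * action and then instantiates the ingress and egress meter/suffix tables
 * (plus the FDB ones when dv_esw_en is set) via flow_dv_prepare_mtr_tables().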
13569 * 13570 * @param[in] dev 13571 * Pointer to Ethernet device. 13572 * @param[in] fm 13573 * Pointer to the flow meter. 13574 * 13575 * @return 13576 * Pointer to table set on success, NULL otherwise and rte_errno is set. 13577 */ 13578 static struct mlx5_meter_domains_infos * 13579 flow_dv_create_mtr_tbl(struct rte_eth_dev *dev, 13580 const struct mlx5_flow_meter *fm) 13581 { 13582 struct mlx5_priv *priv = dev->data->dev_private; 13583 struct mlx5_meter_domains_infos *mtb; 13584 int ret; 13585 int i; 13586 13587 if (!priv->mtr_en) { 13588 rte_errno = ENOTSUP; 13589 return NULL; 13590 } 13591 mtb = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*mtb), 0, SOCKET_ID_ANY); 13592 if (!mtb) { 13593 DRV_LOG(ERR, "Failed to allocate memory for meter."); 13594 return NULL; 13595 } 13596 /* Create meter count actions */ 13597 for (i = 0; i <= RTE_MTR_DROPPED; i++) { 13598 struct mlx5_flow_counter *cnt; 13599 if (!fm->policer_stats.cnt[i]) 13600 continue; 13601 cnt = flow_dv_counter_get_by_idx(dev, 13602 fm->policer_stats.cnt[i], NULL); 13603 mtb->count_actns[i] = cnt->action; 13604 } 13605 /* Create drop action. */ 13606 ret = mlx5_flow_os_create_flow_action_drop(&mtb->drop_actn); 13607 if (ret) { 13608 DRV_LOG(ERR, "Failed to create drop action."); 13609 goto error_exit; 13610 } 13611 /* Egress meter table. */ 13612 ret = flow_dv_prepare_mtr_tables(dev, mtb, 1, 0, priv->mtr_color_reg); 13613 if (ret) { 13614 DRV_LOG(ERR, "Failed to prepare egress meter table."); 13615 goto error_exit; 13616 } 13617 /* Ingress meter table. */ 13618 ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 0, priv->mtr_color_reg); 13619 if (ret) { 13620 DRV_LOG(ERR, "Failed to prepare ingress meter table."); 13621 goto error_exit; 13622 } 13623 /* FDB meter table. */ 13624 if (priv->config.dv_esw_en) { 13625 ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 1, 13626 priv->mtr_color_reg); 13627 if (ret) { 13628 DRV_LOG(ERR, "Failed to prepare fdb meter table."); 13629 goto error_exit; 13630 } 13631 } 13632 return mtb; 13633 error_exit: 13634 flow_dv_destroy_mtr_tbl(dev, mtb); 13635 return NULL; 13636 } 13637 13638 /** 13639 * Destroy domain policer rule. 13640 * 13641 * @param[in] dt 13642 * Pointer to domain table. 13643 */ 13644 static void 13645 flow_dv_destroy_domain_policer_rule(struct mlx5_meter_domain_info *dt) 13646 { 13647 int i; 13648 13649 for (i = 0; i < RTE_MTR_DROPPED; i++) { 13650 if (dt->policer_rules[i]) { 13651 claim_zero(mlx5_flow_os_destroy_flow 13652 (dt->policer_rules[i])); 13653 dt->policer_rules[i] = NULL; 13654 } 13655 } 13656 if (dt->jump_actn) { 13657 claim_zero(mlx5_flow_os_destroy_flow_action(dt->jump_actn)); 13658 dt->jump_actn = NULL; 13659 } 13660 } 13661 13662 /** 13663 * Destroy policer rules. 13664 * 13665 * @param[in] dev 13666 * Pointer to Ethernet device. 13667 * @param[in] fm 13668 * Pointer to flow meter structure. 13669 * @param[in] attr 13670 * Pointer to flow attributes. 13671 * 13672 * @return 13673 * Always 0. 13674 */ 13675 static int 13676 flow_dv_destroy_policer_rules(struct rte_eth_dev *dev __rte_unused, 13677 const struct mlx5_flow_meter *fm, 13678 const struct rte_flow_attr *attr) 13679 { 13680 struct mlx5_meter_domains_infos *mtb = fm ? 
fm->mfts : NULL; 13681 13682 if (!mtb) 13683 return 0; 13684 if (attr->egress) 13685 flow_dv_destroy_domain_policer_rule(&mtb->egress); 13686 if (attr->ingress) 13687 flow_dv_destroy_domain_policer_rule(&mtb->ingress); 13688 if (attr->transfer) 13689 flow_dv_destroy_domain_policer_rule(&mtb->transfer); 13690 return 0; 13691 } 13692 13693 /** 13694 * Create specify domain meter policer rule. 13695 * 13696 * @param[in] fm 13697 * Pointer to flow meter structure. 13698 * @param[in] mtb 13699 * Pointer to DV meter table set. 13700 * @param[in] mtr_reg_c 13701 * Color match REG_C. 13702 * 13703 * @return 13704 * 0 on success, -1 otherwise. 13705 */ 13706 static int 13707 flow_dv_create_policer_forward_rule(struct mlx5_flow_meter *fm, 13708 struct mlx5_meter_domain_info *dtb, 13709 uint8_t mtr_reg_c) 13710 { 13711 struct mlx5_flow_dv_match_params matcher = { 13712 .size = sizeof(matcher.buf), 13713 }; 13714 struct mlx5_flow_dv_match_params value = { 13715 .size = sizeof(value.buf), 13716 }; 13717 struct mlx5_meter_domains_infos *mtb = fm->mfts; 13718 void *actions[METER_ACTIONS]; 13719 int i; 13720 int ret = 0; 13721 13722 /* Create jump action. */ 13723 if (!dtb->jump_actn) 13724 ret = mlx5_flow_os_create_flow_action_dest_flow_tbl 13725 (dtb->sfx_tbl->obj, &dtb->jump_actn); 13726 if (ret) { 13727 DRV_LOG(ERR, "Failed to create policer jump action."); 13728 goto error; 13729 } 13730 for (i = 0; i < RTE_MTR_DROPPED; i++) { 13731 int j = 0; 13732 13733 flow_dv_match_meta_reg(matcher.buf, value.buf, mtr_reg_c, 13734 rte_col_2_mlx5_col(i), UINT8_MAX); 13735 if (mtb->count_actns[i]) 13736 actions[j++] = mtb->count_actns[i]; 13737 if (fm->action[i] == MTR_POLICER_ACTION_DROP) 13738 actions[j++] = mtb->drop_actn; 13739 else 13740 actions[j++] = dtb->jump_actn; 13741 ret = mlx5_flow_os_create_flow(dtb->color_matcher, 13742 (void *)&value, j, actions, 13743 &dtb->policer_rules[i]); 13744 if (ret) { 13745 DRV_LOG(ERR, "Failed to create policer rule."); 13746 goto error; 13747 } 13748 } 13749 return 0; 13750 error: 13751 rte_errno = errno; 13752 return -1; 13753 } 13754 13755 /** 13756 * Create policer rules. 13757 * 13758 * @param[in] dev 13759 * Pointer to Ethernet device. 13760 * @param[in] fm 13761 * Pointer to flow meter structure. 13762 * @param[in] attr 13763 * Pointer to flow attributes. 13764 * 13765 * @return 13766 * 0 on success, -1 otherwise. 13767 */ 13768 static int 13769 flow_dv_create_policer_rules(struct rte_eth_dev *dev, 13770 struct mlx5_flow_meter *fm, 13771 const struct rte_flow_attr *attr) 13772 { 13773 struct mlx5_priv *priv = dev->data->dev_private; 13774 struct mlx5_meter_domains_infos *mtb = fm->mfts; 13775 int ret; 13776 13777 if (attr->egress) { 13778 ret = flow_dv_create_policer_forward_rule(fm, &mtb->egress, 13779 priv->mtr_color_reg); 13780 if (ret) { 13781 DRV_LOG(ERR, "Failed to create egress policer."); 13782 goto error; 13783 } 13784 } 13785 if (attr->ingress) { 13786 ret = flow_dv_create_policer_forward_rule(fm, &mtb->ingress, 13787 priv->mtr_color_reg); 13788 if (ret) { 13789 DRV_LOG(ERR, "Failed to create ingress policer."); 13790 goto error; 13791 } 13792 } 13793 if (attr->transfer) { 13794 ret = flow_dv_create_policer_forward_rule(fm, &mtb->transfer, 13795 priv->mtr_color_reg); 13796 if (ret) { 13797 DRV_LOG(ERR, "Failed to create transfer policer."); 13798 goto error; 13799 } 13800 } 13801 return 0; 13802 error: 13803 flow_dv_destroy_policer_rules(dev, fm, attr); 13804 return -1; 13805 } 13806 13807 /** 13808 * Validate the batch counter support in root table. 
 *
 * Create a simple flow with an invalid counter and a drop action on the root
 * table to check whether a batch counter with offset is supported on the
 * root table or not.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_flow_dv_match_params mask = {
		.size = sizeof(mask.buf),
	};
	struct mlx5_flow_dv_match_params value = {
		.size = sizeof(value.buf),
	};
	struct mlx5dv_flow_matcher_attr dv_attr = {
		.type = IBV_FLOW_ATTR_NORMAL,
		.priority = 0,
		.match_criteria_enable = 0,
		.match_mask = (void *)&mask,
	};
	void *actions[2] = { 0 };
	struct mlx5_flow_tbl_resource *tbl = NULL;
	struct mlx5_devx_obj *dcs = NULL;
	void *matcher = NULL;
	void *flow = NULL;
	int ret = -1;

	tbl = flow_dv_tbl_resource_get(dev, 0, 0, 0, false, NULL, 0, 0, NULL);
	if (!tbl)
		goto err;
	dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
	if (!dcs)
		goto err;
	ret = mlx5_flow_os_create_flow_action_count(dcs->obj, UINT16_MAX,
						    &actions[0]);
	if (ret)
		goto err;
	actions[1] = sh->dr_drop_action ? sh->dr_drop_action :
					  priv->drop_queue.hrxq->action;
	dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
	ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
					       &matcher);
	if (ret)
		goto err;
	ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 2,
				       actions, &flow);
err:
	/*
	 * If the batch counter with offset is not supported, the driver does
	 * not validate the invalid offset value and flow creation should
	 * succeed. In this case, the batch counter is not supported in the
	 * root table.
	 *
	 * Otherwise, if flow creation fails, the counter offset is supported.
	 */
	if (flow) {
		DRV_LOG(INFO, "Batch counter is not supported in root "
			      "table. Switch to fallback mode.");
		rte_errno = ENOTSUP;
		ret = -rte_errno;
		claim_zero(mlx5_flow_os_destroy_flow(flow));
	} else {
		/* Check the matcher to make sure validation failed at flow creation. */
		if (!matcher || errno != EINVAL)
			DRV_LOG(ERR, "Unexpected error in counter offset "
				     "support detection");
		ret = 0;
	}
	if (actions[0])
		claim_zero(mlx5_flow_os_destroy_flow_action(actions[0]));
	if (matcher)
		claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
	if (tbl)
		flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
	if (dcs)
		claim_zero(mlx5_devx_cmd_destroy(dcs));
	return ret;
}

/**
 * Query a devx counter.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] counter
 *   Index to the flow counter.
 * @param[in] clear
 *   Set to clear the counter statistics.
 * @param[out] pkts
 *   The statistics value of packets.
 * @param[out] bytes
 *   The statistics value of bytes.
 *
 * @return
 *   0 on success, otherwise return -1.
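 *
 * As in flow_dv_query_count(), the returned values are deltas against the
 * per-counter baseline; passing *clear* moves the baseline forward so that
 * the next query starts from zero.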
 */
static int
flow_dv_counter_query(struct rte_eth_dev *dev, uint32_t counter, bool clear,
		      uint64_t *pkts, uint64_t *bytes)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter *cnt;
	uint64_t inn_pkts, inn_bytes;
	int ret;

	if (!priv->config.devx)
		return -1;

	ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes);
	if (ret)
		return -1;
	cnt = flow_dv_counter_get_by_idx(dev, counter, NULL);
	*pkts = inn_pkts - cnt->hits;
	*bytes = inn_bytes - cnt->bytes;
	if (clear) {
		cnt->hits = inn_pkts;
		cnt->bytes = inn_bytes;
	}
	return 0;
}

/**
 * Get aged-out flows.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] context
 *   The address of an array of pointers to the aged-out flow contexts.
 * @param[in] nb_contexts
 *   The length of the context array.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. Initialized in case of
 *   error only.
 *
 * @return
 *   The number of aged-out contexts reported on success, otherwise a
 *   negative errno value.
 *   If nb_contexts is 0, return the total number of aged-out contexts.
 *   If nb_contexts is not 0, return the number of aged-out flows reported
 *   in the context array.
 */
static int
flow_get_aged_flows(struct rte_eth_dev *dev,
		    void **context,
		    uint32_t nb_contexts,
		    struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_age_info *age_info;
	struct mlx5_age_param *age_param;
	struct mlx5_flow_counter *counter;
	struct mlx5_aso_age_action *act;
	int nb_flows = 0;

	if (nb_contexts && !context)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "empty context");
	age_info = GET_PORT_AGE_INFO(priv);
	rte_spinlock_lock(&age_info->aged_sl);
	LIST_FOREACH(act, &age_info->aged_aso, next) {
		nb_flows++;
		if (nb_contexts) {
			context[nb_flows - 1] =
						act->age_params.context;
			if (!(--nb_contexts))
				break;
		}
	}
	TAILQ_FOREACH(counter, &age_info->aged_counters, next) {
		nb_flows++;
		if (nb_contexts) {
			age_param = MLX5_CNT_TO_AGE(counter);
			context[nb_flows - 1] = age_param->context;
			if (!(--nb_contexts))
				break;
		}
	}
	rte_spinlock_unlock(&age_info->aged_sl);
	MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
	return nb_flows;
}

/*
 * Mutex-protected thunk to lock-free flow_dv_counter_alloc().
 */
static uint32_t
flow_dv_counter_allocate(struct rte_eth_dev *dev)
{
	return flow_dv_counter_alloc(dev, 0);
}

/**
 * Validate indirect action.
 * Dispatcher for action type specific validation.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] conf
 *   Shared action configuration.
 * @param[in] action
 *   The indirect action object to validate.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. Initialized in case of
 *   error only.
 *
 * @return
 *   0 on success, otherwise negative errno value.
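 *
 * Only RSS and AGE indirect actions are accepted: RSS additionally requires
 * an indirection-table modify callback (DevX object ops), and AGE requires
 * the ASO age management pool to be initialized.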
14023 */ 14024 static int 14025 flow_dv_action_validate(struct rte_eth_dev *dev, 14026 const struct rte_flow_indir_action_conf *conf, 14027 const struct rte_flow_action *action, 14028 struct rte_flow_error *err) 14029 { 14030 struct mlx5_priv *priv = dev->data->dev_private; 14031 14032 RTE_SET_USED(conf); 14033 switch (action->type) { 14034 case RTE_FLOW_ACTION_TYPE_RSS: 14035 /* 14036 * priv->obj_ops is set according to driver capabilities. 14037 * When DevX capabilities are 14038 * sufficient, it is set to devx_obj_ops. 14039 * Otherwise, it is set to ibv_obj_ops. 14040 * ibv_obj_ops doesn't support ind_table_modify operation. 14041 * In this case the shared RSS action can't be used. 14042 */ 14043 if (priv->obj_ops.ind_table_modify == NULL) 14044 return rte_flow_error_set 14045 (err, ENOTSUP, 14046 RTE_FLOW_ERROR_TYPE_ACTION, 14047 NULL, 14048 "shared RSS action not supported"); 14049 return mlx5_validate_action_rss(dev, action, err); 14050 case RTE_FLOW_ACTION_TYPE_AGE: 14051 if (!priv->sh->aso_age_mng) 14052 return rte_flow_error_set(err, ENOTSUP, 14053 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 14054 NULL, 14055 "shared age action not supported"); 14056 return flow_dv_validate_action_age(0, action, dev, err); 14057 default: 14058 return rte_flow_error_set(err, ENOTSUP, 14059 RTE_FLOW_ERROR_TYPE_ACTION, 14060 NULL, 14061 "action type not supported"); 14062 } 14063 } 14064 14065 static int 14066 flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t flags) 14067 { 14068 struct mlx5_priv *priv = dev->data->dev_private; 14069 int ret = 0; 14070 14071 if ((domains & MLX5_DOMAIN_BIT_NIC_RX) && priv->sh->rx_domain != NULL) { 14072 ret = mlx5_os_flow_dr_sync_domain(priv->sh->rx_domain, 14073 flags); 14074 if (ret != 0) 14075 return ret; 14076 } 14077 if ((domains & MLX5_DOMAIN_BIT_NIC_TX) && priv->sh->tx_domain != NULL) { 14078 ret = mlx5_os_flow_dr_sync_domain(priv->sh->tx_domain, flags); 14079 if (ret != 0) 14080 return ret; 14081 } 14082 if ((domains & MLX5_DOMAIN_BIT_FDB) && priv->sh->fdb_domain != NULL) { 14083 ret = mlx5_os_flow_dr_sync_domain(priv->sh->fdb_domain, flags); 14084 if (ret != 0) 14085 return ret; 14086 } 14087 return 0; 14088 } 14089 14090 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = { 14091 .validate = flow_dv_validate, 14092 .prepare = flow_dv_prepare, 14093 .translate = flow_dv_translate, 14094 .apply = flow_dv_apply, 14095 .remove = flow_dv_remove, 14096 .destroy = flow_dv_destroy, 14097 .query = flow_dv_query, 14098 .create_mtr_tbls = flow_dv_create_mtr_tbl, 14099 .destroy_mtr_tbls = flow_dv_destroy_mtr_tbl, 14100 .create_policer_rules = flow_dv_create_policer_rules, 14101 .destroy_policer_rules = flow_dv_destroy_policer_rules, 14102 .counter_alloc = flow_dv_counter_allocate, 14103 .counter_free = flow_dv_counter_free, 14104 .counter_query = flow_dv_counter_query, 14105 .get_aged_flows = flow_get_aged_flows, 14106 .action_validate = flow_dv_action_validate, 14107 .action_create = flow_dv_action_create, 14108 .action_destroy = flow_dv_action_destroy, 14109 .action_update = flow_dv_action_update, 14110 .action_query = flow_dv_action_query, 14111 .sync_domain = flow_dv_sync_domain, 14112 }; 14113 14114 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */ 14115 14116