/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2024 NVIDIA Corporation & Affiliates
 */

#include <rte_flow.h>
#include <rte_common.h>

#include <mlx5_malloc.h>
#include "mlx5.h"
#include "mlx5_defs.h"
#include "mlx5_flow.h"
#include "mlx5_rx.h"

#ifdef HAVE_MLX5_HWS_SUPPORT

/* Context shared by all expansion flows derived from a single RSS flow rule. */
struct mlx5_nta_rss_ctx {
	struct rte_eth_dev *dev;
	struct rte_flow_attr *attr;
	struct rte_flow_item *pattern;
	struct rte_flow_action *actions;
	const struct rte_flow_action_rss *rss_conf;
	struct rte_flow_error *error;
	struct mlx5_nta_rss_flow_head *head;
	uint64_t pattern_flags;
	enum mlx5_flow_type flow_type;
	bool external;
};

#define MLX5_RSS_PTYPE_ITEM_INDEX 0
#ifdef MLX5_RSS_PTYPE_DEBUG
#define MLX5_RSS_PTYPE_ACTION_INDEX 1
#else
#define MLX5_RSS_PTYPE_ACTION_INDEX 0
#endif

#define MLX5_RSS_PTYPE_ITEMS_NUM (MLX5_RSS_PTYPE_ITEM_INDEX + 2)
#define MLX5_RSS_PTYPE_ACTIONS_NUM (MLX5_RSS_PTYPE_ACTION_INDEX + 2)

/*
 * Create a single RSS expansion flow:
 * match the given packet type in the PTYPE group and hash on the given
 * RSS types only.
 */
static int
mlx5_nta_ptype_rss_flow_create(struct mlx5_nta_rss_ctx *ctx,
			       uint32_t ptype, uint64_t rss_type)
{
	int ret;
	struct rte_flow_hw *flow;
	struct rte_flow_item_ptype *ptype_spec = (void *)(uintptr_t)
		ctx->pattern[MLX5_RSS_PTYPE_ITEM_INDEX].spec;
	struct rte_flow_action_rss *rss_conf = (void *)(uintptr_t)
		ctx->actions[MLX5_RSS_PTYPE_ACTION_INDEX].conf;
	bool dbg_log = rte_log_can_log(mlx5_logtype, RTE_LOG_DEBUG);
	uint32_t mark_id = 0;
#ifdef MLX5_RSS_PTYPE_DEBUG
	struct rte_flow_action_mark *mark = (void *)(uintptr_t)
		ctx->actions[MLX5_RSS_PTYPE_ACTION_INDEX - 1].conf;

	/* Inner L3 and L4 ptype values are too large for a 24-bit mark. */
	mark->id =
		((ptype & (RTE_PTYPE_INNER_L3_MASK | RTE_PTYPE_INNER_L4_MASK)) == ptype) ?
		ptype >> 20 : ptype;
	mark_id = mark->id;
	dbg_log = true;
#endif
	ptype_spec->packet_type = ptype;
	rss_conf->types = rss_type;
	ret = flow_hw_create_flow(ctx->dev, MLX5_FLOW_TYPE_GEN, ctx->attr,
				  ctx->pattern, ctx->actions,
				  MLX5_FLOW_ITEM_PTYPE, MLX5_FLOW_ACTION_RSS,
				  ctx->external, &flow, ctx->error);
	if (flow) {
		SLIST_INSERT_HEAD(ctx->head, flow, nt2hws->next);
		if (dbg_log) {
			DRV_LOG(NOTICE,
				"PTYPE RSS: group %u ptype spec %#x rss types %#lx mark %#x",
				ctx->attr->group, ptype_spec->packet_type,
				(unsigned long)rss_conf->types, mark_id);
		}
	}
	return ret;
}

/*
 * Call conditions:
 * * Flow pattern did not include L3 and L4 items (outer or inner,
 *   according to the RSS level).
 * * RSS configuration had L3 hash types.
 */
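/*
 * For example (a sketch, not an exhaustive list): with no L3/L4 items in
 * the original pattern and RSS types "ipv4 ipv6" at level 1, two flows
 * are created in the PTYPE group:
 *   PTYPE spec RTE_PTYPE_L3_IPV4 -> RSS on the IPv4 hash types only,
 *   PTYPE spec RTE_PTYPE_L3_IPV6 -> RSS on the IPv6 hash types only.
 */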
static struct rte_flow_hw *
mlx5_hw_rss_expand_l3(struct mlx5_nta_rss_ctx *rss_ctx)
{
	int ret;
	int ptype_ip4, ptype_ip6;
	uint64_t rss_types = rte_eth_rss_hf_refine(rss_ctx->rss_conf->types);

	if (rss_ctx->rss_conf->level < 2) {
		ptype_ip4 = RTE_PTYPE_L3_IPV4;
		ptype_ip6 = RTE_PTYPE_L3_IPV6;
	} else {
		ptype_ip4 = RTE_PTYPE_INNER_L3_IPV4;
		ptype_ip6 = RTE_PTYPE_INNER_L3_IPV6;
	}
	if (rss_types & MLX5_IPV4_LAYER_TYPES) {
		ret = mlx5_nta_ptype_rss_flow_create
			(rss_ctx, ptype_ip4, rss_types & ~MLX5_IPV6_LAYER_TYPES);
		if (ret)
			goto error;
	}
	if (rss_types & MLX5_IPV6_LAYER_TYPES) {
		ret = mlx5_nta_ptype_rss_flow_create
			(rss_ctx, ptype_ip6, rss_types & ~MLX5_IPV4_LAYER_TYPES);
		if (ret)
			goto error;
	}
	return SLIST_FIRST(rss_ctx->head);

error:
	flow_hw_list_destroy(rss_ctx->dev, rss_ctx->flow_type,
			     (uintptr_t)SLIST_FIRST(rss_ctx->head));
	return NULL;
}

/*
 * Expand RSS hash types into one flow per L4 protocol.
 * Each flow matches a single L3/L4 ptype combination and hashes only on
 * the RSS types relevant to that protocol.
 */
static void
mlx5_nta_rss_expand_l3_l4(struct mlx5_nta_rss_ctx *rss_ctx,
			  uint64_t rss_types, uint64_t rss_l3_types)
{
	int ret;
	int ptype_l3, ptype_l4_udp, ptype_l4_tcp, ptype_l4_esp = 0;
	uint64_t rss = rss_types &
		       ~(rss_l3_types == MLX5_IPV4_LAYER_TYPES ?
			 MLX5_IPV6_LAYER_TYPES : MLX5_IPV4_LAYER_TYPES);

	if (rss_ctx->rss_conf->level < 2) {
		ptype_l3 = rss_l3_types == MLX5_IPV4_LAYER_TYPES ?
			   RTE_PTYPE_L3_IPV4 : RTE_PTYPE_L3_IPV6;
		ptype_l4_esp = RTE_PTYPE_TUNNEL_ESP;
		ptype_l4_udp = RTE_PTYPE_L4_UDP;
		ptype_l4_tcp = RTE_PTYPE_L4_TCP;
	} else {
		/* There is no inner ESP ptype value - inner ESP expands as bare L3. */
		ptype_l3 = rss_l3_types == MLX5_IPV4_LAYER_TYPES ?
			   RTE_PTYPE_INNER_L3_IPV4 : RTE_PTYPE_INNER_L3_IPV6;
		ptype_l4_udp = RTE_PTYPE_INNER_L4_UDP;
		ptype_l4_tcp = RTE_PTYPE_INNER_L4_TCP;
	}
	if (rss_types & RTE_ETH_RSS_ESP) {
		ret = mlx5_nta_ptype_rss_flow_create
			(rss_ctx, ptype_l3 | ptype_l4_esp,
			 rss & ~(RTE_ETH_RSS_UDP | RTE_ETH_RSS_TCP));
		if (ret)
			goto error;
	}
	if (rss_types & RTE_ETH_RSS_UDP) {
		ret = mlx5_nta_ptype_rss_flow_create
			(rss_ctx, ptype_l3 | ptype_l4_udp,
			 rss & ~(RTE_ETH_RSS_ESP | RTE_ETH_RSS_TCP));
		if (ret)
			goto error;
	}
	if (rss_types & RTE_ETH_RSS_TCP) {
		ret = mlx5_nta_ptype_rss_flow_create
			(rss_ctx, ptype_l3 | ptype_l4_tcp,
			 rss & ~(RTE_ETH_RSS_ESP | RTE_ETH_RSS_UDP));
		if (ret)
			goto error;
	}
	return;
error:
	flow_hw_list_destroy(rss_ctx->dev, rss_ctx->flow_type,
			     (uintptr_t)SLIST_FIRST(rss_ctx->head));
}
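/*
 * mlx5_nta_rss_expand_l3_l4() example (a sketch): with
 * rss_l3_types == MLX5_IPV4_LAYER_TYPES and RSS types "ipv4 udp tcp" at
 * level 1, two flows are created:
 *   PTYPE spec RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP -> IPv4 + UDP hash types,
 *   PTYPE spec RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP -> IPv4 + TCP hash types,
 * so a TCP packet is never hashed on UDP ports and vice versa.
 */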
/*
 * Call conditions:
 * * Flow pattern did not include L4 item.
 * * RSS configuration had L4 hash types.
 */
static struct rte_flow_hw *
mlx5_hw_rss_expand_l4(struct mlx5_nta_rss_ctx *rss_ctx)
{
	uint64_t rss_types = rte_eth_rss_hf_refine(rss_ctx->rss_conf->types);
	uint64_t l3_item = rss_ctx->pattern_flags &
			   (rss_ctx->rss_conf->level < 2 ?
			    MLX5_FLOW_LAYER_OUTER_L3 : MLX5_FLOW_LAYER_INNER_L3);

	if (l3_item) {
		/*
		 * The L3 header was present in the original pattern.
		 * Expand the L4 level only.
		 */
		if (l3_item & MLX5_FLOW_LAYER_L3_IPV4)
			mlx5_nta_rss_expand_l3_l4(rss_ctx, rss_types,
						  MLX5_IPV4_LAYER_TYPES);
		else
			mlx5_nta_rss_expand_l3_l4(rss_ctx, rss_types,
						  MLX5_IPV6_LAYER_TYPES);
	} else if (rss_types & (MLX5_IPV4_LAYER_TYPES | MLX5_IPV6_LAYER_TYPES)) {
		/*
		 * No L3 item in the application flow pattern and the RSS
		 * hash types are both L3 and L4.
		 * Expand L3-only flows, then each L3/L4 combination
		 * according to the RSS configuration.
		 */
		mlx5_hw_rss_expand_l3(rss_ctx);
		if (rss_types & MLX5_IPV4_LAYER_TYPES)
			mlx5_nta_rss_expand_l3_l4(rss_ctx, rss_types,
						  MLX5_IPV4_LAYER_TYPES);
		if (rss_types & MLX5_IPV6_LAYER_TYPES)
			mlx5_nta_rss_expand_l3_l4(rss_ctx, rss_types,
						  MLX5_IPV6_LAYER_TYPES);
	} else {
		/*
		 * No L3 item in the application flow pattern and the RSS
		 * hash type is L4 only.
		 * Expand both IPv4 and IPv6 L3/L4 combinations.
		 */
		mlx5_nta_rss_expand_l3_l4(rss_ctx, rss_types,
					  MLX5_IPV4_LAYER_TYPES);
		mlx5_nta_rss_expand_l3_l4(rss_ctx, rss_types,
					  MLX5_IPV6_LAYER_TYPES);
	}
	return SLIST_EMPTY(rss_ctx->head) ? NULL : SLIST_FIRST(rss_ctx->head);
}

/*
 * The pool is used only as a group index allocator:
 * entries are 1 byte wide and never dereferenced.
 */
static struct mlx5_indexed_pool *
mlx5_nta_ptype_ipool_create(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_indexed_pool_config ipool_cfg = {
		.size = 1,
		.trunk_size = 32,
		.grow_trunk = 5,
		.grow_shift = 1,
		.need_lock = 1,
		.release_mem_en = !!priv->sh->config.reclaim_mode,
		.malloc = mlx5_malloc,
		.max_idx = MLX5_FLOW_TABLE_PTYPE_RSS_NUM,
		.free = mlx5_free,
		.type = "mlx5_nta_ptype_rss"
	};

	return mlx5_ipool_create(&ipool_cfg);
}

static void
mlx5_hw_release_rss_ptype_group(struct rte_eth_dev *dev, uint32_t group)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	if (!priv->ptype_rss_groups)
		return;
	mlx5_ipool_free(priv->ptype_rss_groups, group);
}

static uint32_t
mlx5_hw_get_rss_ptype_group(struct rte_eth_dev *dev)
{
	void *obj;
	uint32_t idx = 0;
	struct mlx5_priv *priv = dev->data->dev_private;

	if (!priv->ptype_rss_groups) {
		priv->ptype_rss_groups = mlx5_nta_ptype_ipool_create(dev);
		if (!priv->ptype_rss_groups) {
			DRV_LOG(DEBUG, "PTYPE RSS: failed to allocate groups pool");
			return 0;
		}
	}
	obj = mlx5_ipool_malloc(priv->ptype_rss_groups, &idx);
	if (!obj) {
		DRV_LOG(DEBUG, "PTYPE RSS: failed to fetch ptype group from the pool");
		return 0;
	}
	return idx + MLX5_FLOW_TABLE_PTYPE_RSS_BASE;
}

/*
 * Create the lowest-priority flow in the PTYPE group:
 * packets that matched no PTYPE expansion flow fall back to the original
 * RSS action.
 */
static struct rte_flow_hw *
mlx5_hw_rss_ptype_create_miss_flow(struct rte_eth_dev *dev,
				   const struct rte_flow_action_rss *rss_conf,
				   uint32_t ptype_group, bool external,
				   struct rte_flow_error *error)
{
	struct rte_flow_hw *flow = NULL;
	const struct rte_flow_attr miss_attr = {
		.ingress = 1,
		.group = ptype_group,
		.priority = 3
	};
	const struct rte_flow_item miss_pattern[2] = {
		[0] = { .type = RTE_FLOW_ITEM_TYPE_ETH },
		[1] = { .type = RTE_FLOW_ITEM_TYPE_END }
	};
	struct rte_flow_action miss_actions[] = {
#ifdef MLX5_RSS_PTYPE_DEBUG
		[MLX5_RSS_PTYPE_ACTION_INDEX - 1] = {
			.type = RTE_FLOW_ACTION_TYPE_MARK,
			.conf = &(const struct rte_flow_action_mark){ .id = 0xfac }
		},
#endif
		[MLX5_RSS_PTYPE_ACTION_INDEX] = {
			.type = RTE_FLOW_ACTION_TYPE_RSS,
			.conf = rss_conf
		},
		[MLX5_RSS_PTYPE_ACTION_INDEX + 1] = { .type = RTE_FLOW_ACTION_TYPE_END }
	};

	flow_hw_create_flow(dev, MLX5_FLOW_TYPE_GEN, &miss_attr,
			    miss_pattern, miss_actions, 0, MLX5_FLOW_ACTION_RSS,
			    external, &flow, error);
	return flow;
}
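/*
 * mlx5_hw_rss_ptype_create_base_flow() below re-creates the original flow
 * in its original group with the terminating RSS action replaced by a
 * JUMP into the dedicated PTYPE group. A sketch of the action rewrite,
 * assuming the application supplied { COUNT, RSS, END }:
 *   { COUNT, RSS, END } -> { COUNT, JUMP(ptype_group), END }
 * Indirect actions other than RSS are copied through unchanged.
 */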
static struct rte_flow_hw *
mlx5_hw_rss_ptype_create_base_flow(struct rte_eth_dev *dev,
				   const struct rte_flow_attr *attr,
				   const struct rte_flow_item pattern[],
				   const struct rte_flow_action orig_actions[],
				   uint32_t ptype_group, uint64_t item_flags,
				   uint64_t action_flags, bool external,
				   enum mlx5_flow_type flow_type,
				   struct rte_flow_error *error)
{
	int i = 0;
	struct rte_flow_hw *flow = NULL;
	struct rte_flow_action actions[MLX5_HW_MAX_ACTS];
	/*
	 * The JUMP configuration must outlive the copy loop:
	 * a compound literal inside the loop body would go out of scope
	 * before flow_hw_create_flow() dereferences it.
	 */
	const struct rte_flow_action_jump jump_conf = {
		.group = ptype_group
	};
	enum mlx5_indirect_type indirect_type;

	do {
		switch (orig_actions[i].type) {
		case RTE_FLOW_ACTION_TYPE_INDIRECT:
			indirect_type = (typeof(indirect_type))
					MLX5_INDIRECT_ACTION_TYPE_GET
					(orig_actions[i].conf);
			if (indirect_type != MLX5_INDIRECT_ACTION_TYPE_RSS) {
				actions[i] = orig_actions[i];
				break;
			}
			/* Fall through: replace an indirect RSS action as well. */
		case RTE_FLOW_ACTION_TYPE_RSS:
			actions[i].type = RTE_FLOW_ACTION_TYPE_JUMP;
			actions[i].conf = &jump_conf;
			break;
		default:
			actions[i] = orig_actions[i];
		}
	} while (actions[i++].type != RTE_FLOW_ACTION_TYPE_END);
	action_flags &= ~MLX5_FLOW_ACTION_RSS;
	action_flags |= MLX5_FLOW_ACTION_JUMP;
	flow_hw_create_flow(dev, flow_type, attr, pattern, actions,
			    item_flags, action_flags, external, &flow, error);
	return flow;
}

/*
 * Locate the RSS configuration attached to the flow actions.
 * An indirect RSS action is resolved to the origin of its shared action.
 * Return NULL with a zeroed error if the actions carry no RSS at all.
 */
const struct rte_flow_action_rss *
flow_nta_locate_rss(struct rte_eth_dev *dev,
		    const struct rte_flow_action actions[],
		    struct rte_flow_error *error)
{
	const struct rte_flow_action *a;
	const struct rte_flow_action_rss *rss_conf = NULL;

	for (a = actions; a->type != RTE_FLOW_ACTION_TYPE_END; a++) {
		if (a->type == RTE_FLOW_ACTION_TYPE_RSS) {
			rss_conf = a->conf;
			break;
		}
		if (a->type == RTE_FLOW_ACTION_TYPE_INDIRECT &&
		    MLX5_INDIRECT_ACTION_TYPE_GET(a->conf) ==
		    MLX5_INDIRECT_ACTION_TYPE_RSS) {
			struct mlx5_priv *priv = dev->data->dev_private;
			struct mlx5_shared_action_rss *shared_rss;
			uint32_t handle = (uint32_t)(uintptr_t)a->conf;

			shared_rss = mlx5_ipool_get
				(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
				 MLX5_INDIRECT_ACTION_IDX_GET(handle));
			if (!shared_rss) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ACTION_CONF,
						   a->conf, "invalid shared RSS handle");
				return NULL;
			}
			rss_conf = &shared_rss->origin;
			break;
		}
	}
	if (a->type == RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, 0, RTE_FLOW_ERROR_TYPE_NONE, NULL, NULL);
		return NULL;
	}
	return rss_conf;
}

static __rte_always_inline void
mlx5_nta_rss_init_ptype_ctx(struct mlx5_nta_rss_ctx *rss_ctx,
			    struct rte_eth_dev *dev,
			    struct rte_flow_attr *ptype_attr,
			    struct rte_flow_item *ptype_pattern,
			    struct rte_flow_action *ptype_actions,
			    const struct rte_flow_action_rss *rss_conf,
			    struct mlx5_nta_rss_flow_head *head,
			    struct rte_flow_error *error,
			    uint64_t item_flags,
			    enum mlx5_flow_type flow_type, bool external)
{
	rss_ctx->dev = dev;
	rss_ctx->attr = ptype_attr;
	rss_ctx->pattern = ptype_pattern;
	rss_ctx->actions = ptype_actions;
	rss_ctx->rss_conf = rss_conf;
	rss_ctx->error = error;
	rss_ctx->head = head;
	rss_ctx->pattern_flags = item_flags;
	rss_ctx->flow_type = flow_type;
	rss_ctx->external = external;
}

/*
 * MLX5 HW hashes over the IPv4/IPv6 L3 header and the UDP/TCP/ESP L4 header.
 * RSS expansion is required when the RSS action was configured to hash
 * a network protocol that was not matched by the flow pattern.
 */
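/*
 * End-to-end sketch (hypothetical rule): for pattern "eth / ipv4" with
 * action "rss types udp tcp end", flow_nta_handle_rss() below
 *   - allocates a dedicated PTYPE group,
 *   - creates there one flow per ptype: IPV4|UDP hashing UDP types and
 *     IPV4|TCP hashing TCP types, plus a lowest-priority miss flow
 *     carrying the original RSS action,
 *   - re-creates the application flow with RSS replaced by a JUMP to
 *     that group, and returns this base flow to the application.
 */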
#define MLX5_PTYPE_RSS_OUTER_MASK (RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L3_IPV6 | \
				   RTE_PTYPE_L4_UDP | RTE_PTYPE_L4_TCP | \
				   RTE_PTYPE_TUNNEL_ESP)
#define MLX5_PTYPE_RSS_INNER_MASK (RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L3_IPV6 | \
				   RTE_PTYPE_INNER_L4_TCP | RTE_PTYPE_INNER_L4_UDP)
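/*
 * Expansion is attempted only when the pattern stops short of what the
 * RSS action hashes on: an L4 hash with no L4 item, or an L3 hash with
 * neither L3 nor L4 items. In all other cases the function returns NULL
 * with a zeroed error to signal that no expansion is needed.
 */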
struct rte_flow_hw *
flow_nta_handle_rss(struct rte_eth_dev *dev,
		    const struct rte_flow_attr *attr,
		    const struct rte_flow_item items[],
		    const struct rte_flow_action actions[],
		    const struct rte_flow_action_rss *rss_conf,
		    uint64_t item_flags, uint64_t action_flags,
		    bool external, enum mlx5_flow_type flow_type,
		    struct rte_flow_error *error)
{
	struct rte_flow_hw *rss_base = NULL, *rss_next = NULL, *rss_miss = NULL;
	struct rte_flow_action_rss ptype_rss_conf;
	struct mlx5_nta_rss_ctx rss_ctx;
	uint64_t rss_types = rte_eth_rss_hf_refine(rss_conf->types);
	bool inner_rss = rss_conf->level > 1;
	bool outer_rss = !inner_rss;
	bool l3_item = (outer_rss && (item_flags & MLX5_FLOW_LAYER_OUTER_L3)) ||
		       (inner_rss && (item_flags & MLX5_FLOW_LAYER_INNER_L3));
	bool l4_item = (outer_rss && (item_flags & MLX5_FLOW_LAYER_OUTER_L4)) ||
		       (inner_rss && (item_flags & MLX5_FLOW_LAYER_INNER_L4));
	bool l3_hash = rss_types & (MLX5_IPV4_LAYER_TYPES | MLX5_IPV6_LAYER_TYPES);
	bool l4_hash = rss_types & (RTE_ETH_RSS_UDP | RTE_ETH_RSS_TCP | RTE_ETH_RSS_ESP);
	struct mlx5_nta_rss_flow_head expansion_head =
		SLIST_HEAD_INITIALIZER(expansion_head);
	struct rte_flow_attr ptype_attr = {
		.ingress = 1
	};
	struct rte_flow_item_ptype ptype_spec = { .packet_type = 0 };
	const struct rte_flow_item_ptype ptype_mask = {
		.packet_type = outer_rss ?
			MLX5_PTYPE_RSS_OUTER_MASK : MLX5_PTYPE_RSS_INNER_MASK
	};
	struct rte_flow_item ptype_pattern[MLX5_RSS_PTYPE_ITEMS_NUM] = {
		[MLX5_RSS_PTYPE_ITEM_INDEX] = {
			.type = RTE_FLOW_ITEM_TYPE_PTYPE,
			.spec = &ptype_spec,
			.mask = &ptype_mask
		},
		[MLX5_RSS_PTYPE_ITEM_INDEX + 1] = { .type = RTE_FLOW_ITEM_TYPE_END }
	};
	struct rte_flow_action ptype_actions[MLX5_RSS_PTYPE_ACTIONS_NUM] = {
#ifdef MLX5_RSS_PTYPE_DEBUG
		[MLX5_RSS_PTYPE_ACTION_INDEX - 1] = {
			.type = RTE_FLOW_ACTION_TYPE_MARK,
			.conf = &(const struct rte_flow_action_mark) { .id = 101 }
		},
#endif
		[MLX5_RSS_PTYPE_ACTION_INDEX] = {
			.type = RTE_FLOW_ACTION_TYPE_RSS,
			.conf = &ptype_rss_conf
		},
		[MLX5_RSS_PTYPE_ACTION_INDEX + 1] = { .type = RTE_FLOW_ACTION_TYPE_END }
	};

	if (l4_item) {
		/*
		 * The original flow pattern extends up to the L4 level.
		 * L4 is the maximal expansion level.
		 * The original pattern does not need expansion.
		 */
		rte_flow_error_set(error, 0, RTE_FLOW_ERROR_TYPE_NONE, NULL, NULL);
		return NULL;
	}
	if (!l4_hash) {
		if (!l3_hash) {
			/*
			 * The RSS action was not configured to hash L3 or L4.
			 * No expansion needed.
			 */
			rte_flow_error_set(error, 0, RTE_FLOW_ERROR_TYPE_NONE, NULL, NULL);
			return NULL;
		}
		if (l3_item) {
			/*
			 * The original flow pattern extends up to the L3 level
			 * and the RSS action does not hash on L4.
			 * The original pattern does not need expansion.
			 */
			rte_flow_error_set(error, 0, RTE_FLOW_ERROR_TYPE_NONE, NULL, NULL);
			return NULL;
		}
	}
	/* Create RSS expansions in a dedicated PTYPE flow group. */
	ptype_attr.group = mlx5_hw_get_rss_ptype_group(dev);
	if (!ptype_attr.group) {
		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
				   NULL, "cannot get RSS PTYPE group");
		return NULL;
	}
	ptype_rss_conf = *rss_conf;
	mlx5_nta_rss_init_ptype_ctx(&rss_ctx, dev, &ptype_attr, ptype_pattern,
				    ptype_actions, rss_conf, &expansion_head,
				    error, item_flags, flow_type, external);
	rss_miss = mlx5_hw_rss_ptype_create_miss_flow(dev, rss_conf, ptype_attr.group,
						      external, error);
	if (!rss_miss)
		goto error;
	if (l4_hash) {
		rss_next = mlx5_hw_rss_expand_l4(&rss_ctx);
		if (!rss_next)
			goto error;
	} else if (l3_hash) {
		rss_next = mlx5_hw_rss_expand_l3(&rss_ctx);
		if (!rss_next)
			goto error;
	}
	rss_base = mlx5_hw_rss_ptype_create_base_flow(dev, attr, items, actions,
						      ptype_attr.group, item_flags,
						      action_flags, external,
						      flow_type, error);
	if (!rss_base)
		goto error;
	SLIST_INSERT_HEAD(&expansion_head, rss_miss, nt2hws->next);
	SLIST_INSERT_HEAD(&expansion_head, rss_base, nt2hws->next);
	/*
	 * The PMD must return to the application a reference to the base flow.
	 * This way RSS expansion can work with counter, meter and other
	 * flow actions.
	 */
	MLX5_ASSERT(rss_base == SLIST_FIRST(&expansion_head));
	rss_next = SLIST_NEXT(rss_base, nt2hws->next);
	while (rss_next) {
		rss_next->nt2hws->chaned_flow = 1;
		rss_next = SLIST_NEXT(rss_next, nt2hws->next);
	}
	return SLIST_FIRST(&expansion_head);

error:
	if (rss_miss)
		flow_hw_list_destroy(dev, flow_type, (uintptr_t)rss_miss);
	if (rss_next)
		flow_hw_list_destroy(dev, flow_type, (uintptr_t)rss_next);
	mlx5_hw_release_rss_ptype_group(dev, ptype_attr.group);
	return NULL;
}

#endif