/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2021 NVIDIA Corporation & Affiliates
 */
#include <rte_malloc.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_malloc.h>
#include "mlx5.h"
#include "mlx5_flow.h"

static_assert(sizeof(uint32_t) * CHAR_BIT >= MLX5_PORT_FLEX_ITEM_NUM,
	      "Flex item maximal number exceeds uint32_t bit width");

/**
 * Routine called once on port initialization to initialize
 * the flex item related infrastructure.
 *
 * @param dev
 *   Ethernet device to perform flex item initialization.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flex_item_port_init(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	rte_spinlock_init(&priv->flex_item_sl);
	MLX5_ASSERT(!priv->flex_item_map);
	return 0;
}

/**
 * Routine called once on port close to perform flex item
 * related infrastructure cleanup.
 *
 * @param dev
 *   Ethernet device to perform cleanup.
 */
void
mlx5_flex_item_port_cleanup(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t i;

	for (i = 0; i < MLX5_PORT_FLEX_ITEM_NUM && priv->flex_item_map; i++) {
		if (priv->flex_item_map & (1 << i)) {
			struct mlx5_flex_item *flex = &priv->flex_item[i];

			claim_zero(mlx5_list_unregister
					(priv->sh->flex_parsers_dv,
					 &flex->devx_fp->entry));
			flex->devx_fp = NULL;
			flex->refcnt = 0;
			priv->flex_item_map &= ~(1 << i);
		}
	}
}

static int
mlx5_flex_index(struct mlx5_priv *priv, struct mlx5_flex_item *item)
{
	uintptr_t start = (uintptr_t)&priv->flex_item[0];
	uintptr_t entry = (uintptr_t)item;
	uintptr_t idx = (entry - start) / sizeof(struct mlx5_flex_item);

	if (entry < start ||
	    idx >= MLX5_PORT_FLEX_ITEM_NUM ||
	    (entry - start) % sizeof(struct mlx5_flex_item) ||
	    !(priv->flex_item_map & (1u << idx)))
		return -1;
	return (int)idx;
}

static struct mlx5_flex_item *
mlx5_flex_alloc(struct mlx5_priv *priv)
{
	struct mlx5_flex_item *item = NULL;

	rte_spinlock_lock(&priv->flex_item_sl);
	if (~priv->flex_item_map) {
		uint32_t idx = rte_bsf32(~priv->flex_item_map);

		if (idx < MLX5_PORT_FLEX_ITEM_NUM) {
			item = &priv->flex_item[idx];
			MLX5_ASSERT(!item->refcnt);
			MLX5_ASSERT(!item->devx_fp);
			item->devx_fp = NULL;
			__atomic_store_n(&item->refcnt, 0, __ATOMIC_RELEASE);
			priv->flex_item_map |= 1u << idx;
		}
	}
	rte_spinlock_unlock(&priv->flex_item_sl);
	return item;
}

static void
mlx5_flex_free(struct mlx5_priv *priv, struct mlx5_flex_item *item)
{
	int idx = mlx5_flex_index(priv, item);

	MLX5_ASSERT(idx >= 0 &&
		    idx < MLX5_PORT_FLEX_ITEM_NUM &&
		    (priv->flex_item_map & (1u << idx)));
	if (idx >= 0) {
		rte_spinlock_lock(&priv->flex_item_sl);
		MLX5_ASSERT(!item->refcnt);
		MLX5_ASSERT(!item->devx_fp);
		item->devx_fp = NULL;
		__atomic_store_n(&item->refcnt, 0, __ATOMIC_RELEASE);
		priv->flex_item_map &= ~(1u << idx);
		rte_spinlock_unlock(&priv->flex_item_sl);
	}
}

static uint32_t
mlx5_flex_get_bitfield(const struct rte_flow_item_flex *item,
		       uint32_t pos, uint32_t width, uint32_t shift)
{
	const uint8_t *ptr = item->pattern + pos / CHAR_BIT;
	uint32_t val, vbits;

	/* Proceed with the bitfield start byte. */
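	/*
	 * Illustration: for a byte-aligned 16-bit field with zero shift the
	 * two pattern bytes b0 and b1 are accumulated LSB-first as
	 * (b0 | b1 << 8) and then byte-swapped, so the routine returns
	 * (b0 << 24) | (b1 << 16), i.e. the field value in network order
	 * placed in the most significant bits of the 32-bit sample word.
	 */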
	MLX5_ASSERT(width <= sizeof(uint32_t) * CHAR_BIT && width);
	MLX5_ASSERT(width + shift <= sizeof(uint32_t) * CHAR_BIT);
	if (item->length <= pos / CHAR_BIT)
		return 0;
	val = *ptr++ >> (pos % CHAR_BIT);
	vbits = CHAR_BIT - pos % CHAR_BIT;
	pos = (pos + vbits) / CHAR_BIT;
	vbits = RTE_MIN(vbits, width);
	val &= RTE_BIT32(vbits) - 1;
	while (vbits < width && pos < item->length) {
		uint32_t part = RTE_MIN(width - vbits, (uint32_t)CHAR_BIT);
		uint32_t tmp = *ptr++;

		pos++;
		tmp &= RTE_BIT32(part) - 1;
		val |= tmp << vbits;
		vbits += part;
	}
	return rte_bswap32(val <<= shift);
}

#define SET_FP_MATCH_SAMPLE_ID(x, def, msk, val, sid) \
	do { \
		uint32_t tmp, out = (def); \
		tmp = MLX5_GET(fte_match_set_misc4, misc4_v, \
			       prog_sample_field_value_##x); \
		tmp = (tmp & ~out) | (val); \
		MLX5_SET(fte_match_set_misc4, misc4_v, \
			 prog_sample_field_value_##x, tmp); \
		tmp = MLX5_GET(fte_match_set_misc4, misc4_m, \
			       prog_sample_field_value_##x); \
		tmp = (tmp & ~out) | (msk); \
		MLX5_SET(fte_match_set_misc4, misc4_m, \
			 prog_sample_field_value_##x, tmp); \
		tmp = tmp ? (sid) : 0; \
		MLX5_SET(fte_match_set_misc4, misc4_v, \
			 prog_sample_field_id_##x, tmp); \
		MLX5_SET(fte_match_set_misc4, misc4_m, \
			 prog_sample_field_id_##x, tmp); \
	} while (0)

__rte_always_inline static void
mlx5_flex_set_match_sample(void *misc4_m, void *misc4_v,
			   uint32_t def, uint32_t mask, uint32_t value,
			   uint32_t sample_id, uint32_t id)
{
	switch (id) {
	case 0:
		SET_FP_MATCH_SAMPLE_ID(0, def, mask, value, sample_id);
		break;
	case 1:
		SET_FP_MATCH_SAMPLE_ID(1, def, mask, value, sample_id);
		break;
	case 2:
		SET_FP_MATCH_SAMPLE_ID(2, def, mask, value, sample_id);
		break;
	case 3:
		SET_FP_MATCH_SAMPLE_ID(3, def, mask, value, sample_id);
		break;
	case 4:
		SET_FP_MATCH_SAMPLE_ID(4, def, mask, value, sample_id);
		break;
	case 5:
		SET_FP_MATCH_SAMPLE_ID(5, def, mask, value, sample_id);
		break;
	case 6:
		SET_FP_MATCH_SAMPLE_ID(6, def, mask, value, sample_id);
		break;
	case 7:
		SET_FP_MATCH_SAMPLE_ID(7, def, mask, value, sample_id);
		break;
	default:
		MLX5_ASSERT(false);
		break;
	}
#undef SET_FP_MATCH_SAMPLE_ID
}

/**
 * Get the flex parser sample id and corresponding mask
 * per shift and width information.
 *
 * @param[in] tp
 *   Mlx5 flex item sample mapping handle.
 * @param[in] idx
 *   Mapping index.
 * @param[in, out] pos
 *   Current bit offset in the pattern, advanced over DUMMY fields.
 * @param[in] is_inner
 *   For inner matching or not.
 * @param[out] def
 *   Mask generated by the mapping shift and width.
 *
 * @return
 *   The sample id (>= 0) on success, -1 if the field is a DUMMY
 *   placeholder and should be ignored.
 */
int
mlx5_flex_get_sample_id(const struct mlx5_flex_item *tp,
			uint32_t idx, uint32_t *pos,
			bool is_inner, uint32_t *def)
{
	const struct mlx5_flex_pattern_field *map = tp->map + idx;
	uint32_t id = map->reg_id;

	*def = (RTE_BIT64(map->width) - 1) << map->shift;
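	/*
	 * For example, a mapping entry with width 16 and shift 8 yields
	 * *def = 0xFFFF << 8 = 0x00FFFF00, i.e. the bits of the sample
	 * register that this pattern field actually occupies.
	 */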
	/* Skip placeholders for DUMMY fields. */
	if (id == MLX5_INVALID_SAMPLE_REG_ID) {
		*pos += map->width;
		return -1;
	}
	MLX5_ASSERT(map->width);
	MLX5_ASSERT(id < tp->devx_fp->num_samples);
	if (tp->tunnel_mode == FLEX_TUNNEL_MODE_MULTI && is_inner) {
		uint32_t num_samples = tp->devx_fp->num_samples / 2;

		MLX5_ASSERT(tp->devx_fp->num_samples % 2 == 0);
		MLX5_ASSERT(id < num_samples);
		id += num_samples;
	}
	return id;
}

/**
 * Get the flex parser mapping value per definer format_select_dw.
 *
 * @param[in] item
 *   Rte flex item pointer.
 * @param[in] flex
 *   Mlx5 flex item sample mapping handle.
 * @param[in] byte_off
 *   Mlx5 flex item format_select_dw.
 * @param[in] is_mask
 *   Spec or mask.
 * @param[in] tunnel
 *   Tunnel mode or not.
 * @param[out] value
 *   Value calculated for this flex parser, either spec or mask.
 *
 * @return
 *   0 on success, -1 for error.
 */
int
mlx5_flex_get_parser_value_per_byte_off(const struct rte_flow_item_flex *item,
					void *flex, uint32_t byte_off,
					bool is_mask, bool tunnel, uint32_t *value)
{
	struct mlx5_flex_pattern_field *map;
	struct mlx5_flex_item *tp = flex;
	uint32_t def, i, pos, val;
	int id;

	*value = 0;
	for (i = 0, pos = 0; i < tp->mapnum && pos < item->length * CHAR_BIT; i++) {
		map = tp->map + i;
		id = mlx5_flex_get_sample_id(tp, i, &pos, tunnel, &def);
		if (id == -1)
			continue;
		if (id >= (int)tp->devx_fp->num_samples || id >= MLX5_GRAPH_NODE_SAMPLE_NUM)
			return -1;
		if (byte_off == tp->devx_fp->sample_ids[id].format_select_dw * sizeof(uint32_t)) {
			val = mlx5_flex_get_bitfield(item, pos, map->width, map->shift);
			if (is_mask)
				val &= RTE_BE32(def);
			*value |= val;
		}
		pos += map->width;
	}
	return 0;
}

/**
 * Translate item pattern into matcher fields according to translation
 * array.
 *
 * @param dev
 *   Ethernet device to translate flex item on.
 * @param[in, out] matcher
 *   Flow matcher to configure.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] is_inner
 *   Inner Flex Item (follows after tunnel header).
 */
void
mlx5_flex_flow_translate_item(struct rte_eth_dev *dev,
			      void *matcher, void *key,
			      const struct rte_flow_item *item,
			      bool is_inner)
{
	const struct rte_flow_item_flex *spec, *mask;
	void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
				     misc_parameters_4);
	void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_hca_flex_attr *attr = &priv->sh->cdev->config.hca_attr.flex;
	struct mlx5_flex_item *tp;
	uint32_t i, pos = 0;
	uint32_t sample_id;

	RTE_SET_USED(dev);
	MLX5_ASSERT(item->spec && item->mask);
	spec = item->spec;
	mask = item->mask;
	tp = (struct mlx5_flex_item *)spec->handle;
	MLX5_ASSERT(mlx5_flex_index(priv, tp) >= 0);
	for (i = 0; i < tp->mapnum; i++) {
		struct mlx5_flex_pattern_field *map = tp->map + i;
		uint32_t val, msk, def;
		int id = mlx5_flex_get_sample_id(tp, i, &pos, is_inner, &def);

		if (id == -1)
			continue;
		MLX5_ASSERT(id < (int)tp->devx_fp->num_samples);
		if (id >= (int)tp->devx_fp->num_samples ||
		    id >= MLX5_GRAPH_NODE_SAMPLE_NUM)
			return;
		val = mlx5_flex_get_bitfield(spec, pos, map->width, map->shift);
		msk = mlx5_flex_get_bitfield(mask, pos, map->width, map->shift);
		if (attr->ext_sample_id)
			sample_id = tp->devx_fp->sample_ids[id].sample_id;
		else
			sample_id = tp->devx_fp->sample_ids[id].id;
		mlx5_flex_set_match_sample(misc4_m, misc4_v,
					   def, msk & def, val & msk & def,
					   sample_id, id);
		pos += map->width;
	}
}

/**
 * Convert flex item handle (from the RTE flow) to flex item index on port.
 * Optionally can increment flex item object reference count.
 *
 * @param dev
 *   Ethernet device to acquire flex item on.
 * @param[in] handle
 *   Flow item handle from item spec.
 * @param[in] acquire
 *   If set - increment reference counter.
 *
 * @return
 *   >=0 - index on success, a negative errno value otherwise
 *   and rte_errno is set.
 */
int
mlx5_flex_acquire_index(struct rte_eth_dev *dev,
			struct rte_flow_item_flex_handle *handle,
			bool acquire)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flex_item *flex = (struct mlx5_flex_item *)handle;
	int ret = mlx5_flex_index(priv, flex);

	if (ret < 0) {
		errno = EINVAL;
		rte_errno = EINVAL;
		return ret;
	}
	if (acquire)
		__atomic_add_fetch(&flex->refcnt, 1, __ATOMIC_RELEASE);
	return ret;
}

/**
 * Release flex item index on port - decrements reference counter by index.
 *
 * @param dev
 *   Ethernet device to release flex item index on.
 * @param[in] index
 *   Flow item index.
 *
 * @return
 *   0 - on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flex_release_index(struct rte_eth_dev *dev,
			int index)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flex_item *flex;

	if (index >= MLX5_PORT_FLEX_ITEM_NUM ||
	    !(priv->flex_item_map & (1u << index))) {
		errno = EINVAL;
		rte_errno = EINVAL;
		return -EINVAL;
	}
	flex = priv->flex_item + index;
	if (flex->refcnt <= 1) {
		MLX5_ASSERT(false);
		errno = EINVAL;
		rte_errno = EINVAL;
		return -EINVAL;
	}
	__atomic_sub_fetch(&flex->refcnt, 1, __ATOMIC_RELEASE);
	return 0;
}
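
/*
 * Illustrative lifecycle (summary, not an API contract): flow_dv_item_create()
 * sets refcnt to 1, flow rules referencing the item call
 * mlx5_flex_acquire_index() to pin it and mlx5_flex_release_index() on
 * destruction, and flow_dv_item_release() succeeds only when refcnt is back
 * to 1, i.e. when no flow rule still references the flex item.
 */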

/*
 * Calculate largest mask value for a given shift.
 *
 *   shift    mask
 *  ------- ---------------
 *     0     b111100  0x3C
 *     1     b111110  0x3E
 *     2     b111111  0x3F
 *     3     b011111  0x1F
 *     4     b001111  0x0F
 *     5     b000111  0x07
 */
static uint8_t
mlx5_flex_hdr_len_mask(uint8_t shift,
		       const struct mlx5_hca_flex_attr *attr)
{
	uint32_t base_mask;
	int diff = shift - MLX5_PARSE_GRAPH_NODE_HDR_LEN_SHIFT_DWORD;

	base_mask = mlx5_hca_parse_graph_node_base_hdr_len_mask(attr);
	return diff == 0 ? base_mask :
	       diff < 0 ? (base_mask << -diff) & base_mask : base_mask >> diff;
}

static int
mlx5_flex_translate_length(struct mlx5_hca_flex_attr *attr,
			   const struct rte_flow_item_flex_conf *conf,
			   struct mlx5_flex_parser_devx *devx,
			   struct rte_flow_error *error)
{
	const struct rte_flow_item_flex_field *field = &conf->next_header;
	struct mlx5_devx_graph_node_attr *node = &devx->devx_conf;
	uint32_t len_width, mask;

	if (field->field_base % CHAR_BIT)
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			 "not byte aligned header length field");
	switch (field->field_mode) {
	case FIELD_MODE_DUMMY:
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			 "invalid header length field mode (DUMMY)");
	case FIELD_MODE_FIXED:
		if (!(attr->header_length_mode &
		    RTE_BIT32(MLX5_GRAPH_NODE_LEN_FIXED)))
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "unsupported header length field mode (FIXED)");
		if (field->field_size ||
		    field->offset_mask || field->offset_shift)
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "invalid fields for fixed mode");
		if (field->field_base < 0)
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "negative header length field base (FIXED)");
		node->header_length_mode = MLX5_GRAPH_NODE_LEN_FIXED;
		break;
	case FIELD_MODE_OFFSET:
		if (!(attr->header_length_mode &
		    RTE_BIT32(MLX5_GRAPH_NODE_LEN_FIELD)))
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "unsupported header length field mode (OFFSET)");
		node->header_length_mode = MLX5_GRAPH_NODE_LEN_FIELD;
		if (field->offset_mask == 0 ||
		    !rte_is_power_of_2(field->offset_mask + 1))
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "invalid length field offset mask (OFFSET)");
		len_width = rte_fls_u32(field->offset_mask);
		if (len_width > attr->header_length_mask_width)
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "length field offset mask too wide (OFFSET)");
		mask = mlx5_flex_hdr_len_mask(field->offset_shift, attr);
		if (mask < field->offset_mask)
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "length field shift too big (OFFSET)");
		node->header_length_field_mask = RTE_MIN(mask,
							 field->offset_mask);
		break;
	case FIELD_MODE_BITMASK:
		if (!(attr->header_length_mode &
		    RTE_BIT32(MLX5_GRAPH_NODE_LEN_BITMASK)))
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "unsupported header length field mode (BITMASK)");
		if (attr->header_length_mask_width < field->field_size)
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "header length field width exceeds limit");
		node->header_length_mode = MLX5_GRAPH_NODE_LEN_BITMASK;
		mask = mlx5_flex_hdr_len_mask(field->offset_shift, attr);
		if (mask < field->offset_mask)
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "length field shift too big (BITMASK)");
		node->header_length_field_mask = RTE_MIN(mask,
							 field->offset_mask);
		break;
	default:
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			 "unknown header length field mode");
	}
	if (field->field_base / CHAR_BIT >= 0 &&
	    field->field_base / CHAR_BIT > attr->max_base_header_length)
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			 "header length field base exceeds limit");
	node->header_length_base_value = field->field_base / CHAR_BIT;
	if (field->field_mode == FIELD_MODE_OFFSET ||
	    field->field_mode == FIELD_MODE_BITMASK) {
		if (field->offset_shift > 15 || field->offset_shift < 0)
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "header length field shift exceeds limit");
		node->header_length_field_shift = field->offset_shift;
		node->header_length_field_offset = field->offset_base;
	}
	return 0;
}

static int
mlx5_flex_translate_next(struct mlx5_hca_flex_attr *attr,
			 const struct rte_flow_item_flex_conf *conf,
			 struct mlx5_flex_parser_devx *devx,
			 struct rte_flow_error *error)
{
	const struct rte_flow_item_flex_field *field = &conf->next_protocol;
	struct mlx5_devx_graph_node_attr *node = &devx->devx_conf;

	switch (field->field_mode) {
	case FIELD_MODE_DUMMY:
		if (conf->nb_outputs)
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "next protocol field is required (DUMMY)");
		return 0;
	case FIELD_MODE_FIXED:
		break;
	case FIELD_MODE_OFFSET:
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			 "unsupported next protocol field mode (OFFSET)");
		break;
	case FIELD_MODE_BITMASK:
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			 "unsupported next protocol field mode (BITMASK)");
	default:
		return rte_flow_error_set
			(error, EINVAL,
			 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			 "unknown next protocol field mode");
	}
	MLX5_ASSERT(field->field_mode == FIELD_MODE_FIXED);
	if (!conf->nb_outputs)
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			 "out link(s) is required if next field present");
	if (attr->max_next_header_offset < field->field_base)
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			 "next protocol field base exceeds limit");
	if (field->offset_shift)
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			 "unsupported next protocol field shift");
	node->next_header_field_offset = field->field_base;
	node->next_header_field_size = field->field_size;
	return 0;
}
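
/*
 * Example of the interval logic used below (values are made up): sample
 * fields covering bits [8, 24) and [16, 40) are merged into the single
 * interval [8, 40); mlx5_flex_alloc_sample() then covers it with two
 * 32-bit sample registers based at bits 0 and 32.
 */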

/* Helper structure to handle field bit intervals. */
struct mlx5_flex_field_cover {
	uint16_t num;
	int32_t start[MLX5_FLEX_ITEM_MAPPING_NUM];
	int32_t end[MLX5_FLEX_ITEM_MAPPING_NUM];
	uint8_t mapped[MLX5_FLEX_ITEM_MAPPING_NUM / CHAR_BIT + 1];
};

static void
mlx5_flex_insert_field(struct mlx5_flex_field_cover *cover,
		       uint16_t num, int32_t start, int32_t end)
{
	MLX5_ASSERT(num < MLX5_FLEX_ITEM_MAPPING_NUM);
	MLX5_ASSERT(num <= cover->num);
	if (num < cover->num) {
		memmove(&cover->start[num + 1], &cover->start[num],
			(cover->num - num) * sizeof(int32_t));
		memmove(&cover->end[num + 1], &cover->end[num],
			(cover->num - num) * sizeof(int32_t));
	}
	cover->start[num] = start;
	cover->end[num] = end;
	cover->num++;
}

static void
mlx5_flex_merge_field(struct mlx5_flex_field_cover *cover, uint16_t num)
{
	uint32_t i, del = 0;
	int32_t end;

	MLX5_ASSERT(num < MLX5_FLEX_ITEM_MAPPING_NUM);
	MLX5_ASSERT(num < (cover->num - 1));
	end = cover->end[num];
	for (i = num + 1; i < cover->num; i++) {
		if (end < cover->start[i])
			break;
		del++;
		if (end <= cover->end[i]) {
			cover->end[num] = cover->end[i];
			break;
		}
	}
	if (del) {
		MLX5_ASSERT(del < (cover->num - 1u - num));
		cover->num -= del;
		MLX5_ASSERT(cover->num > num);
		if ((cover->num - num) > 1) {
			memmove(&cover->start[num + 1],
				&cover->start[num + 1 + del],
				(cover->num - num - 1) * sizeof(int32_t));
			memmove(&cover->end[num + 1],
				&cover->end[num + 1 + del],
				(cover->num - num - 1) * sizeof(int32_t));
		}
	}
}

/*
 * Validate the sample field and update interval array
 * if parameters match with the 'match' field.
 * Returns:
 *   < 0  - error
 *   == 0 - no match, interval array not updated
 *   > 0  - match, interval array updated
 */
static int
mlx5_flex_cover_sample(struct mlx5_flex_field_cover *cover,
		       struct rte_flow_item_flex_field *field,
		       struct rte_flow_item_flex_field *match,
		       struct mlx5_hca_flex_attr *attr,
		       struct rte_flow_error *error)
{
	int32_t start, end;
	uint32_t i;

	switch (field->field_mode) {
	case FIELD_MODE_DUMMY:
		return 0;
	case FIELD_MODE_FIXED:
		if (!(attr->sample_offset_mode &
		    RTE_BIT32(MLX5_GRAPH_SAMPLE_OFFSET_FIXED)))
			return rte_flow_error_set
				(error, EINVAL,
				 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "unsupported sample field mode (FIXED)");
		if (field->offset_shift)
			return rte_flow_error_set
				(error, EINVAL,
				 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "invalid sample field shift (FIXED)");
		if (field->field_base < 0)
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "invalid sample field base (FIXED)");
		if (field->field_base / CHAR_BIT > attr->max_sample_base_offset)
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "sample field base exceeds limit (FIXED)");
		break;
	case FIELD_MODE_OFFSET:
		if (!(attr->sample_offset_mode &
		    RTE_BIT32(MLX5_GRAPH_SAMPLE_OFFSET_FIELD)))
			return rte_flow_error_set
				(error, EINVAL,
				 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "unsupported sample field mode (OFFSET)");
		if (field->field_base / CHAR_BIT >= 0 &&
		    field->field_base / CHAR_BIT > attr->max_sample_base_offset)
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "sample field base exceeds limit");
		break;
	case FIELD_MODE_BITMASK:
		if (!(attr->sample_offset_mode &
		    RTE_BIT32(MLX5_GRAPH_SAMPLE_OFFSET_BITMASK)))
			return rte_flow_error_set
				(error, EINVAL,
				 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "unsupported sample field mode (BITMASK)");
		if (field->field_base / CHAR_BIT >= 0 &&
		    field->field_base / CHAR_BIT > attr->max_sample_base_offset)
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "sample field base exceeds limit");
		break;
	default:
		return rte_flow_error_set
			(error, EINVAL,
			 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			 "unknown data sample field mode");
	}
	if (!match) {
		if (!field->field_size)
			return rte_flow_error_set
				(error, EINVAL,
				 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "zero sample field width");
		if (field->field_id)
			DRV_LOG(DEBUG, "sample field id hint ignored");
	} else {
		if (field->field_mode != match->field_mode ||
		    field->offset_base | match->offset_base ||
		    field->offset_mask | match->offset_mask ||
		    field->offset_shift | match->offset_shift)
			return 0;
	}
	start = field->field_base;
	end = start + field->field_size;
	/* Add the new or similar field to interval array. */
	if (!cover->num) {
		cover->start[cover->num] = start;
		cover->end[cover->num] = end;
		cover->num = 1;
		return 1;
	}
	for (i = 0; i < cover->num; i++) {
		if (start > cover->end[i]) {
			if (i >= (cover->num - 1u)) {
				mlx5_flex_insert_field(cover, cover->num,
						       start, end);
				break;
			}
			continue;
		}
		if (end < cover->start[i]) {
			mlx5_flex_insert_field(cover, i, start, end);
			break;
		}
		if (start < cover->start[i])
			cover->start[i] = start;
		if (end > cover->end[i]) {
			cover->end[i] = end;
			if (i < (cover->num - 1u))
				mlx5_flex_merge_field(cover, i);
		}
		break;
	}
	return 1;
}

static void
mlx5_flex_config_sample(struct mlx5_devx_match_sample_attr *na,
			struct rte_flow_item_flex_field *field,
			enum rte_flow_item_flex_tunnel_mode tunnel_mode)
{
	memset(na, 0, sizeof(struct mlx5_devx_match_sample_attr));
	na->flow_match_sample_en = 1;
	switch (field->field_mode) {
	case FIELD_MODE_FIXED:
		na->flow_match_sample_offset_mode =
			MLX5_GRAPH_SAMPLE_OFFSET_FIXED;
		break;
	case FIELD_MODE_OFFSET:
		na->flow_match_sample_offset_mode =
			MLX5_GRAPH_SAMPLE_OFFSET_FIELD;
		na->flow_match_sample_field_offset = field->offset_base;
		na->flow_match_sample_field_offset_mask = field->offset_mask;
		na->flow_match_sample_field_offset_shift = field->offset_shift;
		break;
	case FIELD_MODE_BITMASK:
		na->flow_match_sample_offset_mode =
			MLX5_GRAPH_SAMPLE_OFFSET_BITMASK;
		na->flow_match_sample_field_offset = field->offset_base;
		na->flow_match_sample_field_offset_mask = field->offset_mask;
		na->flow_match_sample_field_offset_shift = field->offset_shift;
		break;
	default:
		MLX5_ASSERT(false);
		break;
	}
	switch (tunnel_mode) {
	case FLEX_TUNNEL_MODE_SINGLE:
		/* Fallthrough */
	case FLEX_TUNNEL_MODE_TUNNEL:
		na->flow_match_sample_tunnel_mode =
			MLX5_GRAPH_SAMPLE_TUNNEL_FIRST;
		break;
	case FLEX_TUNNEL_MODE_MULTI:
		/* Fallthrough */
	case FLEX_TUNNEL_MODE_OUTER:
		na->flow_match_sample_tunnel_mode =
			MLX5_GRAPH_SAMPLE_TUNNEL_OUTER;
		break;
	case FLEX_TUNNEL_MODE_INNER:
		na->flow_match_sample_tunnel_mode =
			MLX5_GRAPH_SAMPLE_TUNNEL_INNER;
		break;
	default:
		MLX5_ASSERT(false);
		break;
	}
}
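
/*
 * Note: mlx5_flex_map_sample() below rebuilds this attribute for every
 * field and memcmp()s it against the already allocated samples (with the
 * base offset normalized), so fields share a sample register only when
 * all the offset mode and tunnel mode attributes produced here match
 * exactly.
 */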

/* Map specified field to set/subset of allocated sample registers. */
static int
mlx5_flex_map_sample(struct rte_flow_item_flex_field *field,
		     struct mlx5_flex_parser_devx *parser,
		     struct mlx5_flex_item *item,
		     struct rte_flow_error *error)
{
	struct mlx5_devx_match_sample_attr node;
	int32_t start = field->field_base;
	int32_t end = start + field->field_size;
	struct mlx5_flex_pattern_field *trans;
	uint32_t i, done_bits = 0;

	if (field->field_mode == FIELD_MODE_DUMMY) {
		done_bits = field->field_size;
		while (done_bits) {
			uint32_t part = RTE_MIN(done_bits,
						sizeof(uint32_t) * CHAR_BIT);
			if (item->mapnum >= MLX5_FLEX_ITEM_MAPPING_NUM)
				return rte_flow_error_set
					(error,
					 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
					 "too many flex item pattern translations");
			trans = &item->map[item->mapnum];
			trans->reg_id = MLX5_INVALID_SAMPLE_REG_ID;
			trans->shift = 0;
			trans->width = part;
			item->mapnum++;
			done_bits -= part;
		}
		return 0;
	}
	mlx5_flex_config_sample(&node, field, item->tunnel_mode);
	for (i = 0; i < parser->num_samples; i++) {
		struct mlx5_devx_match_sample_attr *sample =
			&parser->devx_conf.sample[i];
		int32_t reg_start, reg_end;
		int32_t cov_start, cov_end;

		MLX5_ASSERT(sample->flow_match_sample_en);
		if (!sample->flow_match_sample_en)
			break;
		node.flow_match_sample_field_base_offset =
			sample->flow_match_sample_field_base_offset;
		if (memcmp(&node, sample, sizeof(node)))
			continue;
		reg_start = (int8_t)sample->flow_match_sample_field_base_offset;
		reg_start *= CHAR_BIT;
		reg_end = reg_start + 32;
		if (end <= reg_start || start >= reg_end)
			continue;
		cov_start = RTE_MAX(reg_start, start);
		cov_end = RTE_MIN(reg_end, end);
		MLX5_ASSERT(cov_end > cov_start);
		done_bits += cov_end - cov_start;
		if (item->mapnum >= MLX5_FLEX_ITEM_MAPPING_NUM)
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "too many flex item pattern translations");
		trans = &item->map[item->mapnum];
		item->mapnum++;
		trans->reg_id = i;
		trans->shift = cov_start - reg_start;
		trans->width = cov_end - cov_start;
	}
	if (done_bits != field->field_size) {
		MLX5_ASSERT(false);
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			 "failed to map field to sample register");
	}
	return 0;
}

/* Allocate sample registers for the specified field type and interval array. */
static int
mlx5_flex_alloc_sample(struct mlx5_flex_field_cover *cover,
		       struct mlx5_flex_parser_devx *parser,
		       struct mlx5_flex_item *item,
		       struct rte_flow_item_flex_field *field,
		       struct mlx5_hca_flex_attr *attr,
		       struct rte_flow_error *error)
{
	struct mlx5_devx_match_sample_attr node;
	uint32_t idx = 0;

	mlx5_flex_config_sample(&node, field, item->tunnel_mode);
	while (idx < cover->num) {
		int32_t start, end;

		/*
		 * Sample base offsets are in bytes, should be aligned
		 * to 32-bit as required by firmware for samples.
		 */
		start = RTE_ALIGN_FLOOR(cover->start[idx],
					sizeof(uint32_t) * CHAR_BIT);
		node.flow_match_sample_field_base_offset =
			(start / CHAR_BIT) & 0xFF;
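		/*
		 * E.g. (illustrative): an interval starting at bit 40 is
		 * aligned down to bit 32, giving a base offset of 4 bytes;
		 * the register then samples the 32 bits [32, 64).
		 */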
		/* Allocate sample register. */
		if (parser->num_samples >= MLX5_GRAPH_NODE_SAMPLE_NUM ||
		    parser->num_samples >= attr->max_num_sample ||
		    parser->num_samples >= attr->max_num_prog_sample)
			return rte_flow_error_set
				(error, EINVAL,
				 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "no sample registers to handle all flex item fields");
		parser->devx_conf.sample[parser->num_samples] = node;
		parser->num_samples++;
		/* Remove or update covered intervals. */
		end = start + 32;
		while (idx < cover->num) {
			if (end >= cover->end[idx]) {
				idx++;
				continue;
			}
			if (end > cover->start[idx])
				cover->start[idx] = end;
			break;
		}
	}
	return 0;
}

static int
mlx5_flex_translate_sample(struct mlx5_hca_flex_attr *attr,
			   const struct rte_flow_item_flex_conf *conf,
			   struct mlx5_flex_parser_devx *parser,
			   struct mlx5_flex_item *item,
			   struct rte_flow_error *error)
{
	struct mlx5_flex_field_cover cover;
	uint32_t i, j;
	int ret;

	switch (conf->tunnel) {
	case FLEX_TUNNEL_MODE_SINGLE:
		/* Fallthrough */
	case FLEX_TUNNEL_MODE_OUTER:
		/* Fallthrough */
	case FLEX_TUNNEL_MODE_INNER:
		/* Fallthrough */
	case FLEX_TUNNEL_MODE_MULTI:
		/* Fallthrough */
	case FLEX_TUNNEL_MODE_TUNNEL:
		break;
	default:
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			 "unrecognized tunnel mode");
	}
	item->tunnel_mode = conf->tunnel;
	if (conf->nb_samples > MLX5_FLEX_ITEM_MAPPING_NUM)
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			 "sample field number exceeds limit");
	/*
	 * The application can specify fields smaller or bigger than 32 bits
	 * covered with single sample register and it can specify field
	 * offsets in any order.
	 *
	 * Gather all similar fields together, build array of bit intervals
	 * in ascending order and try to cover with the smallest set of sample
	 * registers.
	 */
	memset(&cover, 0, sizeof(cover));
	for (i = 0; i < conf->nb_samples; i++) {
		struct rte_flow_item_flex_field *fl = conf->sample_data + i;

		/* Check whether field was covered in the previous iteration. */
		if (cover.mapped[i / CHAR_BIT] & (1u << (i % CHAR_BIT)))
			continue;
		if (fl->field_mode == FIELD_MODE_DUMMY)
			continue;
		/* Build an interval array for the field and similar ones */
		cover.num = 0;
		/* Add the first field to array unconditionally. */
		ret = mlx5_flex_cover_sample(&cover, fl, NULL, attr, error);
		if (ret < 0)
			return ret;
		MLX5_ASSERT(ret > 0);
		cover.mapped[i / CHAR_BIT] |= 1u << (i % CHAR_BIT);
		for (j = i + 1; j < conf->nb_samples; j++) {
			struct rte_flow_item_flex_field *ft;

			/* Add field to array if its type matches. */
			ft = conf->sample_data + j;
			ret = mlx5_flex_cover_sample(&cover, ft, fl,
						     attr, error);
			if (ret < 0)
				return ret;
			if (!ret)
				continue;
			cover.mapped[j / CHAR_BIT] |= 1u << (j % CHAR_BIT);
		}
		/* Allocate sample registers to cover array of intervals. */
		ret = mlx5_flex_alloc_sample(&cover, parser, item,
					     fl, attr, error);
		if (ret)
			return ret;
	}
	/* Build the item pattern translating data on flow creation. */
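	/*
	 * For instance (illustrative), a 24-bit sample field at bit offset 8,
	 * covered by a single register based at byte 0, produces one map
	 * entry with reg_id 0, shift 8 and width 24; DUMMY fields get
	 * placeholder entries with reg_id MLX5_INVALID_SAMPLE_REG_ID.
	 */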
	item->mapnum = 0;
	memset(&item->map, 0, sizeof(item->map));
	for (i = 0; i < conf->nb_samples; i++) {
		struct rte_flow_item_flex_field *fl = conf->sample_data + i;

		ret = mlx5_flex_map_sample(fl, parser, item, error);
		if (ret) {
			MLX5_ASSERT(false);
			return ret;
		}
	}
	if (conf->tunnel == FLEX_TUNNEL_MODE_MULTI) {
		/*
		 * In FLEX_TUNNEL_MODE_MULTI tunnel mode PMD creates 2 sets
		 * of samples. The first set is for outer and the second set
		 * for inner flex flow item. Outer and inner samples differ
		 * only in tunnel_mode.
		 */
		if (parser->num_samples > MLX5_GRAPH_NODE_SAMPLE_NUM / 2)
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "no sample registers for inner");
		rte_memcpy(parser->devx_conf.sample + parser->num_samples,
			   parser->devx_conf.sample,
			   parser->num_samples *
			   sizeof(parser->devx_conf.sample[0]));
		for (i = 0; i < parser->num_samples; i++) {
			struct mlx5_devx_match_sample_attr *sm = i +
				parser->devx_conf.sample + parser->num_samples;

			sm->flow_match_sample_tunnel_mode =
				MLX5_GRAPH_SAMPLE_TUNNEL_INNER;
		}
		parser->num_samples *= 2;
	}
	return 0;
}

static int
mlx5_flex_arc_type(enum rte_flow_item_type type, int in)
{
	switch (type) {
	case RTE_FLOW_ITEM_TYPE_ETH:
		return MLX5_GRAPH_ARC_NODE_MAC;
	case RTE_FLOW_ITEM_TYPE_IPV4:
		return in ? MLX5_GRAPH_ARC_NODE_IP : MLX5_GRAPH_ARC_NODE_IPV4;
	case RTE_FLOW_ITEM_TYPE_IPV6:
		return in ? MLX5_GRAPH_ARC_NODE_IP : MLX5_GRAPH_ARC_NODE_IPV6;
	case RTE_FLOW_ITEM_TYPE_UDP:
		return MLX5_GRAPH_ARC_NODE_UDP;
	case RTE_FLOW_ITEM_TYPE_TCP:
		return MLX5_GRAPH_ARC_NODE_TCP;
	case RTE_FLOW_ITEM_TYPE_MPLS:
		return MLX5_GRAPH_ARC_NODE_MPLS;
	case RTE_FLOW_ITEM_TYPE_GRE:
		return MLX5_GRAPH_ARC_NODE_GRE;
	case RTE_FLOW_ITEM_TYPE_GENEVE:
		return MLX5_GRAPH_ARC_NODE_GENEVE;
	case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
		return MLX5_GRAPH_ARC_NODE_VXLAN_GPE;
	default:
		return -EINVAL;
	}
}

static int
mlx5_flex_arc_in_eth(const struct rte_flow_item *item,
		     struct rte_flow_error *error)
{
	const struct rte_flow_item_eth *spec = item->spec;
	const struct rte_flow_item_eth *mask = item->mask;
	struct rte_flow_item_eth eth = { .hdr.ether_type = RTE_BE16(0xFFFF) };

	if (memcmp(mask, &eth, sizeof(struct rte_flow_item_eth))) {
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
			 "invalid eth item mask");
	}
	return rte_be_to_cpu_16(spec->hdr.ether_type);
}

static int
mlx5_flex_arc_in_udp(const struct rte_flow_item *item,
		     struct rte_flow_error *error)
{
	const struct rte_flow_item_udp *spec = item->spec;
	const struct rte_flow_item_udp *mask = item->mask;
	struct rte_flow_item_udp udp = { .hdr.dst_port = RTE_BE16(0xFFFF) };

	if (memcmp(mask, &udp, sizeof(struct rte_flow_item_udp))) {
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
			 "invalid udp item mask");
	}
	return rte_be_to_cpu_16(spec->hdr.dst_port);
}
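
/*
 * For IN arcs the compare value is the parent protocol demultiplexing
 * field: the EtherType for ETH, the destination port for UDP and the
 * next protocol for IPv6. The link item spec must set exactly that field
 * and the mask must cover it in full, otherwise the translation is
 * rejected.
 */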

static int
mlx5_flex_arc_in_ipv6(const struct rte_flow_item *item,
		      struct rte_flow_error *error)
{
	const struct rte_flow_item_ipv6 *spec = item->spec;
	const struct rte_flow_item_ipv6 *mask = item->mask;
	struct rte_flow_item_ipv6 ip = { .hdr.proto = 0xff };

	if (memcmp(mask, &ip, sizeof(struct rte_flow_item_ipv6))) {
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
			 "invalid ipv6 item mask, full mask is desired");
	}
	return spec->hdr.proto;
}

static int
mlx5_flex_translate_arc_in(struct mlx5_hca_flex_attr *attr,
			   const struct rte_flow_item_flex_conf *conf,
			   struct mlx5_flex_parser_devx *devx,
			   struct mlx5_flex_item *item,
			   struct rte_flow_error *error)
{
	struct mlx5_devx_graph_node_attr *node = &devx->devx_conf;
	uint32_t i;

	RTE_SET_USED(item);
	if (conf->nb_inputs > attr->max_num_arc_in)
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			 "too many input links");
	for (i = 0; i < conf->nb_inputs; i++) {
		struct mlx5_devx_graph_arc_attr *arc = node->in + i;
		struct rte_flow_item_flex_link *link = conf->input_link + i;
		const struct rte_flow_item *rte_item = &link->item;
		int arc_type;
		int ret;

		if (!rte_item->spec || !rte_item->mask || rte_item->last)
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "invalid flex item IN arc format");
		arc_type = mlx5_flex_arc_type(rte_item->type, true);
		if (arc_type < 0 || !(attr->node_in & RTE_BIT32(arc_type)))
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "unsupported flex item IN arc type");
		arc->arc_parse_graph_node = arc_type;
		arc->start_inner_tunnel = 0;
		/*
		 * Configure arc IN condition value. The value location depends
		 * on protocol. Current FW version supports IP & UDP for IN
		 * arcs only, and locations for these protocols are defined.
		 * Add more protocols when available.
		 */
		switch (rte_item->type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			ret = mlx5_flex_arc_in_eth(rte_item, error);
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			ret = mlx5_flex_arc_in_udp(rte_item, error);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ret = mlx5_flex_arc_in_ipv6(rte_item, error);
			break;
		default:
			MLX5_ASSERT(false);
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "unsupported flex item IN arc type");
		}
		if (ret < 0)
			return ret;
		arc->compare_condition_value = (uint16_t)ret;
	}
	return 0;
}

static int
mlx5_flex_translate_arc_out(struct mlx5_hca_flex_attr *attr,
			    const struct rte_flow_item_flex_conf *conf,
			    struct mlx5_flex_parser_devx *devx,
			    struct mlx5_flex_item *item,
			    struct rte_flow_error *error)
{
	struct mlx5_devx_graph_node_attr *node = &devx->devx_conf;
	bool is_tunnel = conf->tunnel == FLEX_TUNNEL_MODE_TUNNEL;
	uint32_t i;

	RTE_SET_USED(item);
	if (conf->nb_outputs > attr->max_num_arc_out)
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			 "too many output links");
	for (i = 0; i < conf->nb_outputs; i++) {
		struct mlx5_devx_graph_arc_attr *arc = node->out + i;
		struct rte_flow_item_flex_link *link = conf->output_link + i;
		const struct rte_flow_item *rte_item = &link->item;
		int arc_type;

		if (rte_item->spec || rte_item->mask || rte_item->last)
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "flex node: invalid OUT arc format");
		arc_type = mlx5_flex_arc_type(rte_item->type, false);
		if (arc_type < 0 || !(attr->node_out & RTE_BIT32(arc_type)))
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "unsupported flex item OUT arc type");
		arc->arc_parse_graph_node = arc_type;
		arc->start_inner_tunnel = !!is_tunnel;
		arc->compare_condition_value = link->next;
	}
	return 0;
}

/* Translate RTE flex item API configuration into flex parser settings. */
static int
mlx5_flex_translate_conf(struct rte_eth_dev *dev,
			 const struct rte_flow_item_flex_conf *conf,
			 struct mlx5_flex_parser_devx *devx,
			 struct mlx5_flex_item *item,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_hca_flex_attr *attr = &priv->sh->cdev->config.hca_attr.flex;
	int ret;

	ret = mlx5_flex_translate_length(attr, conf, devx, error);
	if (ret)
		return ret;
	ret = mlx5_flex_translate_next(attr, conf, devx, error);
	if (ret)
		return ret;
	ret = mlx5_flex_translate_sample(attr, conf, devx, item, error);
	if (ret)
		return ret;
	ret = mlx5_flex_translate_arc_in(attr, conf, devx, item, error);
	if (ret)
		return ret;
	ret = mlx5_flex_translate_arc_out(attr, conf, devx, item, error);
	if (ret)
		return ret;
	return 0;
}
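
/*
 * Illustrative configuration sketch (hypothetical values, not compiled):
 * a fixed 8-byte header that follows UDP destination port 0x1234 and has
 * no successor protocol, sampling its first 32 bits for matching:
 *
 *	static const struct rte_flow_item_udp udp_spec = {
 *		.hdr.dst_port = RTE_BE16(0x1234),
 *	};
 *	static const struct rte_flow_item_udp udp_mask = {
 *		.hdr.dst_port = RTE_BE16(0xFFFF),
 *	};
 *	struct rte_flow_item_flex_conf conf = {
 *		.tunnel = FLEX_TUNNEL_MODE_SINGLE,
 *		.next_header = {
 *			.field_mode = FIELD_MODE_FIXED,
 *			.field_base = 64,	// Header length, bits.
 *		},
 *		.next_protocol = { .field_mode = FIELD_MODE_DUMMY, },
 *		.sample_data = (struct rte_flow_item_flex_field []) {
 *			{ .field_mode = FIELD_MODE_FIXED,
 *			  .field_base = 0, .field_size = 32, },
 *		},
 *		.nb_samples = 1,
 *		.input_link = (struct rte_flow_item_flex_link []) {
 *			{ .item = { .type = RTE_FLOW_ITEM_TYPE_UDP,
 *				    .spec = &udp_spec,
 *				    .mask = &udp_mask, }, },
 *		},
 *		.nb_inputs = 1,
 *	};
 */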

/**
 * Create the flex item with specified configuration over the Ethernet device.
 *
 * @param dev
 *   Ethernet device to create flex item on.
 * @param[in] conf
 *   Flex item configuration.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. PMDs initialize this
 *   structure in case of error only.
 *
 * @return
 *   Non-NULL opaque pointer on success, NULL otherwise and rte_errno is set.
 */
struct rte_flow_item_flex_handle *
flow_dv_item_create(struct rte_eth_dev *dev,
		    const struct rte_flow_item_flex_conf *conf,
		    struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flex_parser_devx devx_config = { .devx_obj = NULL };
	struct mlx5_flex_item *flex;
	struct mlx5_list_entry *ent;

	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	flex = mlx5_flex_alloc(priv);
	if (!flex) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "too many flex items created on the port");
		return NULL;
	}
	if (mlx5_flex_translate_conf(dev, conf, &devx_config, flex, error))
		goto error;
	ent = mlx5_list_register(priv->sh->flex_parsers_dv, &devx_config);
	if (!ent) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "flex item creation failure");
		goto error;
	}
	flex->devx_fp = container_of(ent, struct mlx5_flex_parser_devx, entry);
	/* Mark initialized flex item valid. */
	__atomic_add_fetch(&flex->refcnt, 1, __ATOMIC_RELEASE);
	return (struct rte_flow_item_flex_handle *)flex;

error:
	mlx5_flex_free(priv, flex);
	return NULL;
}

/**
 * Release the flex item on the specified Ethernet device.
 *
 * @param dev
 *   Ethernet device to destroy flex item on.
 * @param[in] handle
 *   Handle of the item existing on the specified device.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. PMDs initialize this
 *   structure in case of error only.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
flow_dv_item_release(struct rte_eth_dev *dev,
		     const struct rte_flow_item_flex_handle *handle,
		     struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flex_item *flex =
		(struct mlx5_flex_item *)(uintptr_t)handle;
	uint32_t old_refcnt = 1;
	int rc;

	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	rte_spinlock_lock(&priv->flex_item_sl);
	if (mlx5_flex_index(priv, flex) < 0) {
		rte_spinlock_unlock(&priv->flex_item_sl);
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
					  "invalid flex item handle value");
	}
	if (!__atomic_compare_exchange_n(&flex->refcnt, &old_refcnt, 0, 0,
					 __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
		rte_spinlock_unlock(&priv->flex_item_sl);
		return rte_flow_error_set(error, EBUSY,
					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
					  "flex item has flow references");
	}
	/* Flex item is marked as invalid, we can leave locked section. */
	rte_spinlock_unlock(&priv->flex_item_sl);
	MLX5_ASSERT(flex->devx_fp);
	rc = mlx5_list_unregister(priv->sh->flex_parsers_dv,
				  &flex->devx_fp->entry);
	flex->devx_fp = NULL;
	mlx5_flex_free(priv, flex);
	if (rc < 0)
		return rte_flow_error_set(error, EBUSY,
					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
					  "flex item release failure");
	return 0;
}
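
/*
 * Rough summary of the list callbacks below: create_cb builds a DevX flex
 * parser object the first time a configuration is registered, match_cb
 * lets identical configurations share one parser instance, remove_cb
 * destroys the DevX object once the last user unregisters, and the clone
 * callbacks produce the local copies used internally by the mlx5_list
 * utility.
 */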

/* DevX flex parser list callbacks. */
struct mlx5_list_entry *
mlx5_flex_parser_create_cb(void *list_ctx, void *ctx)
{
	struct mlx5_dev_ctx_shared *sh = list_ctx;
	struct mlx5_flex_parser_devx *fp, *conf = ctx;
	int ret;

	fp = mlx5_malloc(MLX5_MEM_ZERO, sizeof(struct mlx5_flex_parser_devx),
			 0, SOCKET_ID_ANY);
	if (!fp)
		return NULL;
	/* Copy the requested configurations. */
	fp->num_samples = conf->num_samples;
	memcpy(&fp->devx_conf, &conf->devx_conf, sizeof(fp->devx_conf));
	/* Create DevX flex parser. */
	fp->devx_obj = mlx5_devx_cmd_create_flex_parser(sh->cdev->ctx,
							&fp->devx_conf);
	if (!fp->devx_obj)
		goto error;
	/* Query the firmware assigned sample ids. */
	ret = mlx5_devx_cmd_query_parse_samples(fp->devx_obj,
						fp->sample_ids,
						fp->num_samples,
						&fp->anchor_id);
	if (ret)
		goto error;
	DRV_LOG(DEBUG, "DEVx flex parser %p created, samples num: %u",
		(const void *)fp, fp->num_samples);
	return &fp->entry;
error:
	if (fp->devx_obj)
		mlx5_devx_cmd_destroy((void *)(uintptr_t)fp->devx_obj);
	if (fp)
		mlx5_free(fp);
	return NULL;
}

int
mlx5_flex_parser_match_cb(void *list_ctx,
			  struct mlx5_list_entry *iter, void *ctx)
{
	struct mlx5_flex_parser_devx *fp =
		container_of(iter, struct mlx5_flex_parser_devx, entry);
	struct mlx5_flex_parser_devx *org =
		container_of(ctx, struct mlx5_flex_parser_devx, entry);

	RTE_SET_USED(list_ctx);
	return !iter || !ctx || memcmp(&fp->devx_conf,
				       &org->devx_conf,
				       sizeof(fp->devx_conf));
}

void
mlx5_flex_parser_remove_cb(void *list_ctx, struct mlx5_list_entry *entry)
{
	struct mlx5_flex_parser_devx *fp =
		container_of(entry, struct mlx5_flex_parser_devx, entry);

	RTE_SET_USED(list_ctx);
	MLX5_ASSERT(fp->devx_obj);
	claim_zero(mlx5_devx_cmd_destroy(fp->devx_obj));
	DRV_LOG(DEBUG, "DEVx flex parser %p destroyed", (const void *)fp);
	mlx5_free(entry);
}

struct mlx5_list_entry *
mlx5_flex_parser_clone_cb(void *list_ctx,
			  struct mlx5_list_entry *entry, void *ctx)
{
	struct mlx5_flex_parser_devx *fp;

	RTE_SET_USED(list_ctx);
	RTE_SET_USED(entry);
	fp = mlx5_malloc(0, sizeof(struct mlx5_flex_parser_devx),
			 0, SOCKET_ID_ANY);
	if (!fp)
		return NULL;
	memcpy(fp, ctx, sizeof(struct mlx5_flex_parser_devx));
	return &fp->entry;
}

void
mlx5_flex_parser_clone_free_cb(void *list_ctx, struct mlx5_list_entry *entry)
{
	struct mlx5_flex_parser_devx *fp =
		container_of(entry, struct mlx5_flex_parser_devx, entry);
	RTE_SET_USED(list_ctx);
	mlx5_free(fp);
}