/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2023 Corigine, Inc.
 * All rights reserved.
 */

#include "nfp_net_flow.h"

#include <rte_flow_driver.h>
#include <rte_hash.h>
#include <rte_jhash.h>
#include <rte_malloc.h>

#include "nfp_logs.h"
#include "nfp_net_cmsg.h"

/* Static initializer for a list of subsequent item types */
#define NEXT_ITEM(...) \
	((const enum rte_flow_item_type []){ \
		__VA_ARGS__, RTE_FLOW_ITEM_TYPE_END, \
	})

/* Process structure associated with a flow item */
struct nfp_net_flow_item_proc {
	/* Bit-mask for fields supported by this PMD. */
	const void *mask_support;
	/* Bit-mask to use when @p item->mask is not provided. */
	const void *mask_default;
	/* Size in bytes for @p mask_support and @p mask_default. */
	const uint32_t mask_sz;
	/* Merge a pattern item into a flow rule handle. */
	int (*merge)(struct rte_flow *nfp_flow,
			const struct rte_flow_item *item,
			const struct nfp_net_flow_item_proc *proc);
	/* List of possible subsequent items. */
	const enum rte_flow_item_type *const next_item;
};

static int
nfp_net_flow_table_add(struct nfp_net_priv *priv,
		struct rte_flow *nfp_flow)
{
	int ret;

	ret = rte_hash_add_key_data(priv->flow_table, &nfp_flow->hash_key, nfp_flow);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "Add to flow table failed.");
		return ret;
	}

	return 0;
}

static int
nfp_net_flow_table_delete(struct nfp_net_priv *priv,
		struct rte_flow *nfp_flow)
{
	int ret;

	ret = rte_hash_del_key(priv->flow_table, &nfp_flow->hash_key);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Delete from flow table failed.");
		return ret;
	}

	return 0;
}

static struct rte_flow *
nfp_net_flow_table_search(struct nfp_net_priv *priv,
		struct rte_flow *nfp_flow)
{
	int index;
	struct rte_flow *flow_find;

	index = rte_hash_lookup_data(priv->flow_table, &nfp_flow->hash_key,
			(void **)&flow_find);
	if (index < 0) {
		PMD_DRV_LOG(DEBUG, "Data NOT found in the flow table.");
		return NULL;
	}

	return flow_find;
}

static int
nfp_net_flow_position_acquire(struct nfp_net_priv *priv,
		uint32_t priority,
		struct rte_flow *nfp_flow)
{
	uint32_t i;

	if (priority != 0) {
		i = NFP_NET_FLOW_LIMIT - priority - 1;

		if (priv->flow_position[i]) {
			PMD_DRV_LOG(ERR, "There is already a flow rule in this place.");
			return -EAGAIN;
		}

		priv->flow_position[i] = true;
		nfp_flow->position = priority;
		return 0;
	}

	for (i = 0; i < NFP_NET_FLOW_LIMIT; i++) {
		if (!priv->flow_position[i]) {
			priv->flow_position[i] = true;
			break;
		}
	}

	if (i == NFP_NET_FLOW_LIMIT) {
		PMD_DRV_LOG(ERR, "The flow limit has been reached.");
		return -ERANGE;
	}

	nfp_flow->position = NFP_NET_FLOW_LIMIT - i - 1;

	return 0;
}

static void
nfp_net_flow_position_free(struct nfp_net_priv *priv,
		struct rte_flow *nfp_flow)
{
	uint32_t index;

	index = NFP_NET_FLOW_LIMIT - 1 - nfp_flow->position;

	priv->flow_position[index] = false;
}

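/*
 * Allocate a flow rule handle along with one buffer that carries the match
 * data followed by the action data, and reserve a rule position that
 * reflects the requested priority.
 */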
static struct rte_flow *
nfp_net_flow_alloc(struct nfp_net_priv *priv,
		uint32_t priority,
		uint32_t match_len,
		uint32_t action_len,
		uint32_t port_id)
{
	int ret;
	char *data;
	struct rte_flow *nfp_flow;
	struct nfp_net_flow_payload *payload;

	nfp_flow = rte_zmalloc("nfp_flow", sizeof(struct rte_flow), 0);
	if (nfp_flow == NULL)
		return NULL;

	data = rte_zmalloc("nfp_flow_payload", match_len + action_len, 0);
	if (data == NULL)
		goto free_flow;

	ret = nfp_net_flow_position_acquire(priv, priority, nfp_flow);
	if (ret != 0)
		goto free_payload;

	nfp_flow->port_id = port_id;
	payload = &nfp_flow->payload;
	payload->match_len = match_len;
	payload->action_len = action_len;
	payload->match_data = data;
	payload->action_data = data + match_len;

	return nfp_flow;

free_payload:
	rte_free(data);
free_flow:
	rte_free(nfp_flow);

	return NULL;
}

static void
nfp_net_flow_free(struct nfp_net_priv *priv,
		struct rte_flow *nfp_flow)
{
	nfp_net_flow_position_free(priv, nfp_flow);
	rte_free(nfp_flow->payload.match_data);
	rte_free(nfp_flow);
}

static int
nfp_net_flow_calculate_items(const struct rte_flow_item items[],
		uint32_t *match_len,
		uint32_t *item_type)
{
	int ret = -EINVAL;
	const struct rte_flow_item *item;

	for (item = items; item->type != RTE_FLOW_ITEM_TYPE_END; ++item) {
		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			PMD_DRV_LOG(DEBUG, "RTE_FLOW_ITEM_TYPE_ETH detected");
			*match_len = sizeof(struct nfp_net_cmsg_match_eth);
			*item_type = RTE_FLOW_ITEM_TYPE_ETH;
			ret = 0;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			PMD_DRV_LOG(DEBUG, "RTE_FLOW_ITEM_TYPE_IPV4 detected");
			*match_len = sizeof(struct nfp_net_cmsg_match_v4);
			*item_type = RTE_FLOW_ITEM_TYPE_IPV4;
			return 0;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			PMD_DRV_LOG(DEBUG, "RTE_FLOW_ITEM_TYPE_IPV6 detected");
			*match_len = sizeof(struct nfp_net_cmsg_match_v6);
			*item_type = RTE_FLOW_ITEM_TYPE_IPV6;
			return 0;
		default:
			PMD_DRV_LOG(ERR, "Can't calculate match length");
			*match_len = 0;
			return -ENOTSUP;
		}
	}

	return ret;
}

static int
nfp_net_flow_merge_eth(struct rte_flow *nfp_flow,
		const struct rte_flow_item *item,
		__rte_unused const struct nfp_net_flow_item_proc *proc)
{
	struct nfp_net_cmsg_match_eth *eth;
	const struct rte_flow_item_eth *spec;

	spec = item->spec;
	if (spec == NULL) {
		PMD_DRV_LOG(ERR, "NFP flow merge eth: no item->spec!");
		return -EINVAL;
	}

	nfp_flow->payload.cmsg_type = NFP_NET_CFG_MBOX_CMD_FS_ADD_ETHTYPE;

	eth = (struct nfp_net_cmsg_match_eth *)nfp_flow->payload.match_data;
	eth->ether_type = rte_be_to_cpu_16(spec->type);

	return 0;
}

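/*
 * Fill the IPv4 match fields of the control message payload, converting
 * addresses to host byte order. When the item has no mask, the default
 * mask from the item process table is used.
 */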
static int
nfp_net_flow_merge_ipv4(struct rte_flow *nfp_flow,
		const struct rte_flow_item *item,
		const struct nfp_net_flow_item_proc *proc)
{
	struct nfp_net_cmsg_match_v4 *ipv4;
	const struct rte_flow_item_ipv4 *mask;
	const struct rte_flow_item_ipv4 *spec;

	nfp_flow->payload.cmsg_type = NFP_NET_CFG_MBOX_CMD_FS_ADD_V4;

	spec = item->spec;
	if (spec == NULL) {
		PMD_DRV_LOG(DEBUG, "NFP flow merge ipv4: no item->spec!");
		return 0;
	}

	mask = (item->mask != NULL) ? item->mask : proc->mask_default;

	ipv4 = (struct nfp_net_cmsg_match_v4 *)nfp_flow->payload.match_data;

	ipv4->l4_protocol_mask = mask->hdr.next_proto_id;
	ipv4->src_ipv4_mask = rte_be_to_cpu_32(mask->hdr.src_addr);
	ipv4->dst_ipv4_mask = rte_be_to_cpu_32(mask->hdr.dst_addr);

	ipv4->l4_protocol = spec->hdr.next_proto_id;
	ipv4->src_ipv4 = rte_be_to_cpu_32(spec->hdr.src_addr);
	ipv4->dst_ipv4 = rte_be_to_cpu_32(spec->hdr.dst_addr);

	return 0;
}

static int
nfp_net_flow_merge_ipv6(struct rte_flow *nfp_flow,
		const struct rte_flow_item *item,
		const struct nfp_net_flow_item_proc *proc)
{
	uint32_t i;
	struct nfp_net_cmsg_match_v6 *ipv6;
	const struct rte_flow_item_ipv6 *mask;
	const struct rte_flow_item_ipv6 *spec;

	nfp_flow->payload.cmsg_type = NFP_NET_CFG_MBOX_CMD_FS_ADD_V6;

	spec = item->spec;
	if (spec == NULL) {
		PMD_DRV_LOG(DEBUG, "NFP flow merge ipv6: no item->spec!");
		return 0;
	}

	mask = (item->mask != NULL) ? item->mask : proc->mask_default;

	ipv6 = (struct nfp_net_cmsg_match_v6 *)nfp_flow->payload.match_data;

	ipv6->l4_protocol_mask = mask->hdr.proto;
	/*
	 * Byte-reverse each 4-byte word of the addresses, mirroring the
	 * rte_be_to_cpu_32() conversion used on the IPv4 path.
	 */
	for (i = 0; i < sizeof(ipv6->src_ipv6); i += 4) {
		ipv6->src_ipv6_mask[i] = mask->hdr.src_addr[i + 3];
		ipv6->src_ipv6_mask[i + 1] = mask->hdr.src_addr[i + 2];
		ipv6->src_ipv6_mask[i + 2] = mask->hdr.src_addr[i + 1];
		ipv6->src_ipv6_mask[i + 3] = mask->hdr.src_addr[i];

		ipv6->dst_ipv6_mask[i] = mask->hdr.dst_addr[i + 3];
		ipv6->dst_ipv6_mask[i + 1] = mask->hdr.dst_addr[i + 2];
		ipv6->dst_ipv6_mask[i + 2] = mask->hdr.dst_addr[i + 1];
		ipv6->dst_ipv6_mask[i + 3] = mask->hdr.dst_addr[i];
	}

	ipv6->l4_protocol = spec->hdr.proto;
	for (i = 0; i < sizeof(ipv6->src_ipv6); i += 4) {
		ipv6->src_ipv6[i] = spec->hdr.src_addr[i + 3];
		ipv6->src_ipv6[i + 1] = spec->hdr.src_addr[i + 2];
		ipv6->src_ipv6[i + 2] = spec->hdr.src_addr[i + 1];
		ipv6->src_ipv6[i + 3] = spec->hdr.src_addr[i];

		ipv6->dst_ipv6[i] = spec->hdr.dst_addr[i + 3];
		ipv6->dst_ipv6[i + 1] = spec->hdr.dst_addr[i + 2];
		ipv6->dst_ipv6[i + 2] = spec->hdr.dst_addr[i + 1];
		ipv6->dst_ipv6[i + 3] = spec->hdr.dst_addr[i];
	}

	return 0;
}

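/*
 * Fill the L4 port match fields. TCP, UDP and SCTP headers all place the
 * source and destination ports at the same offsets, so one handler can
 * reuse the rte_flow_item_tcp view for all three item types.
 */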
static int
nfp_flow_merge_l4(struct rte_flow *nfp_flow,
		const struct rte_flow_item *item,
		const struct nfp_net_flow_item_proc *proc)
{
	const struct rte_flow_item_tcp *mask;
	const struct rte_flow_item_tcp *spec;
	struct nfp_net_cmsg_match_v4 *ipv4 = NULL;
	struct nfp_net_cmsg_match_v6 *ipv6 = NULL;

	spec = item->spec;
	if (spec == NULL) {
		PMD_DRV_LOG(ERR, "NFP flow merge L4: no item->spec!");
		return -EINVAL;
	}

	mask = (item->mask != NULL) ? item->mask : proc->mask_default;

	switch (nfp_flow->payload.cmsg_type) {
	case NFP_NET_CFG_MBOX_CMD_FS_ADD_V4:
		ipv4 = (struct nfp_net_cmsg_match_v4 *)nfp_flow->payload.match_data;
		break;
	case NFP_NET_CFG_MBOX_CMD_FS_ADD_V6:
		ipv6 = (struct nfp_net_cmsg_match_v6 *)nfp_flow->payload.match_data;
		break;
	default:
		PMD_DRV_LOG(ERR, "L3 layer is neither IPv4 nor IPv6.");
		return -EINVAL;
	}

	if (ipv4 != NULL) {
		ipv4->src_port_mask = rte_be_to_cpu_16(mask->hdr.src_port);
		ipv4->dst_port_mask = rte_be_to_cpu_16(mask->hdr.dst_port);

		ipv4->src_port = rte_be_to_cpu_16(spec->hdr.src_port);
		ipv4->dst_port = rte_be_to_cpu_16(spec->hdr.dst_port);
	} else if (ipv6 != NULL) {
		ipv6->src_port_mask = rte_be_to_cpu_16(mask->hdr.src_port);
		ipv6->dst_port_mask = rte_be_to_cpu_16(mask->hdr.dst_port);

		ipv6->src_port = rte_be_to_cpu_16(spec->hdr.src_port);
		ipv6->dst_port = rte_be_to_cpu_16(spec->hdr.dst_port);
	} else {
		PMD_DRV_LOG(ERR, "No valid L3 layer pointer.");
		return -EINVAL;
	}

	return 0;
}

/* Graph of supported items and associated process function */
static const struct nfp_net_flow_item_proc nfp_net_flow_item_proc_list[] = {
	[RTE_FLOW_ITEM_TYPE_END] = {
		.next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_ETH,
				RTE_FLOW_ITEM_TYPE_IPV4,
				RTE_FLOW_ITEM_TYPE_IPV6),
	},
	[RTE_FLOW_ITEM_TYPE_ETH] = {
		.merge = nfp_net_flow_merge_eth,
	},
	[RTE_FLOW_ITEM_TYPE_IPV4] = {
		.next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_TCP,
				RTE_FLOW_ITEM_TYPE_UDP,
				RTE_FLOW_ITEM_TYPE_SCTP),
		.mask_support = &(const struct rte_flow_item_ipv4){
			.hdr = {
				.next_proto_id = 0xff,
				.src_addr = RTE_BE32(0xffffffff),
				.dst_addr = RTE_BE32(0xffffffff),
			},
		},
		.mask_default = &rte_flow_item_ipv4_mask,
		.mask_sz = sizeof(struct rte_flow_item_ipv4),
		.merge = nfp_net_flow_merge_ipv4,
	},
	[RTE_FLOW_ITEM_TYPE_IPV6] = {
		.next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_TCP,
				RTE_FLOW_ITEM_TYPE_UDP,
				RTE_FLOW_ITEM_TYPE_SCTP),
		.mask_support = &(const struct rte_flow_item_ipv6){
			.hdr = {
				.proto = 0xff,
				.src_addr = "\xff\xff\xff\xff\xff\xff\xff\xff"
						"\xff\xff\xff\xff\xff\xff\xff\xff",
				.dst_addr = "\xff\xff\xff\xff\xff\xff\xff\xff"
						"\xff\xff\xff\xff\xff\xff\xff\xff",
			},
		},
		.mask_default = &rte_flow_item_ipv6_mask,
		.mask_sz = sizeof(struct rte_flow_item_ipv6),
		.merge = nfp_net_flow_merge_ipv6,
	},
	[RTE_FLOW_ITEM_TYPE_TCP] = {
		.mask_support = &(const struct rte_flow_item_tcp){
			.hdr = {
				.src_port = RTE_BE16(0xffff),
				.dst_port = RTE_BE16(0xffff),
			},
		},
		.mask_default = &rte_flow_item_tcp_mask,
		.mask_sz = sizeof(struct rte_flow_item_tcp),
		.merge = nfp_flow_merge_l4,
	},
	[RTE_FLOW_ITEM_TYPE_UDP] = {
		.mask_support = &(const struct rte_flow_item_udp){
			.hdr = {
				.src_port = RTE_BE16(0xffff),
				.dst_port = RTE_BE16(0xffff),
			},
		},
		.mask_default = &rte_flow_item_udp_mask,
		.mask_sz = sizeof(struct rte_flow_item_udp),
		.merge = nfp_flow_merge_l4,
	},
	[RTE_FLOW_ITEM_TYPE_SCTP] = {
		.mask_support = &(const struct rte_flow_item_sctp){
			.hdr = {
				.src_port = RTE_BE16(0xffff),
				.dst_port = RTE_BE16(0xffff),
			},
		},
		.mask_default = &rte_flow_item_sctp_mask,
		.mask_sz = sizeof(struct rte_flow_item_sctp),
		.merge = nfp_flow_merge_l4,
	},
};

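/*
 * Basic validation of one pattern item: the mask must not set bits outside
 * the supported mask of this item type, and the spec/last range must be
 * fully covered by the mask.
 */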
static int
nfp_net_flow_item_check(const struct rte_flow_item *item,
		const struct nfp_net_flow_item_proc *proc)
{
	uint32_t i;
	int ret = 0;
	const uint8_t *mask;

	/* item->last and item->mask cannot exist without item->spec. */
	if (item->spec == NULL) {
		if (item->mask || item->last) {
			PMD_DRV_LOG(ERR, "'mask' or 'last' field provided"
					" without a corresponding 'spec'.");
			return -EINVAL;
		}

		/* No spec, no mask, no problem. */
		return 0;
	}

	mask = (item->mask != NULL) ? item->mask : proc->mask_default;

	/*
	 * Single-pass check to make sure that:
	 * - Mask is supported, no bits are set outside proc->mask_support.
	 * - Both item->spec and item->last are included in mask.
	 */
	for (i = 0; i != proc->mask_sz; ++i) {
		if (mask[i] == 0)
			continue;

		if ((mask[i] | ((const uint8_t *)proc->mask_support)[i]) !=
				((const uint8_t *)proc->mask_support)[i]) {
			PMD_DRV_LOG(ERR, "Unsupported field found in 'mask'.");
			ret = -EINVAL;
			break;
		}

		if (item->last != NULL &&
				(((const uint8_t *)item->spec)[i] & mask[i]) !=
				(((const uint8_t *)item->last)[i] & mask[i])) {
			PMD_DRV_LOG(ERR, "Range between 'spec' and 'last'"
					" is larger than 'mask'.");
			ret = -ERANGE;
			break;
		}
	}

	return ret;
}

static int
nfp_net_flow_compile_items(const struct rte_flow_item items[],
		struct rte_flow *nfp_flow)
{
	uint32_t i;
	int ret = 0;
	const struct rte_flow_item *item;
	const struct nfp_net_flow_item_proc *proc_list;

	proc_list = nfp_net_flow_item_proc_list;

	for (item = items; item->type != RTE_FLOW_ITEM_TYPE_END; ++item) {
		const struct nfp_net_flow_item_proc *proc = NULL;

		for (i = 0; (proc_list->next_item != NULL) &&
				(proc_list->next_item[i] != RTE_FLOW_ITEM_TYPE_END); ++i) {
			if (proc_list->next_item[i] == item->type) {
				proc = &nfp_net_flow_item_proc_list[item->type];
				break;
			}
		}

		if (proc == NULL) {
			PMD_DRV_LOG(ERR, "No next item provided for %d", item->type);
			ret = -ENOTSUP;
			break;
		}

		/* Perform basic sanity checks */
		ret = nfp_net_flow_item_check(item, proc);
		if (ret != 0) {
			PMD_DRV_LOG(ERR, "NFP flow item %d check failed", item->type);
			ret = -EINVAL;
			break;
		}

		if (proc->merge == NULL) {
			PMD_DRV_LOG(ERR, "NFP flow item %d no proc function", item->type);
			ret = -ENOTSUP;
			break;
		}

		ret = proc->merge(nfp_flow, item, proc);
		if (ret != 0) {
			PMD_DRV_LOG(ERR, "NFP flow item %d exact merge failed", item->type);
			break;
		}

		proc_list = proc;
	}

	return ret;
}

static void
nfp_net_flow_action_drop(struct rte_flow *nfp_flow)
{
	struct nfp_net_cmsg_action *action_data;

	action_data = (struct nfp_net_cmsg_action *)nfp_flow->payload.action_data;

	action_data->action = NFP_NET_CMSG_ACTION_DROP;
}

static void
nfp_net_flow_action_mark(struct rte_flow *nfp_flow,
		const struct rte_flow_action *action)
{
	struct nfp_net_cmsg_action *action_data;
	const struct rte_flow_action_mark *mark;

	action_data = (struct nfp_net_cmsg_action *)nfp_flow->payload.action_data;
	mark = action->conf;

	action_data->action |= NFP_NET_CMSG_ACTION_MARK;
	action_data->mark_id = mark->id;
}

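/*
 * Record a queue action, after checking that the queue index refers to a
 * configured Rx queue of this port.
 */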
static int
nfp_net_flow_action_queue(struct rte_eth_dev *dev,
		struct rte_flow *nfp_flow,
		const struct rte_flow_action *action)
{
	struct nfp_net_cmsg_action *action_data;
	const struct rte_flow_action_queue *queue;

	action_data = (struct nfp_net_cmsg_action *)nfp_flow->payload.action_data;
	queue = action->conf;
	if (queue->index >= dev->data->nb_rx_queues ||
			dev->data->rx_queues[queue->index] == NULL) {
		PMD_DRV_LOG(ERR, "Queue index is illegal");
		return -EINVAL;
	}

	action_data->action |= NFP_NET_CMSG_ACTION_QUEUE;
	action_data->queue = queue->index;

	return 0;
}

static int
nfp_net_flow_compile_actions(struct rte_eth_dev *dev,
		const struct rte_flow_action actions[],
		struct rte_flow *nfp_flow)
{
	int ret = 0;
	const struct rte_flow_action *action;

	for (action = actions; action->type != RTE_FLOW_ACTION_TYPE_END; ++action) {
		switch (action->type) {
		case RTE_FLOW_ACTION_TYPE_DROP:
			PMD_DRV_LOG(DEBUG, "Process RTE_FLOW_ACTION_TYPE_DROP");
			nfp_net_flow_action_drop(nfp_flow);
			return 0;
		case RTE_FLOW_ACTION_TYPE_MARK:
			PMD_DRV_LOG(DEBUG, "Process RTE_FLOW_ACTION_TYPE_MARK");
			nfp_net_flow_action_mark(nfp_flow, action);
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			PMD_DRV_LOG(DEBUG, "Process RTE_FLOW_ACTION_TYPE_QUEUE");
			ret = nfp_net_flow_action_queue(dev, nfp_flow, action);
			break;
		default:
			PMD_DRV_LOG(ERR, "Unsupported action type: %d", action->type);
			return -ENOTSUP;
		}
	}

	return ret;
}

static void
nfp_net_flow_process_priority(struct rte_flow *nfp_flow,
		uint32_t match_len)
{
	struct nfp_net_cmsg_match_v4 *ipv4;
	struct nfp_net_cmsg_match_v6 *ipv6;

	switch (match_len) {
	case sizeof(struct nfp_net_cmsg_match_v4):
		ipv4 = (struct nfp_net_cmsg_match_v4 *)nfp_flow->payload.match_data;
		ipv4->position = nfp_flow->position;
		break;
	case sizeof(struct nfp_net_cmsg_match_v6):
		ipv6 = (struct nfp_net_cmsg_match_v6 *)nfp_flow->payload.match_data;
		ipv6->position = nfp_flow->position;
		break;
	default:
		break;
	}
}

static int
nfp_net_flow_check_count(struct nfp_net_flow_count *flow_count,
		uint32_t item_type)
{
	int ret = 0;

	switch (item_type) {
	case RTE_FLOW_ITEM_TYPE_ETH:
		if (flow_count->eth_count >= NFP_NET_ETH_FLOW_LIMIT)
			ret = -ENOSPC;
		break;
	case RTE_FLOW_ITEM_TYPE_IPV4:
		if (flow_count->ipv4_count >= NFP_NET_IPV4_FLOW_LIMIT)
			ret = -ENOSPC;
		break;
	case RTE_FLOW_ITEM_TYPE_IPV6:
		if (flow_count->ipv6_count >= NFP_NET_IPV6_FLOW_LIMIT)
			ret = -ENOSPC;
		break;
	default:
		ret = -ENOTSUP;
		break;
	}

	return ret;
}

static int
nfp_net_flow_calculate_count(struct rte_flow *nfp_flow,
		struct nfp_net_flow_count *flow_count,
		bool delete_flag)
{
	uint16_t *count;

	switch (nfp_flow->payload.cmsg_type) {
	case NFP_NET_CFG_MBOX_CMD_FS_ADD_V4:
	case NFP_NET_CFG_MBOX_CMD_FS_DEL_V4:
		count = &flow_count->ipv4_count;
		break;
	case NFP_NET_CFG_MBOX_CMD_FS_ADD_V6:
	case NFP_NET_CFG_MBOX_CMD_FS_DEL_V6:
		count = &flow_count->ipv6_count;
		break;
	case NFP_NET_CFG_MBOX_CMD_FS_ADD_ETHTYPE:
	case NFP_NET_CFG_MBOX_CMD_FS_DEL_ETHTYPE:
		count = &flow_count->eth_count;
		break;
	default:
		PMD_DRV_LOG(ERR, "Flow count calculate failed.");
		return -EINVAL;
	}

	if (delete_flag)
		(*count)--;
	else
		(*count)++;

	return 0;
}

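/*
 * Build a flow rule handle from the rte_flow pattern and actions: work out
 * the match size, allocate the handle, compile items and actions into the
 * control message payload, and reject rules already present in the table.
 */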
static struct rte_flow *
nfp_net_flow_setup(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item items[],
		const struct rte_flow_action actions[])
{
	int ret;
	char *hash_data;
	uint32_t port_id;
	uint32_t item_type;
	uint32_t action_len;
	struct nfp_net_hw *hw;
	uint32_t match_len = 0;
	struct nfp_net_priv *priv;
	struct rte_flow *nfp_flow;
	struct rte_flow *flow_find;
	struct nfp_net_hw_priv *hw_priv;
	struct nfp_app_fw_nic *app_fw_nic;

	hw = dev->data->dev_private;
	hw_priv = dev->process_private;
	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(hw_priv->pf_dev->app_fw_priv);
	priv = app_fw_nic->ports[hw->idx]->priv;

	ret = nfp_net_flow_calculate_items(items, &match_len, &item_type);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "Key layer calculation failed.");
		return NULL;
	}

	ret = nfp_net_flow_check_count(&priv->flow_count, item_type);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "Flow count check failed.");
		return NULL;
	}

	action_len = sizeof(struct nfp_net_cmsg_action);
	port_id = hw->nfp_idx;

	nfp_flow = nfp_net_flow_alloc(priv, attr->priority, match_len, action_len, port_id);
	if (nfp_flow == NULL) {
		PMD_DRV_LOG(ERR, "Alloc nfp flow failed.");
		return NULL;
	}

	ret = nfp_net_flow_compile_items(items, nfp_flow);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "NFP flow item process failed.");
		goto free_flow;
	}

	ret = nfp_net_flow_compile_actions(dev, actions, nfp_flow);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "NFP flow action process failed.");
		goto free_flow;
	}

	/* Calculate and store the hash_key for later use */
	hash_data = nfp_flow->payload.match_data;
	nfp_flow->hash_key = rte_jhash(hash_data, match_len + action_len,
			priv->hash_seed);

	/* Find the flow in hash table */
	flow_find = nfp_net_flow_table_search(priv, nfp_flow);
	if (flow_find != NULL) {
		PMD_DRV_LOG(ERR, "This flow already exists.");
		goto free_flow;
	}

	ret = nfp_net_flow_calculate_count(nfp_flow, &priv->flow_count, false);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "NFP flow calculate count failed.");
		goto free_flow;
	}

	nfp_net_flow_process_priority(nfp_flow, match_len);

	return nfp_flow;

free_flow:
	nfp_net_flow_free(priv, nfp_flow);

	return NULL;
}

static int
nfp_net_flow_teardown(struct nfp_net_priv *priv,
		struct rte_flow *nfp_flow)
{
	return nfp_net_flow_calculate_count(nfp_flow, &priv->flow_count, true);
}

static int
nfp_net_flow_offload(struct nfp_net_hw *hw,
		struct rte_flow *flow,
		bool delete_flag)
{
	int ret;
	char *tmp;
	uint32_t msg_size;
	struct nfp_net_cmsg *cmsg;

	msg_size = sizeof(uint32_t) + flow->payload.match_len +
			flow->payload.action_len;
	cmsg = nfp_net_cmsg_alloc(msg_size);
	if (cmsg == NULL) {
		PMD_DRV_LOG(ERR, "Alloc cmsg failed.");
		return -ENOMEM;
	}

	/* The delete command directly follows the matching add command. */
	cmsg->cmd = flow->payload.cmsg_type;
	if (delete_flag)
		cmsg->cmd++;

	tmp = (char *)cmsg->data;
	rte_memcpy(tmp, flow->payload.match_data, flow->payload.match_len);
	tmp += flow->payload.match_len;
	rte_memcpy(tmp, flow->payload.action_data, flow->payload.action_len);

	ret = nfp_net_cmsg_xmit(hw, cmsg, msg_size);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "Send cmsg failed.");
		ret = -EINVAL;
	}

	nfp_net_cmsg_free(cmsg);

	return ret;
}

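/*
 * Validate a flow rule by fully constructing its handle, then tearing it
 * down again. The hardware is never touched on this path.
 */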
static int
nfp_net_flow_validate(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item items[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	int ret;
	struct nfp_net_hw *hw;
	struct rte_flow *nfp_flow;
	struct nfp_net_priv *priv;
	struct nfp_net_hw_priv *hw_priv;
	struct nfp_app_fw_nic *app_fw_nic;

	hw = dev->data->dev_private;
	hw_priv = dev->process_private;
	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(hw_priv->pf_dev->app_fw_priv);
	priv = app_fw_nic->ports[hw->idx]->priv;

	nfp_flow = nfp_net_flow_setup(dev, attr, items, actions);
	if (nfp_flow == NULL) {
		return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				NULL, "This flow cannot be offloaded.");
	}

	ret = nfp_net_flow_teardown(priv, nfp_flow);
	if (ret != 0) {
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				NULL, "Flow resource free failed.");
	}

	nfp_net_flow_free(priv, nfp_flow);

	return 0;
}

static struct rte_flow *
nfp_net_flow_create(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item items[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	int ret;
	struct nfp_net_hw *hw;
	struct rte_flow *nfp_flow;
	struct nfp_net_priv *priv;
	struct nfp_net_hw_priv *hw_priv;
	struct nfp_app_fw_nic *app_fw_nic;

	hw = dev->data->dev_private;
	hw_priv = dev->process_private;
	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(hw_priv->pf_dev->app_fw_priv);
	priv = app_fw_nic->ports[hw->idx]->priv;

	nfp_flow = nfp_net_flow_setup(dev, attr, items, actions);
	if (nfp_flow == NULL) {
		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				NULL, "This flow cannot be offloaded.");
		return NULL;
	}

	/* Add the flow to flow hash table */
	ret = nfp_net_flow_table_add(priv, nfp_flow);
	if (ret != 0) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				NULL, "Add flow to the flow table failed.");
		goto flow_teardown;
	}

	/* Add the flow to hardware */
	ret = nfp_net_flow_offload(hw, nfp_flow, false);
	if (ret != 0) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				NULL, "Add flow to firmware failed.");
		goto table_delete;
	}

	return nfp_flow;

table_delete:
	nfp_net_flow_table_delete(priv, nfp_flow);
flow_teardown:
	nfp_net_flow_teardown(priv, nfp_flow);
	nfp_net_flow_free(priv, nfp_flow);

	return NULL;
}

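/*
 * Remove a flow rule from the firmware and from the flow table, then
 * release the rule handle together with its reserved position.
 */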
static int
nfp_net_flow_destroy(struct rte_eth_dev *dev,
		struct rte_flow *nfp_flow,
		struct rte_flow_error *error)
{
	int ret;
	struct nfp_net_hw *hw;
	struct nfp_net_priv *priv;
	struct rte_flow *flow_find;
	struct nfp_net_hw_priv *hw_priv;
	struct nfp_app_fw_nic *app_fw_nic;

	hw = dev->data->dev_private;
	hw_priv = dev->process_private;
	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(hw_priv->pf_dev->app_fw_priv);
	priv = app_fw_nic->ports[hw->idx]->priv;

	/* Find the flow in flow hash table */
	flow_find = nfp_net_flow_table_search(priv, nfp_flow);
	if (flow_find == NULL) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				NULL, "Flow does not exist.");
		ret = -EINVAL;
		goto exit;
	}

	/* Delete the flow from hardware */
	ret = nfp_net_flow_offload(hw, nfp_flow, true);
	if (ret != 0) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				NULL, "Delete flow from firmware failed.");
		ret = -EINVAL;
		goto exit;
	}

	/* Delete the flow from flow hash table */
	ret = nfp_net_flow_table_delete(priv, nfp_flow);
	if (ret != 0) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				NULL, "Delete flow from the flow table failed.");
		ret = -EINVAL;
		goto exit;
	}

	ret = nfp_net_flow_teardown(priv, nfp_flow);
	if (ret != 0) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				NULL, "Flow teardown failed.");
		ret = -EINVAL;
		goto exit;
	}

exit:
	nfp_net_flow_free(priv, nfp_flow);

	return ret;
}

static int
nfp_net_flow_flush(struct rte_eth_dev *dev,
		struct rte_flow_error *error)
{
	int ret = 0;
	void *next_data;
	uint32_t iter = 0;
	const void *next_key;
	struct nfp_net_hw *hw;
	struct rte_flow *nfp_flow;
	struct rte_hash *flow_table;
	struct nfp_net_hw_priv *hw_priv;
	struct nfp_app_fw_nic *app_fw_nic;

	hw = dev->data->dev_private;
	hw_priv = dev->process_private;
	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(hw_priv->pf_dev->app_fw_priv);
	flow_table = app_fw_nic->ports[hw->idx]->priv->flow_table;

	while (rte_hash_iterate(flow_table, &next_key, &next_data, &iter) >= 0) {
		nfp_flow = next_data;
		ret = nfp_net_flow_destroy(dev, nfp_flow, error);
		if (ret != 0)
			break;
	}

	return ret;
}

static const struct rte_flow_ops nfp_net_flow_ops = {
	.validate = nfp_net_flow_validate,
	.create = nfp_net_flow_create,
	.destroy = nfp_net_flow_destroy,
	.flush = nfp_net_flow_flush,
};

int
nfp_net_flow_ops_get(struct rte_eth_dev *dev,
		const struct rte_flow_ops **ops)
{
	struct nfp_net_hw *hw;

	if (rte_eth_dev_is_repr(dev)) {
		*ops = NULL;
		PMD_DRV_LOG(ERR, "Port is a representor.");
		return -EINVAL;
	}

	hw = dev->data->dev_private;
	if ((hw->super.ctrl_ext & NFP_NET_CFG_CTRL_FLOW_STEER) == 0) {
		*ops = NULL;
		return 0;
	}

	*ops = &nfp_net_flow_ops;

	return 0;
}

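/*
 * Set up the per-port flow steering state: a hash table keyed by the
 * 32-bit rte_jhash value of each rule's payload, seeded from rte_rand().
 */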
int
nfp_net_flow_priv_init(struct nfp_pf_dev *pf_dev,
		uint16_t port)
{
	int ret = 0;
	struct nfp_net_priv *priv;
	char flow_name[RTE_HASH_NAMESIZE];
	struct nfp_app_fw_nic *app_fw_nic;
	const char *pci_name = strchr(pf_dev->pci_dev->name, ':') + 1;

	snprintf(flow_name, sizeof(flow_name), "%s_fl_%u", pci_name, port);

	struct rte_hash_parameters flow_hash_params = {
		.name = flow_name,
		.entries = NFP_NET_FLOW_HASH_TBALE_SIZE,
		.hash_func = rte_jhash,
		.socket_id = rte_socket_id(),
		.key_len = sizeof(uint32_t),
		.extra_flag = RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY,
	};

	priv = rte_zmalloc("nfp_app_nic_priv", sizeof(struct nfp_net_priv), 0);
	if (priv == NULL) {
		PMD_INIT_LOG(ERR, "NFP app nic priv creation failed");
		ret = -ENOMEM;
		goto exit;
	}

	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv);
	app_fw_nic->ports[port]->priv = priv;
	priv->hash_seed = (uint32_t)rte_rand();

	/* Flow table */
	flow_hash_params.hash_func_init_val = priv->hash_seed;
	priv->flow_table = rte_hash_create(&flow_hash_params);
	if (priv->flow_table == NULL) {
		PMD_INIT_LOG(ERR, "Flow hash table creation failed");
		ret = -ENOMEM;
		goto free_priv;
	}

	return 0;

free_priv:
	rte_free(priv);
exit:
	return ret;
}

void
nfp_net_flow_priv_uninit(struct nfp_pf_dev *pf_dev,
		uint16_t port)
{
	struct nfp_net_priv *priv;
	struct nfp_app_fw_nic *app_fw_nic;

	if (pf_dev == NULL)
		return;

	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv);
	priv = app_fw_nic->ports[port]->priv;
	if (priv != NULL)
		rte_hash_free(priv->flow_table);

	rte_free(priv);
}

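/*
 * Usage sketch (illustrative only, not part of the driver): an application
 * reaches this code through the generic rte_flow API, for example:
 *
 *	struct rte_flow_error error;
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_ipv4 ip_spec = {
 *		.hdr.dst_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 1)),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ip_spec },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow *flow = rte_flow_create(port_id, &attr, pattern,
 *			actions, &error);
 */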