/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2023 Corigine, Inc.
 * All rights reserved.
 */

#include "nfp_net_flow.h"

#include <rte_flow_driver.h>
#include <rte_hash.h>
#include <rte_jhash.h>
#include <rte_malloc.h>

#include "nfp_logs.h"
#include "nfp_net_cmsg.h"

/* Static initializer for a list of subsequent item types */
#define NEXT_ITEM(...) \
	((const enum rte_flow_item_type []){ \
		__VA_ARGS__, RTE_FLOW_ITEM_TYPE_END, \
	})

/* Process structure associated with a flow item */
struct nfp_net_flow_item_proc {
	/* Bit-mask for fields supported by this PMD. */
	const void *mask_support;
	/* Bit-mask to use when @p item->mask is not provided. */
	const void *mask_default;
	/* Size in bytes for @p mask_support and @p mask_default. */
	const uint32_t mask_sz;
	/* Merge a pattern item into a flow rule handle. */
	int (*merge)(struct rte_flow *nfp_flow,
			const struct rte_flow_item *item,
			const struct nfp_net_flow_item_proc *proc);
	/* List of possible subsequent items. */
	const enum rte_flow_item_type *const next_item;
};

static int
nfp_net_flow_table_add(struct nfp_net_priv *priv,
		struct rte_flow *nfp_flow)
{
	int ret;

	ret = rte_hash_add_key_data(priv->flow_table, &nfp_flow->hash_key, nfp_flow);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "Add to flow table failed.");
		return ret;
	}

	return 0;
}

static int
nfp_net_flow_table_delete(struct nfp_net_priv *priv,
		struct rte_flow *nfp_flow)
{
	int ret;

	ret = rte_hash_del_key(priv->flow_table, &nfp_flow->hash_key);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Delete from flow table failed.");
		return ret;
	}

	return 0;
}

static struct rte_flow *
nfp_net_flow_table_search(struct nfp_net_priv *priv,
		struct rte_flow *nfp_flow)
{
	int index;
	struct rte_flow *flow_find;

	index = rte_hash_lookup_data(priv->flow_table, &nfp_flow->hash_key,
			(void **)&flow_find);
	if (index < 0) {
		PMD_DRV_LOG(DEBUG, "Data NOT found in the flow table.");
		return NULL;
	}

	return flow_find;
}

static int
nfp_net_flow_position_acquire(struct nfp_net_priv *priv,
		uint32_t priority,
		struct rte_flow *nfp_flow)
{
	uint32_t i;
	uint32_t limit;

	limit = priv->flow_limit;

	if (priority != 0) {
		/* Explicit priority: a higher priority maps to a lower array index. */
		i = limit - priority - 1;

		if (priv->flow_position[i]) {
			PMD_DRV_LOG(ERR, "There is already a flow rule in this place.");
			return -EAGAIN;
		}

		priv->flow_position[i] = true;
		nfp_flow->position = priority;
		return 0;
	}

	/* No explicit priority: take the first free slot. */
	for (i = 0; i < limit; i++) {
		if (!priv->flow_position[i]) {
			priv->flow_position[i] = true;
			break;
		}
	}

	if (i == limit) {
		PMD_DRV_LOG(ERR, "The flow rule limit has been reached.");
		return -ERANGE;
	}

	nfp_flow->position = limit - i - 1;

	return 0;
}

static void
nfp_net_flow_position_free(struct nfp_net_priv *priv,
		struct rte_flow *nfp_flow)
{
	uint32_t index;

	/*
	 * Invert the mapping used on acquire. Use the per-device limit
	 * rather than NFP_NET_FLOW_LIMIT: the firmware may report a
	 * different capacity, and the position array is sized from it.
	 */
	index = priv->flow_limit - 1 - nfp_flow->position;

	priv->flow_position[index] = false;
}

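/*
 * Worked example of the position mapping above (illustrative only,
 * assuming flow_limit == 8): a rule created with priority 1 occupies
 * flow_position[6] and records position == 1, while a rule with no
 * explicit priority that grabs the first free slot (i == 0) records
 * position == 7. Freeing inverts the mapping via flow_limit - 1 - position.
 */
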
static struct rte_flow *
nfp_net_flow_alloc(struct nfp_net_priv *priv,
		uint32_t priority,
		uint32_t match_len,
		uint32_t action_len,
		uint32_t port_id)
{
	int ret;
	char *data;
	struct rte_flow *nfp_flow;
	struct nfp_net_flow_payload *payload;

	nfp_flow = rte_zmalloc("nfp_flow", sizeof(struct rte_flow), 0);
	if (nfp_flow == NULL)
		return NULL;

	/* Match data and action data share a single allocation. */
	data = rte_zmalloc("nfp_flow_payload", match_len + action_len, 0);
	if (data == NULL)
		goto free_flow;

	ret = nfp_net_flow_position_acquire(priv, priority, nfp_flow);
	if (ret != 0)
		goto free_payload;

	nfp_flow->port_id = port_id;
	payload = &nfp_flow->payload;
	payload->match_len = match_len;
	payload->action_len = action_len;
	payload->match_data = data;
	payload->action_data = data + match_len;

	return nfp_flow;

free_payload:
	rte_free(data);
free_flow:
	rte_free(nfp_flow);

	return NULL;
}

static void
nfp_net_flow_free(struct nfp_net_priv *priv,
		struct rte_flow *nfp_flow)
{
	nfp_net_flow_position_free(priv, nfp_flow);
	/* The action data lives in the same allocation as the match data. */
	rte_free(nfp_flow->payload.match_data);
	rte_free(nfp_flow);
}

static int
nfp_net_flow_calculate_items(const struct rte_flow_item items[],
		uint32_t *match_len,
		uint32_t *item_type)
{
	int ret = -EINVAL;
	const struct rte_flow_item *item;

	for (item = items; item->type != RTE_FLOW_ITEM_TYPE_END; ++item) {
		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			PMD_DRV_LOG(DEBUG, "RTE_FLOW_ITEM_TYPE_ETH detected");
			*match_len = sizeof(struct nfp_net_cmsg_match_eth);
			*item_type = RTE_FLOW_ITEM_TYPE_ETH;
			/* Keep walking: a following L3 item takes precedence. */
			ret = 0;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			PMD_DRV_LOG(DEBUG, "RTE_FLOW_ITEM_TYPE_IPV4 detected");
			*match_len = sizeof(struct nfp_net_cmsg_match_v4);
			*item_type = RTE_FLOW_ITEM_TYPE_IPV4;
			return 0;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			PMD_DRV_LOG(DEBUG, "RTE_FLOW_ITEM_TYPE_IPV6 detected");
			*match_len = sizeof(struct nfp_net_cmsg_match_v6);
			*item_type = RTE_FLOW_ITEM_TYPE_IPV6;
			return 0;
		default:
			PMD_DRV_LOG(ERR, "Can't calculate match length");
			*match_len = 0;
			return -ENOTSUP;
		}
	}

	return ret;
}

static int
nfp_net_flow_merge_eth(struct rte_flow *nfp_flow,
		const struct rte_flow_item *item,
		__rte_unused const struct nfp_net_flow_item_proc *proc)
{
	struct nfp_net_cmsg_match_eth *eth;
	const struct rte_flow_item_eth *spec;

	spec = item->spec;
	if (spec == NULL) {
		PMD_DRV_LOG(ERR, "NFP flow merge eth: no item->spec!");
		return -EINVAL;
	}

	nfp_flow->payload.cmsg_type = NFP_NET_CFG_MBOX_CMD_FS_ADD_ETHTYPE;

	eth = (struct nfp_net_cmsg_match_eth *)nfp_flow->payload.match_data;
	eth->ether_type = rte_be_to_cpu_16(spec->type);

	return 0;
}

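/*
 * The merge helpers below share one convention: when an item carries no
 * explicit mask, proc->mask_default is used instead, so a bare pattern
 * such as "ipv4 / end" still produces a fully specified match entry.
 */
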
static int
nfp_net_flow_merge_ipv4(struct rte_flow *nfp_flow,
		const struct rte_flow_item *item,
		const struct nfp_net_flow_item_proc *proc)
{
	struct nfp_net_cmsg_match_v4 *ipv4;
	const struct rte_flow_item_ipv4 *mask;
	const struct rte_flow_item_ipv4 *spec;

	nfp_flow->payload.cmsg_type = NFP_NET_CFG_MBOX_CMD_FS_ADD_V4;

	spec = item->spec;
	if (spec == NULL) {
		PMD_DRV_LOG(DEBUG, "NFP flow merge ipv4: no item->spec!");
		return 0;
	}

	mask = (item->mask != NULL) ? item->mask : proc->mask_default;

	ipv4 = (struct nfp_net_cmsg_match_v4 *)nfp_flow->payload.match_data;

	ipv4->l4_protocol_mask = mask->hdr.next_proto_id;
	ipv4->src_ipv4_mask = rte_be_to_cpu_32(mask->hdr.src_addr);
	ipv4->dst_ipv4_mask = rte_be_to_cpu_32(mask->hdr.dst_addr);

	ipv4->l4_protocol = spec->hdr.next_proto_id;
	ipv4->src_ipv4 = rte_be_to_cpu_32(spec->hdr.src_addr);
	ipv4->dst_ipv4 = rte_be_to_cpu_32(spec->hdr.dst_addr);

	return 0;
}

static int
nfp_net_flow_merge_ipv6(struct rte_flow *nfp_flow,
		const struct rte_flow_item *item,
		const struct nfp_net_flow_item_proc *proc)
{
	uint32_t i;
	struct nfp_net_cmsg_match_v6 *ipv6;
	const struct rte_flow_item_ipv6 *mask;
	const struct rte_flow_item_ipv6 *spec;

	nfp_flow->payload.cmsg_type = NFP_NET_CFG_MBOX_CMD_FS_ADD_V6;

	spec = item->spec;
	if (spec == NULL) {
		PMD_DRV_LOG(DEBUG, "NFP flow merge ipv6: no item->spec!");
		return 0;
	}

	mask = (item->mask != NULL) ? item->mask : proc->mask_default;

	ipv6 = (struct nfp_net_cmsg_match_v6 *)nfp_flow->payload.match_data;

	ipv6->l4_protocol_mask = mask->hdr.proto;
	/* Reverse the byte order within each 32-bit word of the address. */
	for (i = 0; i < sizeof(ipv6->src_ipv6); i += 4) {
		ipv6->src_ipv6_mask[i] = mask->hdr.src_addr[i + 3];
		ipv6->src_ipv6_mask[i + 1] = mask->hdr.src_addr[i + 2];
		ipv6->src_ipv6_mask[i + 2] = mask->hdr.src_addr[i + 1];
		ipv6->src_ipv6_mask[i + 3] = mask->hdr.src_addr[i];

		ipv6->dst_ipv6_mask[i] = mask->hdr.dst_addr[i + 3];
		ipv6->dst_ipv6_mask[i + 1] = mask->hdr.dst_addr[i + 2];
		ipv6->dst_ipv6_mask[i + 2] = mask->hdr.dst_addr[i + 1];
		ipv6->dst_ipv6_mask[i + 3] = mask->hdr.dst_addr[i];
	}

	ipv6->l4_protocol = spec->hdr.proto;
	for (i = 0; i < sizeof(ipv6->src_ipv6); i += 4) {
		ipv6->src_ipv6[i] = spec->hdr.src_addr[i + 3];
		ipv6->src_ipv6[i + 1] = spec->hdr.src_addr[i + 2];
		ipv6->src_ipv6[i + 2] = spec->hdr.src_addr[i + 1];
		ipv6->src_ipv6[i + 3] = spec->hdr.src_addr[i];

		ipv6->dst_ipv6[i] = spec->hdr.dst_addr[i + 3];
		ipv6->dst_ipv6[i + 1] = spec->hdr.dst_addr[i + 2];
		ipv6->dst_ipv6[i + 2] = spec->hdr.dst_addr[i + 1];
		ipv6->dst_ipv6[i + 3] = spec->hdr.dst_addr[i];
	}

	return 0;
}

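/*
 * Illustrative example of the per-word byte swap in the IPv6 loops
 * above: address bytes 20 01 0d b8 aa bb cc dd ... are stored as
 * b8 0d 01 20 dd cc bb aa ..., i.e. each 4-byte group is reversed
 * while the groups keep their order.
 */
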
static int
nfp_net_flow_merge_l4(struct rte_flow *nfp_flow,
		const struct rte_flow_item *item,
		const struct nfp_net_flow_item_proc *proc)
{
	const struct rte_flow_item_tcp *mask;
	const struct rte_flow_item_tcp *spec;
	struct nfp_net_cmsg_match_v4 *ipv4 = NULL;
	struct nfp_net_cmsg_match_v6 *ipv6 = NULL;

	spec = item->spec;
	if (spec == NULL) {
		PMD_DRV_LOG(ERR, "NFP flow merge L4: no item->spec!");
		return -EINVAL;
	}

	mask = (item->mask != NULL) ? item->mask : proc->mask_default;

	switch (nfp_flow->payload.cmsg_type) {
	case NFP_NET_CFG_MBOX_CMD_FS_ADD_V4:
		ipv4 = (struct nfp_net_cmsg_match_v4 *)nfp_flow->payload.match_data;
		break;
	case NFP_NET_CFG_MBOX_CMD_FS_ADD_V6:
		ipv6 = (struct nfp_net_cmsg_match_v6 *)nfp_flow->payload.match_data;
		break;
	default:
		PMD_DRV_LOG(ERR, "L3 layer is neither IPv4 nor IPv6.");
		return -EINVAL;
	}

	if (ipv4 != NULL) {
		ipv4->src_port_mask = rte_be_to_cpu_16(mask->hdr.src_port);
		ipv4->dst_port_mask = rte_be_to_cpu_16(mask->hdr.dst_port);

		ipv4->src_port = rte_be_to_cpu_16(spec->hdr.src_port);
		ipv4->dst_port = rte_be_to_cpu_16(spec->hdr.dst_port);
	} else if (ipv6 != NULL) {
		ipv6->src_port_mask = rte_be_to_cpu_16(mask->hdr.src_port);
		ipv6->dst_port_mask = rte_be_to_cpu_16(mask->hdr.dst_port);

		ipv6->src_port = rte_be_to_cpu_16(spec->hdr.src_port);
		ipv6->dst_port = rte_be_to_cpu_16(spec->hdr.dst_port);
	} else {
		PMD_DRV_LOG(ERR, "No valid L3 layer pointer.");
		return -EINVAL;
	}

	return 0;
}

/* Graph of supported items and associated process functions */
static const struct nfp_net_flow_item_proc nfp_net_flow_item_proc_list[] = {
	[RTE_FLOW_ITEM_TYPE_END] = {
		.next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_ETH,
				RTE_FLOW_ITEM_TYPE_IPV4,
				RTE_FLOW_ITEM_TYPE_IPV6),
	},
	[RTE_FLOW_ITEM_TYPE_ETH] = {
		.merge = nfp_net_flow_merge_eth,
	},
	[RTE_FLOW_ITEM_TYPE_IPV4] = {
		.next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_TCP,
				RTE_FLOW_ITEM_TYPE_UDP,
				RTE_FLOW_ITEM_TYPE_SCTP),
		.mask_support = &(const struct rte_flow_item_ipv4){
			.hdr = {
				.next_proto_id = 0xff,
				.src_addr = RTE_BE32(0xffffffff),
				.dst_addr = RTE_BE32(0xffffffff),
			},
		},
		.mask_default = &rte_flow_item_ipv4_mask,
		.mask_sz = sizeof(struct rte_flow_item_ipv4),
		.merge = nfp_net_flow_merge_ipv4,
	},
	[RTE_FLOW_ITEM_TYPE_IPV6] = {
		.next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_TCP,
				RTE_FLOW_ITEM_TYPE_UDP,
				RTE_FLOW_ITEM_TYPE_SCTP),
		.mask_support = &(const struct rte_flow_item_ipv6){
			.hdr = {
				.proto = 0xff,
				.src_addr = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
						0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
				.dst_addr = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
						0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
			},
		},
		.mask_default = &rte_flow_item_ipv6_mask,
		.mask_sz = sizeof(struct rte_flow_item_ipv6),
		.merge = nfp_net_flow_merge_ipv6,
	},
	[RTE_FLOW_ITEM_TYPE_TCP] = {
		.mask_support = &(const struct rte_flow_item_tcp){
			.hdr = {
				.src_port = RTE_BE16(0xffff),
				.dst_port = RTE_BE16(0xffff),
			},
		},
		.mask_default = &rte_flow_item_tcp_mask,
		.mask_sz = sizeof(struct rte_flow_item_tcp),
		.merge = nfp_net_flow_merge_l4,
	},
	[RTE_FLOW_ITEM_TYPE_UDP] = {
		.mask_support = &(const struct rte_flow_item_udp){
			.hdr = {
				.src_port = RTE_BE16(0xffff),
				.dst_port = RTE_BE16(0xffff),
			},
		},
		.mask_default = &rte_flow_item_udp_mask,
		.mask_sz = sizeof(struct rte_flow_item_udp),
		.merge = nfp_net_flow_merge_l4,
	},
	[RTE_FLOW_ITEM_TYPE_SCTP] = {
		.mask_support = &(const struct rte_flow_item_sctp){
			.hdr = {
				.src_port = RTE_BE16(0xffff),
				.dst_port = RTE_BE16(0xffff),
			},
		},
		.mask_default = &rte_flow_item_sctp_mask,
		.mask_sz = sizeof(struct rte_flow_item_sctp),
		.merge = nfp_net_flow_merge_l4,
	},
};

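/*
 * Patterns accepted by the graph above (illustrative): "eth / end",
 * "ipv4 / end", "ipv4 / tcp / end", "ipv6 / udp / end", and so on.
 * The ETH entry has no next_item list, so a pattern such as
 * "eth / ipv4" is rejected during compilation with -ENOTSUP.
 */
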
static int
nfp_net_flow_item_check(const struct rte_flow_item *item,
		const struct nfp_net_flow_item_proc *proc)
{
	uint32_t i;
	int ret = 0;
	const uint8_t *mask;

	/* item->last and item->mask cannot exist without item->spec. */
	if (item->spec == NULL) {
		if (item->mask || item->last) {
			PMD_DRV_LOG(ERR, "'mask' or 'last' field provided"
					" without a corresponding 'spec'.");
			return -EINVAL;
		}

		/* No spec, no mask, no problem. */
		return 0;
	}

	mask = (item->mask != NULL) ? item->mask : proc->mask_default;

	/*
	 * Single-pass check to make sure that:
	 * - Mask is supported, no bits are set outside proc->mask_support.
	 * - Both item->spec and item->last are included in mask.
	 */
	for (i = 0; i != proc->mask_sz; ++i) {
		if (mask[i] == 0)
			continue;

		if ((mask[i] | ((const uint8_t *)proc->mask_support)[i]) !=
				((const uint8_t *)proc->mask_support)[i]) {
			PMD_DRV_LOG(ERR, "Unsupported field found in 'mask'.");
			ret = -EINVAL;
			break;
		}

		if (item->last != NULL &&
				(((const uint8_t *)item->spec)[i] & mask[i]) !=
				(((const uint8_t *)item->last)[i] & mask[i])) {
			PMD_DRV_LOG(ERR, "Range between 'spec' and 'last'"
					" is larger than 'mask'.");
			ret = -ERANGE;
			break;
		}
	}

	return ret;
}

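/*
 * Example of the spec/last range check above (illustrative): with an
 * IPv4 address mask of 255.255.255.0, spec 192.0.2.0 and last
 * 192.0.2.255 agree on every masked byte and pass, while last
 * 192.0.3.0 differs in a masked byte and fails with -ERANGE.
 */
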
static int
nfp_net_flow_compile_items(const struct rte_flow_item items[],
		struct rte_flow *nfp_flow)
{
	uint32_t i;
	int ret = 0;
	const struct rte_flow_item *item;
	const struct nfp_net_flow_item_proc *proc_list;

	proc_list = nfp_net_flow_item_proc_list;

	for (item = items; item->type != RTE_FLOW_ITEM_TYPE_END; ++item) {
		const struct nfp_net_flow_item_proc *proc = NULL;

		/* The current item must appear in the previous item's next list. */
		for (i = 0; (proc_list->next_item != NULL) &&
				(proc_list->next_item[i] != RTE_FLOW_ITEM_TYPE_END); ++i) {
			if (proc_list->next_item[i] == item->type) {
				proc = &nfp_net_flow_item_proc_list[item->type];
				break;
			}
		}

		if (proc == NULL) {
			PMD_DRV_LOG(ERR, "No next item provided for %d", item->type);
			ret = -ENOTSUP;
			break;
		}

		/* Perform basic sanity checks */
		ret = nfp_net_flow_item_check(item, proc);
		if (ret != 0) {
			PMD_DRV_LOG(ERR, "NFP flow item %d check failed", item->type);
			ret = -EINVAL;
			break;
		}

		if (proc->merge == NULL) {
			PMD_DRV_LOG(ERR, "NFP flow item %d no proc function", item->type);
			ret = -ENOTSUP;
			break;
		}

		ret = proc->merge(nfp_flow, item, proc);
		if (ret != 0) {
			PMD_DRV_LOG(ERR, "NFP flow item %d exact merge failed", item->type);
			break;
		}

		proc_list = proc;
	}

	return ret;
}

static void
nfp_net_flow_action_drop(struct rte_flow *nfp_flow)
{
	struct nfp_net_cmsg_action *action_data;

	action_data = (struct nfp_net_cmsg_action *)nfp_flow->payload.action_data;

	action_data->action = NFP_NET_CMSG_ACTION_DROP;
}

static void
nfp_net_flow_action_mark(struct rte_flow *nfp_flow,
		const struct rte_flow_action *action)
{
	struct nfp_net_cmsg_action *action_data;
	const struct rte_flow_action_mark *mark;

	action_data = (struct nfp_net_cmsg_action *)nfp_flow->payload.action_data;
	mark = action->conf;

	action_data->action |= NFP_NET_CMSG_ACTION_MARK;
	action_data->mark_id = mark->id;
}

static int
nfp_net_flow_action_queue(struct rte_eth_dev *dev,
		struct rte_flow *nfp_flow,
		const struct rte_flow_action *action)
{
	struct nfp_net_cmsg_action *action_data;
	const struct rte_flow_action_queue *queue;

	action_data = (struct nfp_net_cmsg_action *)nfp_flow->payload.action_data;
	queue = action->conf;
	if (queue->index >= dev->data->nb_rx_queues ||
			dev->data->rx_queues[queue->index] == NULL) {
		PMD_DRV_LOG(ERR, "Queue index is illegal.");
		return -EINVAL;
	}

	action_data->action |= NFP_NET_CMSG_ACTION_QUEUE;
	action_data->queue = queue->index;

	return 0;
}

static int
nfp_net_flow_compile_actions(struct rte_eth_dev *dev,
		const struct rte_flow_action actions[],
		struct rte_flow *nfp_flow)
{
	int ret = 0;
	const struct rte_flow_action *action;

	for (action = actions; action->type != RTE_FLOW_ACTION_TYPE_END; ++action) {
		switch (action->type) {
		case RTE_FLOW_ACTION_TYPE_DROP:
			PMD_DRV_LOG(DEBUG, "Process RTE_FLOW_ACTION_TYPE_DROP");
			nfp_net_flow_action_drop(nfp_flow);
			/* DROP overrides everything else and ends the parse. */
			return 0;
		case RTE_FLOW_ACTION_TYPE_MARK:
			PMD_DRV_LOG(DEBUG, "Process RTE_FLOW_ACTION_TYPE_MARK");
			nfp_net_flow_action_mark(nfp_flow, action);
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			PMD_DRV_LOG(DEBUG, "Process RTE_FLOW_ACTION_TYPE_QUEUE");
			ret = nfp_net_flow_action_queue(dev, nfp_flow, action);
			break;
		default:
			PMD_DRV_LOG(ERR, "Unsupported action type: %d", action->type);
			return -ENOTSUP;
		}
	}

	return ret;
}

static void
nfp_net_flow_process_priority(struct rte_flow *nfp_flow,
		uint32_t match_len)
{
	struct nfp_net_cmsg_match_v4 *ipv4;
	struct nfp_net_cmsg_match_v6 *ipv6;

	switch (match_len) {
	case sizeof(struct nfp_net_cmsg_match_v4):
		ipv4 = (struct nfp_net_cmsg_match_v4 *)nfp_flow->payload.match_data;
		ipv4->position = nfp_flow->position;
		break;
	case sizeof(struct nfp_net_cmsg_match_v6):
		ipv6 = (struct nfp_net_cmsg_match_v6 *)nfp_flow->payload.match_data;
		ipv6->position = nfp_flow->position;
		break;
	default:
		break;
	}
}

static int
nfp_net_flow_check_count(struct nfp_net_flow_count *flow_count,
		uint32_t item_type)
{
	int ret = 0;

	switch (item_type) {
	case RTE_FLOW_ITEM_TYPE_ETH:
		if (flow_count->eth_count >= NFP_NET_ETH_FLOW_LIMIT)
			ret = -ENOSPC;
		break;
	case RTE_FLOW_ITEM_TYPE_IPV4:
		if (flow_count->ipv4_count >= NFP_NET_IPV4_FLOW_LIMIT)
			ret = -ENOSPC;
		break;
	case RTE_FLOW_ITEM_TYPE_IPV6:
		if (flow_count->ipv6_count >= NFP_NET_IPV6_FLOW_LIMIT)
			ret = -ENOSPC;
		break;
	default:
		ret = -ENOTSUP;
		break;
	}

	return ret;
}

static int
nfp_net_flow_calculate_count(struct rte_flow *nfp_flow,
		struct nfp_net_flow_count *flow_count,
		bool delete_flag)
{
	uint16_t *count;

	switch (nfp_flow->payload.cmsg_type) {
	case NFP_NET_CFG_MBOX_CMD_FS_ADD_V4:
	case NFP_NET_CFG_MBOX_CMD_FS_DEL_V4:
		count = &flow_count->ipv4_count;
		break;
	case NFP_NET_CFG_MBOX_CMD_FS_ADD_V6:
	case NFP_NET_CFG_MBOX_CMD_FS_DEL_V6:
		count = &flow_count->ipv6_count;
		break;
	case NFP_NET_CFG_MBOX_CMD_FS_ADD_ETHTYPE:
	case NFP_NET_CFG_MBOX_CMD_FS_DEL_ETHTYPE:
		count = &flow_count->eth_count;
		break;
	default:
		PMD_DRV_LOG(ERR, "Flow count calculate failed.");
		return -EINVAL;
	}

	if (delete_flag)
		(*count)--;
	else
		(*count)++;

	return 0;
}

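/*
 * End-to-end example of the setup path below (illustrative only,
 * testpmd-style syntax):
 *   flow create 0 priority 1 ingress pattern ipv4 src is 192.0.2.1 /
 *       tcp dst is 80 / end actions queue index 3 / end
 * compiles into an NFP_NET_CFG_MBOX_CMD_FS_ADD_V4 match plus an
 * NFP_NET_CMSG_ACTION_QUEUE action.
 */
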
static struct rte_flow *
nfp_net_flow_setup(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item items[],
		const struct rte_flow_action actions[])
{
	int ret;
	char *hash_data;
	uint32_t port_id;
	uint32_t item_type;
	uint32_t action_len;
	struct nfp_net_hw *hw;
	uint32_t match_len = 0;
	struct nfp_net_priv *priv;
	struct rte_flow *nfp_flow;
	struct rte_flow *flow_find;
	struct nfp_net_hw_priv *hw_priv;
	struct nfp_app_fw_nic *app_fw_nic;

	hw = dev->data->dev_private;
	hw_priv = dev->process_private;
	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(hw_priv->pf_dev->app_fw_priv);
	priv = app_fw_nic->ports[hw->idx]->priv;

	ret = nfp_net_flow_calculate_items(items, &match_len, &item_type);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "Key layers calculation failed.");
		return NULL;
	}

	ret = nfp_net_flow_check_count(&priv->flow_count, item_type);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "Flow count check failed.");
		return NULL;
	}

	action_len = sizeof(struct nfp_net_cmsg_action);
	port_id = hw->nfp_idx;

	nfp_flow = nfp_net_flow_alloc(priv, attr->priority, match_len, action_len, port_id);
	if (nfp_flow == NULL) {
		PMD_DRV_LOG(ERR, "Alloc nfp flow failed.");
		return NULL;
	}

	ret = nfp_net_flow_compile_items(items, nfp_flow);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "NFP flow item process failed.");
		goto free_flow;
	}

	ret = nfp_net_flow_compile_actions(dev, actions, nfp_flow);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "NFP flow action process failed.");
		goto free_flow;
	}

	/* Calculate and store the hash_key for later use */
	hash_data = nfp_flow->payload.match_data;
	nfp_flow->hash_key = rte_jhash(hash_data, match_len + action_len,
			priv->hash_seed);

	/* Find the flow in hash table */
	flow_find = nfp_net_flow_table_search(priv, nfp_flow);
	if (flow_find != NULL) {
		PMD_DRV_LOG(ERR, "This flow already exists.");
		goto free_flow;
	}

	ret = nfp_net_flow_calculate_count(nfp_flow, &priv->flow_count, false);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "NFP flow calculate count failed.");
		goto free_flow;
	}

	nfp_net_flow_process_priority(nfp_flow, match_len);

	return nfp_flow;

free_flow:
	nfp_net_flow_free(priv, nfp_flow);

	return NULL;
}

static int
nfp_net_flow_teardown(struct nfp_net_priv *priv,
		struct rte_flow *nfp_flow)
{
	return nfp_net_flow_calculate_count(nfp_flow, &priv->flow_count, true);
}

static int
nfp_net_flow_offload(struct nfp_net_hw *hw,
		struct rte_flow *flow,
		bool delete_flag)
{
	int ret;
	char *tmp;
	uint32_t msg_size;
	struct nfp_net_cmsg *cmsg;

	msg_size = sizeof(uint32_t) + flow->payload.match_len +
			flow->payload.action_len;
	cmsg = nfp_net_cmsg_alloc(msg_size);
	if (cmsg == NULL) {
		PMD_DRV_LOG(ERR, "Alloc cmsg failed.");
		return -ENOMEM;
	}

	/* The delete command code directly follows the matching add command. */
	cmsg->cmd = flow->payload.cmsg_type;
	if (delete_flag)
		cmsg->cmd++;

	tmp = (char *)cmsg->data;
	rte_memcpy(tmp, flow->payload.match_data, flow->payload.match_len);
	tmp += flow->payload.match_len;
	rte_memcpy(tmp, flow->payload.action_data, flow->payload.action_len);

	ret = nfp_net_cmsg_xmit(hw, cmsg, msg_size);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "Send cmsg failed.");
		ret = -EINVAL;
		goto free_cmsg;
	}

free_cmsg:
	nfp_net_cmsg_free(cmsg);

	return ret;
}

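/*
 * Wire layout of the control message built by nfp_net_flow_offload()
 * (illustrative):
 *
 *   +-----------+---------------------+----------------------+
 *   | cmd (u32) |  match (match_len)  |  action (action_len) |
 *   +-----------+---------------------+----------------------+
 *
 * The same payload serves add and delete; only the command code differs.
 */
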
static int
nfp_net_flow_validate(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item items[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	int ret;
	struct nfp_net_hw *hw;
	struct rte_flow *nfp_flow;
	struct nfp_net_priv *priv;
	struct nfp_net_hw_priv *hw_priv;
	struct nfp_app_fw_nic *app_fw_nic;

	hw = dev->data->dev_private;
	hw_priv = dev->process_private;
	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(hw_priv->pf_dev->app_fw_priv);
	priv = app_fw_nic->ports[hw->idx]->priv;

	nfp_flow = nfp_net_flow_setup(dev, attr, items, actions);
	if (nfp_flow == NULL) {
		return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				NULL, "This flow cannot be offloaded.");
	}

	ret = nfp_net_flow_teardown(priv, nfp_flow);
	if (ret != 0) {
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				NULL, "Flow resource free failed.");
	}

	nfp_net_flow_free(priv, nfp_flow);

	return 0;
}

static struct rte_flow *
nfp_net_flow_create(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item items[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	int ret;
	struct nfp_net_hw *hw;
	struct rte_flow *nfp_flow;
	struct nfp_net_priv *priv;
	struct nfp_net_hw_priv *hw_priv;
	struct nfp_app_fw_nic *app_fw_nic;

	hw = dev->data->dev_private;
	hw_priv = dev->process_private;
	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(hw_priv->pf_dev->app_fw_priv);
	priv = app_fw_nic->ports[hw->idx]->priv;

	nfp_flow = nfp_net_flow_setup(dev, attr, items, actions);
	if (nfp_flow == NULL) {
		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				NULL, "This flow cannot be offloaded.");
		return NULL;
	}

	/* Add the flow to flow hash table */
	ret = nfp_net_flow_table_add(priv, nfp_flow);
	if (ret != 0) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				NULL, "Add flow to the flow table failed.");
		goto flow_teardown;
	}

	/* Add the flow to hardware */
	ret = nfp_net_flow_offload(hw, nfp_flow, false);
	if (ret != 0) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				NULL, "Add flow to firmware failed.");
		goto table_delete;
	}

	return nfp_flow;

table_delete:
	nfp_net_flow_table_delete(priv, nfp_flow);
flow_teardown:
	nfp_net_flow_teardown(priv, nfp_flow);
	nfp_net_flow_free(priv, nfp_flow);

	return NULL;
}

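/*
 * Note: nfp_net_flow_validate() above performs a full dry run of the
 * setup path and then releases the resources, so a flow that validates
 * is expected to create successfully, barring later table or firmware
 * failures.
 */
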
static int
nfp_net_flow_destroy(struct rte_eth_dev *dev,
		struct rte_flow *nfp_flow,
		struct rte_flow_error *error)
{
	int ret;
	struct nfp_net_hw *hw;
	struct nfp_net_priv *priv;
	struct rte_flow *flow_find;
	struct nfp_net_hw_priv *hw_priv;
	struct nfp_app_fw_nic *app_fw_nic;

	hw = dev->data->dev_private;
	hw_priv = dev->process_private;
	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(hw_priv->pf_dev->app_fw_priv);
	priv = app_fw_nic->ports[hw->idx]->priv;

	/* Find the flow in flow hash table */
	flow_find = nfp_net_flow_table_search(priv, nfp_flow);
	if (flow_find == NULL) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				NULL, "Flow does not exist.");
		ret = -EINVAL;
		goto exit;
	}

	/* Delete the flow from hardware */
	ret = nfp_net_flow_offload(hw, nfp_flow, true);
	if (ret != 0) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				NULL, "Delete flow from firmware failed.");
		ret = -EINVAL;
		goto exit;
	}

	/* Delete the flow from flow hash table */
	ret = nfp_net_flow_table_delete(priv, nfp_flow);
	if (ret != 0) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				NULL, "Delete flow from the flow table failed.");
		ret = -EINVAL;
		goto exit;
	}

	ret = nfp_net_flow_teardown(priv, nfp_flow);
	if (ret != 0) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				NULL, "Flow teardown failed.");
		ret = -EINVAL;
		goto exit;
	}

exit:
	nfp_net_flow_free(priv, nfp_flow);

	return ret;
}

static int
nfp_net_flow_flush(struct rte_eth_dev *dev,
		struct rte_flow_error *error)
{
	int ret = 0;
	void *next_data;
	uint32_t iter = 0;
	const void *next_key;
	struct nfp_net_hw *hw;
	struct rte_flow *nfp_flow;
	struct rte_hash *flow_table;
	struct nfp_net_hw_priv *hw_priv;
	struct nfp_app_fw_nic *app_fw_nic;

	hw = dev->data->dev_private;
	hw_priv = dev->process_private;
	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(hw_priv->pf_dev->app_fw_priv);
	flow_table = app_fw_nic->ports[hw->idx]->priv->flow_table;

	while (rte_hash_iterate(flow_table, &next_key, &next_data, &iter) >= 0) {
		nfp_flow = next_data;
		ret = nfp_net_flow_destroy(dev, nfp_flow, error);
		if (ret != 0)
			break;
	}

	return ret;
}

static const struct rte_flow_ops nfp_net_flow_ops = {
	.validate = nfp_net_flow_validate,
	.create = nfp_net_flow_create,
	.destroy = nfp_net_flow_destroy,
	.flush = nfp_net_flow_flush,
};

int
nfp_net_flow_ops_get(struct rte_eth_dev *dev,
		const struct rte_flow_ops **ops)
{
	struct nfp_net_hw *hw;

	if (rte_eth_dev_is_repr(dev)) {
		*ops = NULL;
		PMD_DRV_LOG(ERR, "Port is a representor.");
		return -EINVAL;
	}

	hw = dev->data->dev_private;
	if ((hw->super.ctrl_ext & NFP_NET_CFG_CTRL_FLOW_STEER) == 0) {
		*ops = NULL;
		return 0;
	}

	*ops = &nfp_net_flow_ops;

	return 0;
}

static uint32_t
nfp_net_fs_max_entry_get(struct nfp_hw *hw)
{
	uint32_t cnt;

	cnt = nn_cfg_readl(hw, NFP_NET_CFG_MAX_FS_CAP);
	if (cnt != 0)
		return cnt;

	/* Fall back to the default limit if the firmware reports nothing. */
	return NFP_NET_FLOW_LIMIT;
}

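/*
 * Example sizing for the per-port state set up below (illustrative,
 * assuming the firmware reports 1024 entries): flow_position holds
 * 1024 slots and the hash table is created with
 * 1024 * NFP_NET_HASH_REDUNDANCE entries, presumably to keep the
 * table sparsely loaded.
 */
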
int
nfp_net_flow_priv_init(struct nfp_pf_dev *pf_dev,
		uint16_t port)
{
	int ret = 0;
	struct nfp_hw *hw;
	struct nfp_net_priv *priv;
	char flow_name[RTE_HASH_NAMESIZE];
	struct nfp_app_fw_nic *app_fw_nic;
	const char *pci_name = strchr(pf_dev->pci_dev->name, ':') + 1;

	snprintf(flow_name, sizeof(flow_name), "%s_fl_%u", pci_name, port);

	struct rte_hash_parameters flow_hash_params = {
		.name = flow_name,
		.hash_func = rte_jhash,
		.socket_id = rte_socket_id(),
		.key_len = sizeof(uint32_t),
		.extra_flag = RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY,
	};

	priv = rte_zmalloc("nfp_app_nic_priv", sizeof(struct nfp_net_priv), 0);
	if (priv == NULL) {
		PMD_INIT_LOG(ERR, "NFP app nic priv creation failed");
		ret = -ENOMEM;
		goto exit;
	}

	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv);
	app_fw_nic->ports[port]->priv = priv;
	priv->hash_seed = (uint32_t)rte_rand();

	/* Flow limit */
	hw = &app_fw_nic->ports[port]->super;
	priv->flow_limit = nfp_net_fs_max_entry_get(hw);
	if (priv->flow_limit == 0) {
		PMD_INIT_LOG(ERR, "NFP app nic flow limit is invalid.");
		ret = -EINVAL;
		goto free_priv;
	}

	/* Flow position array */
	priv->flow_position = rte_zmalloc(NULL, sizeof(bool) * priv->flow_limit, 0);
	if (priv->flow_position == NULL) {
		PMD_INIT_LOG(ERR, "NFP app nic flow position creation failed.");
		ret = -ENOMEM;
		goto free_priv;
	}

	/* Flow table */
	flow_hash_params.hash_func_init_val = priv->hash_seed;
	flow_hash_params.entries = priv->flow_limit * NFP_NET_HASH_REDUNDANCE;
	priv->flow_table = rte_hash_create(&flow_hash_params);
	if (priv->flow_table == NULL) {
		PMD_INIT_LOG(ERR, "Flow hash table creation failed");
		ret = -ENOMEM;
		goto free_flow_position;
	}

	return 0;

free_flow_position:
	rte_free(priv->flow_position);
free_priv:
	rte_free(priv);
exit:
	return ret;
}

void
nfp_net_flow_priv_uninit(struct nfp_pf_dev *pf_dev,
		uint16_t port)
{
	struct nfp_net_priv *priv;
	struct nfp_app_fw_nic *app_fw_nic;

	if (pf_dev == NULL)
		return;

	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv);
	priv = app_fw_nic->ports[port]->priv;
	if (priv != NULL) {
		rte_hash_free(priv->flow_table);
		rte_free(priv->flow_position);
	}

	rte_free(priv);
}