/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2023 Corigine, Inc.
 * All rights reserved.
 */

#include "nfp_net_flow.h"

#include <rte_flow_driver.h>
#include <rte_hash.h>
#include <rte_jhash.h>
#include <rte_malloc.h>

#include "nfp_logs.h"
#include "nfp_net_cmsg.h"

/* Static initializer for a list of subsequent item types */
#define NEXT_ITEM(...) \
	((const enum rte_flow_item_type []){ \
		__VA_ARGS__, RTE_FLOW_ITEM_TYPE_END, \
	})

/* Process structure associated with a flow item */
struct nfp_net_flow_item_proc {
	/* Bit-mask for fields supported by this PMD. */
	const void *mask_support;
	/* Bit-mask to use when @p item->mask is not provided. */
	const void *mask_default;
	/* Size in bytes for @p mask_support and @p mask_default. */
	const uint32_t mask_sz;
	/* Merge a pattern item into a flow rule handle. */
	int (*merge)(struct rte_flow *nfp_flow,
			const struct rte_flow_item *item,
			const struct nfp_net_flow_item_proc *proc);
	/* List of possible subsequent items. */
	const enum rte_flow_item_type *const next_item;
};

static int
nfp_net_flow_table_add(struct nfp_net_priv *priv,
		struct rte_flow *nfp_flow)
{
	int ret;

	ret = rte_hash_add_key_data(priv->flow_table, &nfp_flow->hash_key, nfp_flow);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "Add to flow table failed.");
		return ret;
	}

	return 0;
}

static int
nfp_net_flow_table_delete(struct nfp_net_priv *priv,
		struct rte_flow *nfp_flow)
{
	int ret;

	ret = rte_hash_del_key(priv->flow_table, &nfp_flow->hash_key);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Delete from flow table failed.");
		return ret;
	}

	return 0;
}

static struct rte_flow *
nfp_net_flow_table_search(struct nfp_net_priv *priv,
		struct rte_flow *nfp_flow)
{
	int index;
	struct rte_flow *flow_find;

	index = rte_hash_lookup_data(priv->flow_table, &nfp_flow->hash_key,
			(void **)&flow_find);
	if (index < 0) {
		PMD_DRV_LOG(DEBUG, "Data NOT found in the flow table.");
		return NULL;
	}

	return flow_find;
}

/*
 * Flow rules live in a fixed-size position array. Priority 0 requests the
 * first free slot; a non-zero priority maps to one specific slot, so two
 * rules with the same priority collide. The hardware position is the mirror
 * image of the array index: position = limit - index - 1.
 */
static int
nfp_net_flow_position_acquire(struct nfp_net_priv *priv,
		uint32_t priority,
		struct rte_flow *nfp_flow)
{
	uint32_t i;
	uint32_t limit;

	limit = priv->flow_limit;

	if (priority != 0) {
		i = limit - priority - 1;

		if (priv->flow_position[i]) {
			PMD_DRV_LOG(ERR, "There is already a flow rule in this place.");
			return -EAGAIN;
		}

		priv->flow_position[i] = true;
		nfp_flow->position = priority;
		return 0;
	}

	for (i = 0; i < limit; i++) {
		if (!priv->flow_position[i]) {
			priv->flow_position[i] = true;
			break;
		}
	}

	if (i == limit) {
		PMD_DRV_LOG(ERR, "The flow rule limit has been reached.");
		return -ERANGE;
	}

	nfp_flow->position = limit - i - 1;

	return 0;
}

static void
nfp_net_flow_position_free(struct nfp_net_priv *priv,
		struct rte_flow *nfp_flow)
{
	uint32_t index;

	/*
	 * Invert the mapping used by nfp_net_flow_position_acquire().
	 * Use the runtime limit rather than the compile-time
	 * NFP_NET_FLOW_LIMIT, since the firmware-reported capability
	 * may differ from the default.
	 */
	index = priv->flow_limit - 1 - nfp_flow->position;

	priv->flow_position[index] = false;
}
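
/*
 * Allocate a flow rule handle plus a single payload buffer that holds the
 * match data and the action data back to back; action_data simply points
 * match_len bytes into the same allocation, so freeing payload.match_data
 * releases both.
 */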
static struct rte_flow *
nfp_net_flow_alloc(struct nfp_net_priv *priv,
		uint32_t priority,
		uint32_t match_len,
		uint32_t action_len,
		uint32_t port_id)
{
	int ret;
	char *data;
	struct rte_flow *nfp_flow;
	struct nfp_net_flow_payload *payload;

	nfp_flow = rte_zmalloc("nfp_flow", sizeof(struct rte_flow), 0);
	if (nfp_flow == NULL)
		return NULL;

	data = rte_zmalloc("nfp_flow_payload", match_len + action_len, 0);
	if (data == NULL)
		goto free_flow;

	ret = nfp_net_flow_position_acquire(priv, priority, nfp_flow);
	if (ret != 0)
		goto free_payload;

	nfp_flow->port_id = port_id;
	payload = &nfp_flow->payload;
	payload->match_len = match_len;
	payload->action_len = action_len;
	payload->match_data = data;
	payload->action_data = data + match_len;

	return nfp_flow;

free_payload:
	rte_free(data);
free_flow:
	rte_free(nfp_flow);

	return NULL;
}

static void
nfp_net_flow_free(struct nfp_net_priv *priv,
		struct rte_flow *nfp_flow)
{
	nfp_net_flow_position_free(priv, nfp_flow);
	rte_free(nfp_flow->payload.match_data);
	rte_free(nfp_flow);
}

static int
nfp_net_flow_calculate_items(const struct rte_flow_item items[],
		uint32_t *match_len,
		uint32_t *item_type)
{
	int ret = -EINVAL;
	const struct rte_flow_item *item;

	for (item = items; item->type != RTE_FLOW_ITEM_TYPE_END; ++item) {
		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			PMD_DRV_LOG(DEBUG, "RTE_FLOW_ITEM_TYPE_ETH detected.");
			*match_len = sizeof(struct nfp_net_cmsg_match_eth);
			*item_type = RTE_FLOW_ITEM_TYPE_ETH;
			ret = 0;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			PMD_DRV_LOG(DEBUG, "RTE_FLOW_ITEM_TYPE_IPV4 detected.");
			*match_len = sizeof(struct nfp_net_cmsg_match_v4);
			*item_type = RTE_FLOW_ITEM_TYPE_IPV4;
			return 0;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			PMD_DRV_LOG(DEBUG, "RTE_FLOW_ITEM_TYPE_IPV6 detected.");
			*match_len = sizeof(struct nfp_net_cmsg_match_v6);
			*item_type = RTE_FLOW_ITEM_TYPE_IPV6;
			return 0;
		default:
			PMD_DRV_LOG(ERR, "Cannot calculate match length.");
			*match_len = 0;
			return -ENOTSUP;
		}
	}

	return ret;
}

static int
nfp_net_flow_merge_eth(struct rte_flow *nfp_flow,
		const struct rte_flow_item *item,
		__rte_unused const struct nfp_net_flow_item_proc *proc)
{
	struct nfp_net_cmsg_match_eth *eth;
	const struct rte_flow_item_eth *spec;

	spec = item->spec;
	if (spec == NULL) {
		PMD_DRV_LOG(ERR, "NFP flow merge eth: no item->spec!");
		return -EINVAL;
	}

	nfp_flow->payload.cmsg_type = NFP_NET_CFG_MBOX_CMD_FS_ADD_ETHTYPE;

	eth = (struct nfp_net_cmsg_match_eth *)nfp_flow->payload.match_data;
	eth->ether_type = rte_be_to_cpu_16(spec->type);

	return 0;
}
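
/*
 * A NULL spec on an IPv4/IPv6 item is legal: it matches any address and
 * protocol, so only the cmsg type is recorded. When the application does
 * not supply a mask, the item's default mask from the process table is
 * used instead.
 */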
static int
nfp_net_flow_merge_ipv4(struct rte_flow *nfp_flow,
		const struct rte_flow_item *item,
		const struct nfp_net_flow_item_proc *proc)
{
	struct nfp_net_cmsg_match_v4 *ipv4;
	const struct rte_flow_item_ipv4 *mask;
	const struct rte_flow_item_ipv4 *spec;

	nfp_flow->payload.cmsg_type = NFP_NET_CFG_MBOX_CMD_FS_ADD_V4;

	spec = item->spec;
	if (spec == NULL) {
		PMD_DRV_LOG(DEBUG, "NFP flow merge ipv4: no item->spec!");
		return 0;
	}

	mask = (item->mask != NULL) ? item->mask : proc->mask_default;

	ipv4 = (struct nfp_net_cmsg_match_v4 *)nfp_flow->payload.match_data;

	ipv4->l4_protocol_mask = mask->hdr.next_proto_id;
	ipv4->src_ipv4_mask = rte_be_to_cpu_32(mask->hdr.src_addr);
	ipv4->dst_ipv4_mask = rte_be_to_cpu_32(mask->hdr.dst_addr);

	ipv4->l4_protocol = spec->hdr.next_proto_id;
	ipv4->src_ipv4 = rte_be_to_cpu_32(spec->hdr.src_addr);
	ipv4->dst_ipv4 = rte_be_to_cpu_32(spec->hdr.dst_addr);

	return 0;
}

static int
nfp_net_flow_merge_ipv6(struct rte_flow *nfp_flow,
		const struct rte_flow_item *item,
		const struct nfp_net_flow_item_proc *proc)
{
	uint32_t i;
	struct nfp_net_cmsg_match_v6 *ipv6;
	const struct rte_flow_item_ipv6 *mask;
	const struct rte_flow_item_ipv6 *spec;

	nfp_flow->payload.cmsg_type = NFP_NET_CFG_MBOX_CMD_FS_ADD_V6;

	spec = item->spec;
	if (spec == NULL) {
		PMD_DRV_LOG(DEBUG, "NFP flow merge ipv6: no item->spec!");
		return 0;
	}

	mask = (item->mask != NULL) ? item->mask : proc->mask_default;

	ipv6 = (struct nfp_net_cmsg_match_v6 *)nfp_flow->payload.match_data;

	ipv6->l4_protocol_mask = mask->hdr.proto;
	/* Swap the byte order within each 32-bit word of the address. */
	for (i = 0; i < sizeof(ipv6->src_ipv6); i += 4) {
		ipv6->src_ipv6_mask[i] = mask->hdr.src_addr.a[i + 3];
		ipv6->src_ipv6_mask[i + 1] = mask->hdr.src_addr.a[i + 2];
		ipv6->src_ipv6_mask[i + 2] = mask->hdr.src_addr.a[i + 1];
		ipv6->src_ipv6_mask[i + 3] = mask->hdr.src_addr.a[i];

		ipv6->dst_ipv6_mask[i] = mask->hdr.dst_addr.a[i + 3];
		ipv6->dst_ipv6_mask[i + 1] = mask->hdr.dst_addr.a[i + 2];
		ipv6->dst_ipv6_mask[i + 2] = mask->hdr.dst_addr.a[i + 1];
		ipv6->dst_ipv6_mask[i + 3] = mask->hdr.dst_addr.a[i];
	}

	ipv6->l4_protocol = spec->hdr.proto;
	for (i = 0; i < sizeof(ipv6->src_ipv6); i += 4) {
		ipv6->src_ipv6[i] = spec->hdr.src_addr.a[i + 3];
		ipv6->src_ipv6[i + 1] = spec->hdr.src_addr.a[i + 2];
		ipv6->src_ipv6[i + 2] = spec->hdr.src_addr.a[i + 1];
		ipv6->src_ipv6[i + 3] = spec->hdr.src_addr.a[i];

		ipv6->dst_ipv6[i] = spec->hdr.dst_addr.a[i + 3];
		ipv6->dst_ipv6[i + 1] = spec->hdr.dst_addr.a[i + 2];
		ipv6->dst_ipv6[i + 2] = spec->hdr.dst_addr.a[i + 1];
		ipv6->dst_ipv6[i + 3] = spec->hdr.dst_addr.a[i];
	}

	return 0;
}
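
/*
 * Shared merge handler for TCP, UDP and SCTP. Casting the spec/mask to
 * struct rte_flow_item_tcp is safe for all three protocols because the
 * source and destination ports are the leading 16-bit fields in each
 * header. The L3 merge handler must have run first, since the cmsg type
 * it records selects which match structure the ports are written into.
 */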
static int
nfp_flow_merge_l4(struct rte_flow *nfp_flow,
		const struct rte_flow_item *item,
		const struct nfp_net_flow_item_proc *proc)
{
	const struct rte_flow_item_tcp *mask;
	const struct rte_flow_item_tcp *spec;
	struct nfp_net_cmsg_match_v4 *ipv4 = NULL;
	struct nfp_net_cmsg_match_v6 *ipv6 = NULL;

	spec = item->spec;
	if (spec == NULL) {
		PMD_DRV_LOG(ERR, "NFP flow merge tcp: no item->spec!");
		return -EINVAL;
	}

	mask = (item->mask != NULL) ? item->mask : proc->mask_default;

	switch (nfp_flow->payload.cmsg_type) {
	case NFP_NET_CFG_MBOX_CMD_FS_ADD_V4:
		ipv4 = (struct nfp_net_cmsg_match_v4 *)nfp_flow->payload.match_data;
		break;
	case NFP_NET_CFG_MBOX_CMD_FS_ADD_V6:
		ipv6 = (struct nfp_net_cmsg_match_v6 *)nfp_flow->payload.match_data;
		break;
	default:
		PMD_DRV_LOG(ERR, "L3 layer is neither IPv4 nor IPv6.");
		return -EINVAL;
	}

	if (ipv4 != NULL) {
		ipv4->src_port_mask = rte_be_to_cpu_16(mask->hdr.src_port);
		ipv4->dst_port_mask = rte_be_to_cpu_16(mask->hdr.dst_port);

		ipv4->src_port = rte_be_to_cpu_16(spec->hdr.src_port);
		ipv4->dst_port = rte_be_to_cpu_16(spec->hdr.dst_port);
	} else if (ipv6 != NULL) {
		ipv6->src_port_mask = rte_be_to_cpu_16(mask->hdr.src_port);
		ipv6->dst_port_mask = rte_be_to_cpu_16(mask->hdr.dst_port);

		ipv6->src_port = rte_be_to_cpu_16(spec->hdr.src_port);
		ipv6->dst_port = rte_be_to_cpu_16(spec->hdr.dst_port);
	} else {
		PMD_DRV_LOG(ERR, "No valid L3 layer pointer.");
		return -EINVAL;
	}

	return 0;
}

/* Graph of supported items and associated process function */
static const struct nfp_net_flow_item_proc nfp_net_flow_item_proc_list[] = {
	[RTE_FLOW_ITEM_TYPE_END] = {
		.next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_ETH,
				RTE_FLOW_ITEM_TYPE_IPV4,
				RTE_FLOW_ITEM_TYPE_IPV6),
	},
	[RTE_FLOW_ITEM_TYPE_ETH] = {
		.merge = nfp_net_flow_merge_eth,
	},
	[RTE_FLOW_ITEM_TYPE_IPV4] = {
		.next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_TCP,
				RTE_FLOW_ITEM_TYPE_UDP,
				RTE_FLOW_ITEM_TYPE_SCTP),
		.mask_support = &(const struct rte_flow_item_ipv4){
			.hdr = {
				.next_proto_id = 0xff,
				.src_addr = RTE_BE32(0xffffffff),
				.dst_addr = RTE_BE32(0xffffffff),
			},
		},
		.mask_default = &rte_flow_item_ipv4_mask,
		.mask_sz = sizeof(struct rte_flow_item_ipv4),
		.merge = nfp_net_flow_merge_ipv4,
	},
	[RTE_FLOW_ITEM_TYPE_IPV6] = {
		.next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_TCP,
				RTE_FLOW_ITEM_TYPE_UDP,
				RTE_FLOW_ITEM_TYPE_SCTP),
		.mask_support = &(const struct rte_flow_item_ipv6){
			.hdr = {
				.proto = 0xff,
				.src_addr = RTE_IPV6_MASK_FULL,
				.dst_addr = RTE_IPV6_MASK_FULL,
			},
		},
		.mask_default = &rte_flow_item_ipv6_mask,
		.mask_sz = sizeof(struct rte_flow_item_ipv6),
		.merge = nfp_net_flow_merge_ipv6,
	},
	[RTE_FLOW_ITEM_TYPE_TCP] = {
		.mask_support = &(const struct rte_flow_item_tcp){
			.hdr = {
				.src_port = RTE_BE16(0xffff),
				.dst_port = RTE_BE16(0xffff),
			},
		},
		.mask_default = &rte_flow_item_tcp_mask,
		.mask_sz = sizeof(struct rte_flow_item_tcp),
		.merge = nfp_flow_merge_l4,
	},
	[RTE_FLOW_ITEM_TYPE_UDP] = {
		.mask_support = &(const struct rte_flow_item_udp){
			.hdr = {
				.src_port = RTE_BE16(0xffff),
				.dst_port = RTE_BE16(0xffff),
			},
		},
		.mask_default = &rte_flow_item_udp_mask,
		.mask_sz = sizeof(struct rte_flow_item_udp),
		.merge = nfp_flow_merge_l4,
	},
	[RTE_FLOW_ITEM_TYPE_SCTP] = {
		.mask_support = &(const struct rte_flow_item_sctp){
			.hdr = {
				.src_port = RTE_BE16(0xffff),
				.dst_port = RTE_BE16(0xffff),
			},
		},
		.mask_default = &rte_flow_item_sctp_mask,
		.mask_sz = sizeof(struct rte_flow_item_sctp),
		.merge = nfp_flow_merge_l4,
	},
};
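
/*
 * Validate one pattern item against its process entry: every bit set in
 * the effective mask must also be set in mask_support, and any spec/last
 * range must collapse to a single value under that mask. The comparison
 * is done byte by byte, so it works for any item layout.
 */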
static int
nfp_net_flow_item_check(const struct rte_flow_item *item,
		const struct nfp_net_flow_item_proc *proc)
{
	uint32_t i;
	int ret = 0;
	const uint8_t *mask;

	/* item->last and item->mask cannot exist without item->spec. */
	if (item->spec == NULL) {
		if (item->mask || item->last) {
			PMD_DRV_LOG(ERR, "The 'mask' or 'last' field provided"
					" without a corresponding 'spec'.");
			return -EINVAL;
		}

		/* No spec, no mask, no problem. */
		return 0;
	}

	mask = (item->mask != NULL) ? item->mask : proc->mask_default;

	/*
	 * Single-pass check to make sure that:
	 * - Mask is supported, no bits are set outside proc->mask_support.
	 * - Both item->spec and item->last are included in mask.
	 */
	for (i = 0; i != proc->mask_sz; ++i) {
		if (mask[i] == 0)
			continue;

		if ((mask[i] | ((const uint8_t *)proc->mask_support)[i]) !=
				((const uint8_t *)proc->mask_support)[i]) {
			PMD_DRV_LOG(ERR, "Unsupported field found in 'mask'.");
			ret = -EINVAL;
			break;
		}

		if (item->last != NULL &&
				(((const uint8_t *)item->spec)[i] & mask[i]) !=
				(((const uint8_t *)item->last)[i] & mask[i])) {
			PMD_DRV_LOG(ERR, "Range between 'spec' and 'last'"
					" is larger than 'mask'.");
			ret = -ERANGE;
			break;
		}
	}

	return ret;
}

static int
nfp_net_flow_compile_items(const struct rte_flow_item items[],
		struct rte_flow *nfp_flow)
{
	uint32_t i;
	int ret = 0;
	const struct rte_flow_item *item;
	const struct nfp_net_flow_item_proc *proc_list;

	proc_list = nfp_net_flow_item_proc_list;

	for (item = items; item->type != RTE_FLOW_ITEM_TYPE_END; ++item) {
		const struct nfp_net_flow_item_proc *proc = NULL;

		/* The item type must be listed as a valid successor of the previous one. */
		for (i = 0; (proc_list->next_item != NULL) &&
				(proc_list->next_item[i] != RTE_FLOW_ITEM_TYPE_END); ++i) {
			if (proc_list->next_item[i] == item->type) {
				proc = &nfp_net_flow_item_proc_list[item->type];
				break;
			}
		}

		if (proc == NULL) {
			PMD_DRV_LOG(ERR, "No next item provided for %d.", item->type);
			ret = -ENOTSUP;
			break;
		}

		/* Perform basic sanity checks */
		ret = nfp_net_flow_item_check(item, proc);
		if (ret != 0) {
			PMD_DRV_LOG(ERR, "NFP flow item %d check failed.", item->type);
			ret = -EINVAL;
			break;
		}

		if (proc->merge == NULL) {
			PMD_DRV_LOG(ERR, "NFP flow item %d no proc function.", item->type);
			ret = -ENOTSUP;
			break;
		}

		ret = proc->merge(nfp_flow, item, proc);
		if (ret != 0) {
			PMD_DRV_LOG(ERR, "NFP flow item %d exact merge failed.", item->type);
			break;
		}

		proc_list = proc;
	}

	return ret;
}

static void
nfp_net_flow_action_drop(struct rte_flow *nfp_flow)
{
	struct nfp_net_cmsg_action *action_data;

	action_data = (struct nfp_net_cmsg_action *)nfp_flow->payload.action_data;

	action_data->action = NFP_NET_CMSG_ACTION_DROP;
}

static void
nfp_net_flow_action_mark(struct rte_flow *nfp_flow,
		const struct rte_flow_action *action)
{
	struct nfp_net_cmsg_action *action_data;
	const struct rte_flow_action_mark *mark;

	action_data = (struct nfp_net_cmsg_action *)nfp_flow->payload.action_data;
	mark = action->conf;

	action_data->action |= NFP_NET_CMSG_ACTION_MARK;
	action_data->mark_id = mark->id;
}
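
/*
 * A queue action is only accepted when the target Rx queue both exists
 * and has been set up; otherwise the rule would steer packets to a queue
 * the application never configured.
 */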
static int
nfp_net_flow_action_queue(struct rte_eth_dev *dev,
		struct rte_flow *nfp_flow,
		const struct rte_flow_action *action)
{
	struct nfp_net_cmsg_action *action_data;
	const struct rte_flow_action_queue *queue;

	action_data = (struct nfp_net_cmsg_action *)nfp_flow->payload.action_data;
	queue = action->conf;
	if (queue->index >= dev->data->nb_rx_queues ||
			dev->data->rx_queues[queue->index] == NULL) {
		PMD_DRV_LOG(ERR, "Queue index is invalid.");
		return -EINVAL;
	}

	action_data->action |= NFP_NET_CMSG_ACTION_QUEUE;
	action_data->queue = queue->index;

	return 0;
}

static int
nfp_net_flow_compile_actions(struct rte_eth_dev *dev,
		const struct rte_flow_action actions[],
		struct rte_flow *nfp_flow)
{
	int ret = 0;
	const struct rte_flow_action *action;

	for (action = actions; action->type != RTE_FLOW_ACTION_TYPE_END; ++action) {
		switch (action->type) {
		case RTE_FLOW_ACTION_TYPE_DROP:
			PMD_DRV_LOG(DEBUG, "Process RTE_FLOW_ACTION_TYPE_DROP.");
			nfp_net_flow_action_drop(nfp_flow);
			return 0;
		case RTE_FLOW_ACTION_TYPE_MARK:
			PMD_DRV_LOG(DEBUG, "Process RTE_FLOW_ACTION_TYPE_MARK.");
			nfp_net_flow_action_mark(nfp_flow, action);
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			PMD_DRV_LOG(DEBUG, "Process RTE_FLOW_ACTION_TYPE_QUEUE.");
			ret = nfp_net_flow_action_queue(dev, nfp_flow, action);
			break;
		default:
			PMD_DRV_LOG(ERR, "Unsupported action type: %d.", action->type);
			return -ENOTSUP;
		}
	}

	return ret;
}

static void
nfp_net_flow_process_priority(struct rte_flow *nfp_flow,
		uint32_t match_len)
{
	struct nfp_net_cmsg_match_v4 *ipv4;
	struct nfp_net_cmsg_match_v6 *ipv6;

	switch (match_len) {
	case sizeof(struct nfp_net_cmsg_match_v4):
		ipv4 = (struct nfp_net_cmsg_match_v4 *)nfp_flow->payload.match_data;
		ipv4->position = nfp_flow->position;
		break;
	case sizeof(struct nfp_net_cmsg_match_v6):
		ipv6 = (struct nfp_net_cmsg_match_v6 *)nfp_flow->payload.match_data;
		ipv6->position = nfp_flow->position;
		break;
	default:
		break;
	}
}

static int
nfp_net_flow_check_count(struct nfp_net_flow_count *flow_count,
		uint32_t item_type)
{
	int ret = 0;

	switch (item_type) {
	case RTE_FLOW_ITEM_TYPE_ETH:
		if (flow_count->eth_count >= NFP_NET_ETH_FLOW_LIMIT)
			ret = -ENOSPC;
		break;
	case RTE_FLOW_ITEM_TYPE_IPV4:
		if (flow_count->ipv4_count >= NFP_NET_IPV4_FLOW_LIMIT)
			ret = -ENOSPC;
		break;
	case RTE_FLOW_ITEM_TYPE_IPV6:
		if (flow_count->ipv6_count >= NFP_NET_IPV6_FLOW_LIMIT)
			ret = -ENOSPC;
		break;
	default:
		ret = -ENOTSUP;
		break;
	}

	return ret;
}

static int
nfp_net_flow_calculate_count(struct rte_flow *nfp_flow,
		struct nfp_net_flow_count *flow_count,
		bool delete_flag)
{
	uint16_t *count;

	switch (nfp_flow->payload.cmsg_type) {
	case NFP_NET_CFG_MBOX_CMD_FS_ADD_V4:
	case NFP_NET_CFG_MBOX_CMD_FS_DEL_V4:
		count = &flow_count->ipv4_count;
		break;
	case NFP_NET_CFG_MBOX_CMD_FS_ADD_V6:
	case NFP_NET_CFG_MBOX_CMD_FS_DEL_V6:
		count = &flow_count->ipv6_count;
		break;
	case NFP_NET_CFG_MBOX_CMD_FS_ADD_ETHTYPE:
	case NFP_NET_CFG_MBOX_CMD_FS_DEL_ETHTYPE:
		count = &flow_count->eth_count;
		break;
	default:
		PMD_DRV_LOG(ERR, "Flow count calculate failed.");
		return -EINVAL;
	}

	if (delete_flag)
		(*count)--;
	else
		(*count)++;

	return 0;
}
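
/*
 * Build a flow rule from the rte_flow pattern and actions: size the match
 * payload, check the per-type rule budget, allocate the handle, compile
 * items and actions into the cmsg payload, then hash the full match plus
 * action payload to detect duplicate rules before counting the new one.
 */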
static struct rte_flow *
nfp_net_flow_setup(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item items[],
		const struct rte_flow_action actions[])
{
	int ret;
	char *hash_data;
	uint32_t port_id;
	uint32_t item_type;
	uint32_t action_len;
	struct nfp_net_hw *hw;
	uint32_t match_len = 0;
	struct nfp_net_priv *priv;
	struct rte_flow *nfp_flow;
	struct rte_flow *flow_find;
	struct nfp_net_hw_priv *hw_priv;
	struct nfp_app_fw_nic *app_fw_nic;

	hw = dev->data->dev_private;
	hw_priv = dev->process_private;
	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(hw_priv->pf_dev->app_fw_priv);
	priv = app_fw_nic->ports[hw->idx]->priv;

	ret = nfp_net_flow_calculate_items(items, &match_len, &item_type);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "Key layers calculate failed.");
		return NULL;
	}

	ret = nfp_net_flow_check_count(&priv->flow_count, item_type);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "Flow count check failed.");
		return NULL;
	}

	action_len = sizeof(struct nfp_net_cmsg_action);
	port_id = hw->nfp_idx;

	nfp_flow = nfp_net_flow_alloc(priv, attr->priority, match_len, action_len, port_id);
	if (nfp_flow == NULL) {
		PMD_DRV_LOG(ERR, "Alloc nfp flow failed.");
		return NULL;
	}

	ret = nfp_net_flow_compile_items(items, nfp_flow);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "NFP flow item process failed.");
		goto free_flow;
	}

	ret = nfp_net_flow_compile_actions(dev, actions, nfp_flow);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "NFP flow action process failed.");
		goto free_flow;
	}

	/* Calculate and store the hash_key for later use */
	hash_data = nfp_flow->payload.match_data;
	nfp_flow->hash_key = rte_jhash(hash_data, match_len + action_len,
			priv->hash_seed);

	/* Find the flow in hash table */
	flow_find = nfp_net_flow_table_search(priv, nfp_flow);
	if (flow_find != NULL) {
		PMD_DRV_LOG(ERR, "This flow already exists.");
		goto free_flow;
	}

	ret = nfp_net_flow_calculate_count(nfp_flow, &priv->flow_count, false);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "NFP flow calculate count failed.");
		goto free_flow;
	}

	nfp_net_flow_process_priority(nfp_flow, match_len);

	return nfp_flow;

free_flow:
	nfp_net_flow_free(priv, nfp_flow);

	return NULL;
}

static int
nfp_net_flow_teardown(struct nfp_net_priv *priv,
		struct rte_flow *nfp_flow)
{
	return nfp_net_flow_calculate_count(nfp_flow, &priv->flow_count, true);
}

static int
nfp_net_flow_offload(struct nfp_net_hw *hw,
		struct rte_flow *flow,
		bool delete_flag)
{
	int ret;
	char *tmp;
	uint32_t msg_size;
	struct nfp_net_cmsg *cmsg;

	msg_size = sizeof(uint32_t) + flow->payload.match_len +
			flow->payload.action_len;
	cmsg = nfp_net_cmsg_alloc(msg_size);
	if (cmsg == NULL) {
		PMD_DRV_LOG(ERR, "Alloc cmsg failed.");
		return -ENOMEM;
	}

	/* The delete command value is the matching add command plus one. */
	cmsg->cmd = flow->payload.cmsg_type;
	if (delete_flag)
		cmsg->cmd++;

	tmp = (char *)cmsg->data;
	rte_memcpy(tmp, flow->payload.match_data, flow->payload.match_len);
	tmp += flow->payload.match_len;
	rte_memcpy(tmp, flow->payload.action_data, flow->payload.action_len);

	ret = nfp_net_cmsg_xmit(hw, cmsg, msg_size);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "Send cmsg failed.");
		ret = -EINVAL;
		goto free_cmsg;
	}

free_cmsg:
	nfp_net_cmsg_free(cmsg);

	return ret;
}
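
/*
 * Validation performs a full dry run: the rule is compiled exactly as in
 * create, then its counters and memory are released without ever touching
 * the hardware.
 */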
static int
nfp_net_flow_validate(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item items[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	int ret;
	struct nfp_net_hw *hw;
	struct rte_flow *nfp_flow;
	struct nfp_net_priv *priv;
	struct nfp_net_hw_priv *hw_priv;
	struct nfp_app_fw_nic *app_fw_nic;

	hw = dev->data->dev_private;
	hw_priv = dev->process_private;
	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(hw_priv->pf_dev->app_fw_priv);
	priv = app_fw_nic->ports[hw->idx]->priv;

	nfp_flow = nfp_net_flow_setup(dev, attr, items, actions);
	if (nfp_flow == NULL) {
		return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				NULL, "This flow cannot be offloaded.");
	}

	ret = nfp_net_flow_teardown(priv, nfp_flow);
	if (ret != 0) {
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				NULL, "Flow resource free failed.");
	}

	nfp_net_flow_free(priv, nfp_flow);

	return 0;
}

static struct rte_flow *
nfp_net_flow_create(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item items[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	int ret;
	struct nfp_net_hw *hw;
	struct rte_flow *nfp_flow;
	struct nfp_net_priv *priv;
	struct nfp_net_hw_priv *hw_priv;
	struct nfp_app_fw_nic *app_fw_nic;

	hw = dev->data->dev_private;
	hw_priv = dev->process_private;
	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(hw_priv->pf_dev->app_fw_priv);
	priv = app_fw_nic->ports[hw->idx]->priv;

	nfp_flow = nfp_net_flow_setup(dev, attr, items, actions);
	if (nfp_flow == NULL) {
		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				NULL, "This flow cannot be offloaded.");
		return NULL;
	}

	/* Add the flow to flow hash table */
	ret = nfp_net_flow_table_add(priv, nfp_flow);
	if (ret != 0) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				NULL, "Add flow to the flow table failed.");
		goto flow_teardown;
	}

	/* Add the flow to hardware */
	ret = nfp_net_flow_offload(hw, nfp_flow, false);
	if (ret != 0) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				NULL, "Add flow to firmware failed.");
		goto table_delete;
	}

	return nfp_flow;

table_delete:
	nfp_net_flow_table_delete(priv, nfp_flow);
flow_teardown:
	nfp_net_flow_teardown(priv, nfp_flow);
	nfp_net_flow_free(priv, nfp_flow);

	return NULL;
}
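
/*
 * Tear down in the reverse order of creation: remove the rule from the
 * firmware first, then from the software hash table, then drop its
 * per-type counter. The flow memory itself is freed on every path,
 * including the error ones.
 */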
static int
nfp_net_flow_destroy(struct rte_eth_dev *dev,
		struct rte_flow *nfp_flow,
		struct rte_flow_error *error)
{
	int ret;
	struct nfp_net_hw *hw;
	struct nfp_net_priv *priv;
	struct rte_flow *flow_find;
	struct nfp_net_hw_priv *hw_priv;
	struct nfp_app_fw_nic *app_fw_nic;

	hw = dev->data->dev_private;
	hw_priv = dev->process_private;
	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(hw_priv->pf_dev->app_fw_priv);
	priv = app_fw_nic->ports[hw->idx]->priv;

	/* Find the flow in flow hash table */
	flow_find = nfp_net_flow_table_search(priv, nfp_flow);
	if (flow_find == NULL) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				NULL, "Flow does not exist.");
		ret = -EINVAL;
		goto exit;
	}

	/* Delete the flow from hardware */
	ret = nfp_net_flow_offload(hw, nfp_flow, true);
	if (ret != 0) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				NULL, "Delete flow from firmware failed.");
		ret = -EINVAL;
		goto exit;
	}

	/* Delete the flow from flow hash table */
	ret = nfp_net_flow_table_delete(priv, nfp_flow);
	if (ret != 0) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				NULL, "Delete flow from the flow table failed.");
		ret = -EINVAL;
		goto exit;
	}

	ret = nfp_net_flow_teardown(priv, nfp_flow);
	if (ret != 0) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				NULL, "Flow teardown failed.");
		ret = -EINVAL;
		goto exit;
	}

exit:
	nfp_net_flow_free(priv, nfp_flow);

	return ret;
}

static int
nfp_net_flow_flush(struct rte_eth_dev *dev,
		struct rte_flow_error *error)
{
	int ret = 0;
	void *next_data;
	uint32_t iter = 0;
	const void *next_key;
	struct nfp_net_hw *hw;
	struct rte_flow *nfp_flow;
	struct rte_hash *flow_table;
	struct nfp_net_hw_priv *hw_priv;
	struct nfp_app_fw_nic *app_fw_nic;

	hw = dev->data->dev_private;
	hw_priv = dev->process_private;
	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(hw_priv->pf_dev->app_fw_priv);
	flow_table = app_fw_nic->ports[hw->idx]->priv->flow_table;

	while (rte_hash_iterate(flow_table, &next_key, &next_data, &iter) >= 0) {
		nfp_flow = next_data;
		ret = nfp_net_flow_destroy(dev, nfp_flow, error);
		if (ret != 0)
			break;
	}

	return ret;
}

static const struct rte_flow_ops nfp_net_flow_ops = {
	.validate = nfp_net_flow_validate,
	.create = nfp_net_flow_create,
	.destroy = nfp_net_flow_destroy,
	.flush = nfp_net_flow_flush,
};

int
nfp_net_flow_ops_get(struct rte_eth_dev *dev,
		const struct rte_flow_ops **ops)
{
	struct nfp_net_hw *hw;

	if (rte_eth_dev_is_repr(dev)) {
		*ops = NULL;
		PMD_DRV_LOG(ERR, "Port is a representor.");
		return -EINVAL;
	}

	hw = dev->data->dev_private;
	if ((hw->super.ctrl_ext & NFP_NET_CFG_CTRL_FLOW_STEER) == 0) {
		*ops = NULL;
		return 0;
	}

	*ops = &nfp_net_flow_ops;

	return 0;
}

static uint32_t
nfp_net_fs_max_entry_get(struct nfp_hw *hw)
{
	uint32_t cnt;

	/* Prefer the firmware-reported capability; fall back to the default. */
	cnt = nn_cfg_readl(hw, NFP_NET_CFG_MAX_FS_CAP);
	if (cnt != 0)
		return cnt;

	return NFP_NET_FLOW_LIMIT;
}
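
/*
 * Per-port private data holds everything this module needs: a random jhash
 * seed, the position bitmap sized from the firmware flow limit, and the
 * flow hash table keyed by the 32-bit payload hash. The table is
 * over-provisioned (limit multiplied by NFP_NET_HASH_REDUNDANCE),
 * presumably to reduce collision pressure.
 */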
int
nfp_net_flow_priv_init(struct nfp_pf_dev *pf_dev,
		uint16_t port)
{
	int ret = 0;
	struct nfp_hw *hw;
	struct nfp_net_priv *priv;
	char flow_name[RTE_HASH_NAMESIZE];
	struct nfp_app_fw_nic *app_fw_nic;
	const char *pci_name = strchr(pf_dev->pci_dev->name, ':') + 1;

	snprintf(flow_name, sizeof(flow_name), "%s_fl_%u", pci_name, port);

	struct rte_hash_parameters flow_hash_params = {
		.name = flow_name,
		.hash_func = rte_jhash,
		.socket_id = rte_socket_id(),
		.key_len = sizeof(uint32_t),
		.extra_flag = RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY,
	};

	priv = rte_zmalloc("nfp_app_nic_priv", sizeof(struct nfp_net_priv), 0);
	if (priv == NULL) {
		PMD_INIT_LOG(ERR, "NFP app NIC priv creation failed.");
		ret = -ENOMEM;
		goto exit;
	}

	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv);
	app_fw_nic->ports[port]->priv = priv;
	priv->hash_seed = (uint32_t)rte_rand();

	/* Flow limit */
	hw = &app_fw_nic->ports[port]->super;
	priv->flow_limit = nfp_net_fs_max_entry_get(hw);
	if (priv->flow_limit == 0) {
		PMD_INIT_LOG(ERR, "NFP app NIC flow limit is invalid.");
		ret = -EINVAL;
		goto free_priv;
	}

	/* Flow position array */
	priv->flow_position = rte_zmalloc(NULL, sizeof(bool) * priv->flow_limit, 0);
	if (priv->flow_position == NULL) {
		PMD_INIT_LOG(ERR, "NFP app NIC flow position creation failed.");
		ret = -ENOMEM;
		goto free_priv;
	}

	/* Flow table */
	flow_hash_params.hash_func_init_val = priv->hash_seed;
	flow_hash_params.entries = priv->flow_limit * NFP_NET_HASH_REDUNDANCE;
	priv->flow_table = rte_hash_create(&flow_hash_params);
	if (priv->flow_table == NULL) {
		PMD_INIT_LOG(ERR, "Flow hash table creation failed.");
		ret = -ENOMEM;
		goto free_flow_position;
	}

	return 0;

free_flow_position:
	rte_free(priv->flow_position);
free_priv:
	rte_free(priv);
exit:
	return ret;
}

void
nfp_net_flow_priv_uninit(struct nfp_pf_dev *pf_dev,
		uint16_t port)
{
	struct nfp_net_priv *priv;
	struct nfp_app_fw_nic *app_fw_nic;

	if (pf_dev == NULL)
		return;

	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv);
	priv = app_fw_nic->ports[port]->priv;
	if (priv != NULL) {
		rte_hash_free(priv->flow_table);
		rte_free(priv->flow_position);
	}

	rte_free(priv);
}