1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2014-2023 Broadcom 3 * All rights reserved. 4 */ 5 6 #include <sys/queue.h> 7 8 #include <rte_log.h> 9 #include <rte_malloc.h> 10 #include <rte_flow.h> 11 #include <rte_flow_driver.h> 12 #include <rte_tailq.h> 13 #include <rte_alarm.h> 14 #include <rte_cycles.h> 15 16 #include "bnxt.h" 17 #include "bnxt_filter.h" 18 #include "bnxt_hwrm.h" 19 #include "bnxt_ring.h" 20 #include "bnxt_rxq.h" 21 #include "bnxt_rxr.h" 22 #include "bnxt_vnic.h" 23 #include "hsi_struct_def_dpdk.h" 24 25 static int 26 bnxt_flow_args_validate(const struct rte_flow_attr *attr, 27 const struct rte_flow_item pattern[], 28 const struct rte_flow_action actions[], 29 struct rte_flow_error *error) 30 { 31 if (!pattern) { 32 rte_flow_error_set(error, 33 EINVAL, 34 RTE_FLOW_ERROR_TYPE_ITEM_NUM, 35 NULL, 36 "NULL pattern."); 37 return -rte_errno; 38 } 39 40 if (!actions) { 41 rte_flow_error_set(error, 42 EINVAL, 43 RTE_FLOW_ERROR_TYPE_ACTION_NUM, 44 NULL, 45 "NULL action."); 46 return -rte_errno; 47 } 48 49 if (!attr) { 50 rte_flow_error_set(error, 51 EINVAL, 52 RTE_FLOW_ERROR_TYPE_ATTR, 53 NULL, 54 "NULL attribute."); 55 return -rte_errno; 56 } 57 58 return 0; 59 } 60 61 static const struct rte_flow_item * 62 bnxt_flow_non_void_item(const struct rte_flow_item *cur) 63 { 64 while (1) { 65 if (cur->type != RTE_FLOW_ITEM_TYPE_VOID) 66 return cur; 67 cur++; 68 } 69 } 70 71 static const struct rte_flow_action * 72 bnxt_flow_non_void_action(const struct rte_flow_action *cur) 73 { 74 while (1) { 75 if (cur->type != RTE_FLOW_ACTION_TYPE_VOID) 76 return cur; 77 cur++; 78 } 79 } 80 81 static int 82 bnxt_filter_type_check(const struct rte_flow_item pattern[], 83 struct rte_flow_error *error) 84 { 85 const struct rte_flow_item *item = 86 bnxt_flow_non_void_item(pattern); 87 int use_ntuple = 1; 88 bool has_vlan = 0; 89 90 while (item->type != RTE_FLOW_ITEM_TYPE_END) { 91 switch (item->type) { 92 case RTE_FLOW_ITEM_TYPE_ANY: 93 case RTE_FLOW_ITEM_TYPE_ETH: 94 use_ntuple = 0; 95 break; 96 case RTE_FLOW_ITEM_TYPE_VLAN: 97 use_ntuple = 0; 98 has_vlan = 1; 99 break; 100 case RTE_FLOW_ITEM_TYPE_IPV4: 101 case RTE_FLOW_ITEM_TYPE_IPV6: 102 case RTE_FLOW_ITEM_TYPE_TCP: 103 case RTE_FLOW_ITEM_TYPE_UDP: 104 /* FALLTHROUGH */ 105 /* need ntuple match, reset exact match */ 106 use_ntuple |= 1; 107 break; 108 default: 109 PMD_DRV_LOG(DEBUG, "Unknown Flow type\n"); 110 use_ntuple |= 0; 111 } 112 item++; 113 } 114 115 if (has_vlan && use_ntuple) { 116 PMD_DRV_LOG(ERR, 117 "VLAN flow cannot use NTUPLE filter\n"); 118 rte_flow_error_set(error, EINVAL, 119 RTE_FLOW_ERROR_TYPE_ITEM, 120 item, 121 "Cannot use VLAN with NTUPLE"); 122 return -rte_errno; 123 } 124 125 return use_ntuple; 126 } 127 128 static int 129 bnxt_validate_and_parse_flow_type(const struct rte_flow_attr *attr, 130 const struct rte_flow_item pattern[], 131 struct rte_flow_error *error, 132 struct bnxt_filter_info *filter) 133 { 134 const struct rte_flow_item *item = bnxt_flow_non_void_item(pattern); 135 const struct rte_flow_item_vlan *vlan_spec, *vlan_mask; 136 const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask; 137 const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask; 138 const struct rte_flow_item_tcp *tcp_spec, *tcp_mask; 139 const struct rte_flow_item_udp *udp_spec, *udp_mask; 140 const struct rte_flow_item_eth *eth_spec, *eth_mask; 141 const struct rte_ether_addr *dst, *src; 142 const struct rte_flow_item_nvgre *nvgre_spec; 143 const struct rte_flow_item_nvgre *nvgre_mask; 144 const struct rte_flow_item_gre *gre_spec; 
145 const struct rte_flow_item_gre *gre_mask; 146 const struct rte_flow_item_vxlan *vxlan_spec; 147 const struct rte_flow_item_vxlan *vxlan_mask; 148 uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF}; 149 uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF}; 150 uint32_t tenant_id_be = 0, valid_flags = 0; 151 bool vni_masked = 0; 152 bool tni_masked = 0; 153 uint32_t en_ethertype; 154 uint8_t inner = 0; 155 uint32_t en = 0; 156 int use_ntuple; 157 158 use_ntuple = bnxt_filter_type_check(pattern, error); 159 if (use_ntuple < 0) 160 return use_ntuple; 161 PMD_DRV_LOG(DEBUG, "Use NTUPLE %d\n", use_ntuple); 162 163 filter->filter_type = use_ntuple ? 164 HWRM_CFA_NTUPLE_FILTER : HWRM_CFA_L2_FILTER; 165 en_ethertype = use_ntuple ? 166 NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE : 167 EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE; 168 169 while (item->type != RTE_FLOW_ITEM_TYPE_END) { 170 if (item->last) { 171 /* last or range is NOT supported as match criteria */ 172 rte_flow_error_set(error, EINVAL, 173 RTE_FLOW_ERROR_TYPE_ITEM, 174 item, 175 "No support for range"); 176 return -rte_errno; 177 } 178 179 switch (item->type) { 180 case RTE_FLOW_ITEM_TYPE_ANY: 181 inner = 182 ((const struct rte_flow_item_any *)item->spec)->num > 3; 183 if (inner) 184 PMD_DRV_LOG(DEBUG, "Parse inner header\n"); 185 break; 186 case RTE_FLOW_ITEM_TYPE_ETH: 187 if (!item->spec) 188 break; 189 190 eth_spec = item->spec; 191 192 if (item->mask) 193 eth_mask = item->mask; 194 else 195 eth_mask = &rte_flow_item_eth_mask; 196 197 /* Source MAC address mask cannot be partially set. 198 * Should be All 0's or all 1's. 199 * Destination MAC address mask must not be partially 200 * set. Should be all 1's or all 0's. 201 */ 202 if ((!rte_is_zero_ether_addr(ð_mask->hdr.src_addr) && 203 !rte_is_broadcast_ether_addr(ð_mask->hdr.src_addr)) || 204 (!rte_is_zero_ether_addr(ð_mask->hdr.dst_addr) && 205 !rte_is_broadcast_ether_addr(ð_mask->hdr.dst_addr))) { 206 rte_flow_error_set(error, 207 EINVAL, 208 RTE_FLOW_ERROR_TYPE_ITEM, 209 item, 210 "MAC_addr mask not valid"); 211 return -rte_errno; 212 } 213 214 /* Mask is not allowed. Only exact matches are */ 215 if (eth_mask->hdr.ether_type && 216 eth_mask->hdr.ether_type != RTE_BE16(0xffff)) { 217 rte_flow_error_set(error, EINVAL, 218 RTE_FLOW_ERROR_TYPE_ITEM, 219 item, 220 "ethertype mask not valid"); 221 return -rte_errno; 222 } 223 224 if (rte_is_broadcast_ether_addr(ð_mask->hdr.dst_addr)) { 225 dst = ð_spec->hdr.dst_addr; 226 if (!rte_is_valid_assigned_ether_addr(dst)) { 227 rte_flow_error_set(error, 228 EINVAL, 229 RTE_FLOW_ERROR_TYPE_ITEM, 230 item, 231 "DMAC is invalid"); 232 PMD_DRV_LOG(ERR, 233 "DMAC is invalid!\n"); 234 return -rte_errno; 235 } 236 rte_memcpy(filter->dst_macaddr, 237 ð_spec->hdr.dst_addr, RTE_ETHER_ADDR_LEN); 238 en |= use_ntuple ? 239 NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR : 240 EM_FLOW_ALLOC_INPUT_EN_DST_MACADDR; 241 valid_flags |= inner ? 242 BNXT_FLOW_L2_INNER_DST_VALID_FLAG : 243 BNXT_FLOW_L2_DST_VALID_FLAG; 244 filter->priority = attr->priority; 245 PMD_DRV_LOG(DEBUG, 246 "Creating a priority flow\n"); 247 } 248 if (rte_is_broadcast_ether_addr(ð_mask->hdr.src_addr)) { 249 src = ð_spec->hdr.src_addr; 250 if (!rte_is_valid_assigned_ether_addr(src)) { 251 rte_flow_error_set(error, 252 EINVAL, 253 RTE_FLOW_ERROR_TYPE_ITEM, 254 item, 255 "SMAC is invalid"); 256 PMD_DRV_LOG(ERR, 257 "SMAC is invalid!\n"); 258 return -rte_errno; 259 } 260 rte_memcpy(filter->src_macaddr, 261 ð_spec->hdr.src_addr, RTE_ETHER_ADDR_LEN); 262 en |= use_ntuple ? 
263 NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR : 264 EM_FLOW_ALLOC_INPUT_EN_SRC_MACADDR; 265 valid_flags |= inner ? 266 BNXT_FLOW_L2_INNER_SRC_VALID_FLAG : 267 BNXT_FLOW_L2_SRC_VALID_FLAG; 268 } /* 269 * else { 270 * PMD_DRV_LOG(ERR, "Handle this condition\n"); 271 * } 272 */ 273 if (eth_mask->hdr.ether_type) { 274 filter->ethertype = 275 rte_be_to_cpu_16(eth_spec->hdr.ether_type); 276 en |= en_ethertype; 277 } 278 if (inner) 279 valid_flags |= BNXT_FLOW_PARSE_INNER_FLAG; 280 281 break; 282 case RTE_FLOW_ITEM_TYPE_VLAN: 283 vlan_spec = item->spec; 284 285 if (item->mask) 286 vlan_mask = item->mask; 287 else 288 vlan_mask = &rte_flow_item_vlan_mask; 289 290 if (en & en_ethertype) { 291 rte_flow_error_set(error, EINVAL, 292 RTE_FLOW_ERROR_TYPE_ITEM, 293 item, 294 "VLAN TPID matching is not" 295 " supported"); 296 return -rte_errno; 297 } 298 if (vlan_mask->hdr.vlan_tci && 299 vlan_mask->hdr.vlan_tci == RTE_BE16(0x0fff)) { 300 /* Only the VLAN ID can be matched. */ 301 filter->l2_ovlan = 302 rte_be_to_cpu_16(vlan_spec->hdr.vlan_tci & 303 RTE_BE16(0x0fff)); 304 en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID; 305 } else { 306 rte_flow_error_set(error, 307 EINVAL, 308 RTE_FLOW_ERROR_TYPE_ITEM, 309 item, 310 "VLAN mask is invalid"); 311 return -rte_errno; 312 } 313 if (vlan_mask->hdr.eth_proto && 314 vlan_mask->hdr.eth_proto != RTE_BE16(0xffff)) { 315 rte_flow_error_set(error, EINVAL, 316 RTE_FLOW_ERROR_TYPE_ITEM, 317 item, 318 "inner ethertype mask not" 319 " valid"); 320 return -rte_errno; 321 } 322 if (vlan_mask->hdr.eth_proto) { 323 filter->ethertype = 324 rte_be_to_cpu_16(vlan_spec->hdr.eth_proto); 325 en |= en_ethertype; 326 } 327 328 break; 329 case RTE_FLOW_ITEM_TYPE_IPV4: 330 /* If mask is not involved, we could use EM filters. */ 331 ipv4_spec = item->spec; 332 333 if (!item->spec) 334 break; 335 336 if (item->mask) 337 ipv4_mask = item->mask; 338 else 339 ipv4_mask = &rte_flow_item_ipv4_mask; 340 341 /* Only IP DST and SRC fields are maskable. */ 342 if (ipv4_mask->hdr.version_ihl || 343 ipv4_mask->hdr.type_of_service || 344 ipv4_mask->hdr.total_length || 345 ipv4_mask->hdr.packet_id || 346 ipv4_mask->hdr.fragment_offset || 347 ipv4_mask->hdr.time_to_live || 348 ipv4_mask->hdr.next_proto_id || 349 ipv4_mask->hdr.hdr_checksum) { 350 rte_flow_error_set(error, 351 EINVAL, 352 RTE_FLOW_ERROR_TYPE_ITEM, 353 item, 354 "Invalid IPv4 mask."); 355 return -rte_errno; 356 } 357 358 filter->dst_ipaddr[0] = ipv4_spec->hdr.dst_addr; 359 filter->src_ipaddr[0] = ipv4_spec->hdr.src_addr; 360 361 if (use_ntuple) 362 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR | 363 NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; 364 else 365 en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR | 366 EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR; 367 368 if (ipv4_mask->hdr.src_addr) { 369 filter->src_ipaddr_mask[0] = 370 ipv4_mask->hdr.src_addr; 371 en |= !use_ntuple ? 0 : 372 NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; 373 } 374 375 if (ipv4_mask->hdr.dst_addr) { 376 filter->dst_ipaddr_mask[0] = 377 ipv4_mask->hdr.dst_addr; 378 en |= !use_ntuple ? 0 : 379 NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; 380 } 381 382 filter->ip_addr_type = use_ntuple ? 
383 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 : 384 HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4; 385 386 if (ipv4_spec->hdr.next_proto_id) { 387 filter->ip_protocol = 388 ipv4_spec->hdr.next_proto_id; 389 if (use_ntuple) 390 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 391 else 392 en |= EM_FLOW_ALLOC_INPUT_EN_IP_PROTO; 393 } 394 break; 395 case RTE_FLOW_ITEM_TYPE_IPV6: 396 ipv6_spec = item->spec; 397 398 if (!item->spec) 399 break; 400 401 if (item->mask) 402 ipv6_mask = item->mask; 403 else 404 ipv6_mask = &rte_flow_item_ipv6_mask; 405 406 /* Only IP DST and SRC fields are maskable. */ 407 if (ipv6_mask->hdr.vtc_flow || 408 ipv6_mask->hdr.payload_len || 409 ipv6_mask->hdr.proto || 410 ipv6_mask->hdr.hop_limits) { 411 rte_flow_error_set(error, 412 EINVAL, 413 RTE_FLOW_ERROR_TYPE_ITEM, 414 item, 415 "Invalid IPv6 mask."); 416 return -rte_errno; 417 } 418 419 if (use_ntuple) 420 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR | 421 NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; 422 else 423 en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR | 424 EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR; 425 426 rte_memcpy(filter->src_ipaddr, 427 ipv6_spec->hdr.src_addr, 16); 428 rte_memcpy(filter->dst_ipaddr, 429 ipv6_spec->hdr.dst_addr, 16); 430 431 if (!bnxt_check_zero_bytes(ipv6_mask->hdr.src_addr, 432 16)) { 433 rte_memcpy(filter->src_ipaddr_mask, 434 ipv6_mask->hdr.src_addr, 16); 435 en |= !use_ntuple ? 0 : 436 NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; 437 } 438 439 if (!bnxt_check_zero_bytes(ipv6_mask->hdr.dst_addr, 440 16)) { 441 rte_memcpy(filter->dst_ipaddr_mask, 442 ipv6_mask->hdr.dst_addr, 16); 443 en |= !use_ntuple ? 0 : 444 NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; 445 } 446 447 filter->ip_addr_type = use_ntuple ? 448 NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 : 449 EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6; 450 break; 451 case RTE_FLOW_ITEM_TYPE_TCP: 452 tcp_spec = item->spec; 453 454 if (!item->spec) 455 break; 456 457 if (item->mask) 458 tcp_mask = item->mask; 459 else 460 tcp_mask = &rte_flow_item_tcp_mask; 461 462 /* Check TCP mask. Only DST & SRC ports are maskable */ 463 if (tcp_mask->hdr.sent_seq || 464 tcp_mask->hdr.recv_ack || 465 tcp_mask->hdr.data_off || 466 tcp_mask->hdr.tcp_flags || 467 tcp_mask->hdr.rx_win || 468 tcp_mask->hdr.cksum || 469 tcp_mask->hdr.tcp_urp) { 470 rte_flow_error_set(error, 471 EINVAL, 472 RTE_FLOW_ERROR_TYPE_ITEM, 473 item, 474 "Invalid TCP mask"); 475 return -rte_errno; 476 } 477 478 filter->src_port = tcp_spec->hdr.src_port; 479 filter->dst_port = tcp_spec->hdr.dst_port; 480 481 if (use_ntuple) 482 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT | 483 NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT; 484 else 485 en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT | 486 EM_FLOW_ALLOC_INPUT_EN_DST_PORT; 487 488 if (tcp_mask->hdr.dst_port) { 489 filter->dst_port_mask = tcp_mask->hdr.dst_port; 490 en |= !use_ntuple ? 0 : 491 NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; 492 } 493 494 if (tcp_mask->hdr.src_port) { 495 filter->src_port_mask = tcp_mask->hdr.src_port; 496 en |= !use_ntuple ? 
0 : 497 NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; 498 } 499 break; 500 case RTE_FLOW_ITEM_TYPE_UDP: 501 udp_spec = item->spec; 502 503 if (!item->spec) 504 break; 505 506 if (item->mask) 507 udp_mask = item->mask; 508 else 509 udp_mask = &rte_flow_item_udp_mask; 510 511 if (udp_mask->hdr.dgram_len || 512 udp_mask->hdr.dgram_cksum) { 513 rte_flow_error_set(error, 514 EINVAL, 515 RTE_FLOW_ERROR_TYPE_ITEM, 516 item, 517 "Invalid UDP mask"); 518 return -rte_errno; 519 } 520 521 filter->src_port = udp_spec->hdr.src_port; 522 filter->dst_port = udp_spec->hdr.dst_port; 523 524 if (use_ntuple) 525 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT | 526 NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT; 527 else 528 en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT | 529 EM_FLOW_ALLOC_INPUT_EN_DST_PORT; 530 531 if (udp_mask->hdr.dst_port) { 532 filter->dst_port_mask = udp_mask->hdr.dst_port; 533 en |= !use_ntuple ? 0 : 534 NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; 535 } 536 537 if (udp_mask->hdr.src_port) { 538 filter->src_port_mask = udp_mask->hdr.src_port; 539 en |= !use_ntuple ? 0 : 540 NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; 541 } 542 break; 543 case RTE_FLOW_ITEM_TYPE_VXLAN: 544 vxlan_spec = item->spec; 545 vxlan_mask = item->mask; 546 /* Check if VXLAN item is used to describe protocol. 547 * If yes, both spec and mask should be NULL. 548 * If no, both spec and mask shouldn't be NULL. 549 */ 550 if ((!vxlan_spec && vxlan_mask) || 551 (vxlan_spec && !vxlan_mask)) { 552 rte_flow_error_set(error, 553 EINVAL, 554 RTE_FLOW_ERROR_TYPE_ITEM, 555 item, 556 "Invalid VXLAN item"); 557 return -rte_errno; 558 } 559 560 if (!vxlan_spec && !vxlan_mask) { 561 filter->tunnel_type = 562 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN; 563 break; 564 } 565 566 if ((vxlan_spec->hdr.rsvd0[0] != 0) || 567 (vxlan_spec->hdr.rsvd0[1] != 0) || 568 (vxlan_spec->hdr.rsvd0[2] != 0) || 569 (vxlan_spec->hdr.rsvd1 != 0) || 570 (vxlan_spec->hdr.flags != 8)) { 571 rte_flow_error_set(error, 572 EINVAL, 573 RTE_FLOW_ERROR_TYPE_ITEM, 574 item, 575 "Invalid VXLAN item"); 576 return -rte_errno; 577 } 578 579 /* Check if VNI is masked. */ 580 if (vxlan_mask != NULL) { 581 vni_masked = 582 !!memcmp(vxlan_mask->hdr.vni, vni_mask, 583 RTE_DIM(vni_mask)); 584 if (vni_masked) { 585 rte_flow_error_set 586 (error, 587 EINVAL, 588 RTE_FLOW_ERROR_TYPE_ITEM, 589 item, 590 "Invalid VNI mask"); 591 return -rte_errno; 592 } 593 594 rte_memcpy(((uint8_t *)&tenant_id_be + 1), 595 vxlan_spec->hdr.vni, 3); 596 filter->vni = 597 rte_be_to_cpu_32(tenant_id_be); 598 filter->tunnel_type = 599 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN; 600 } 601 break; 602 case RTE_FLOW_ITEM_TYPE_NVGRE: 603 nvgre_spec = item->spec; 604 nvgre_mask = item->mask; 605 /* Check if NVGRE item is used to describe protocol. 606 * If yes, both spec and mask should be NULL. 607 * If no, both spec and mask shouldn't be NULL. 
608 */ 609 if ((!nvgre_spec && nvgre_mask) || 610 (nvgre_spec && !nvgre_mask)) { 611 rte_flow_error_set(error, 612 EINVAL, 613 RTE_FLOW_ERROR_TYPE_ITEM, 614 item, 615 "Invalid NVGRE item"); 616 return -rte_errno; 617 } 618 619 if (!nvgre_spec && !nvgre_mask) { 620 filter->tunnel_type = 621 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE; 622 break; 623 } 624 625 if (nvgre_spec->c_k_s_rsvd0_ver != 0x2000 || 626 nvgre_spec->protocol != 0x6558) { 627 rte_flow_error_set(error, 628 EINVAL, 629 RTE_FLOW_ERROR_TYPE_ITEM, 630 item, 631 "Invalid NVGRE item"); 632 return -rte_errno; 633 } 634 635 if (nvgre_spec && nvgre_mask) { 636 tni_masked = 637 !!memcmp(nvgre_mask->tni, tni_mask, 638 RTE_DIM(tni_mask)); 639 if (tni_masked) { 640 rte_flow_error_set 641 (error, 642 EINVAL, 643 RTE_FLOW_ERROR_TYPE_ITEM, 644 item, 645 "Invalid TNI mask"); 646 return -rte_errno; 647 } 648 rte_memcpy(((uint8_t *)&tenant_id_be + 1), 649 nvgre_spec->tni, 3); 650 filter->vni = 651 rte_be_to_cpu_32(tenant_id_be); 652 filter->tunnel_type = 653 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE; 654 } 655 break; 656 657 case RTE_FLOW_ITEM_TYPE_GRE: 658 gre_spec = (const struct rte_flow_item_gre *)item->spec; 659 gre_mask = (const struct rte_flow_item_gre *)item->mask; 660 661 /* 662 *Check if GRE item is used to describe protocol. 663 * If yes, both spec and mask should be NULL. 664 * If no, both spec and mask shouldn't be NULL. 665 */ 666 if (!!gre_spec ^ !!gre_mask) { 667 rte_flow_error_set(error, EINVAL, 668 RTE_FLOW_ERROR_TYPE_ITEM, 669 item, 670 "Invalid GRE item"); 671 return -rte_errno; 672 } 673 674 if (!gre_spec && !gre_mask) { 675 filter->tunnel_type = 676 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE; 677 break; 678 } 679 break; 680 default: 681 break; 682 } 683 item++; 684 } 685 filter->enables = en; 686 filter->valid_flags = valid_flags; 687 688 /* Items parsed but no filter to create in HW. */ 689 if (filter->enables == 0 && filter->valid_flags == 0) 690 filter->filter_type = HWRM_CFA_CONFIG; 691 692 return 0; 693 } 694 695 /* Parse attributes */ 696 static int 697 bnxt_flow_parse_attr(const struct rte_flow_attr *attr, 698 struct rte_flow_error *error) 699 { 700 /* Must be input direction */ 701 if (!attr->ingress) { 702 rte_flow_error_set(error, 703 EINVAL, 704 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, 705 attr, 706 "Only support ingress."); 707 return -rte_errno; 708 } 709 710 /* Not supported */ 711 if (attr->egress) { 712 rte_flow_error_set(error, 713 EINVAL, 714 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, 715 attr, 716 "No support for egress."); 717 return -rte_errno; 718 } 719 720 return 0; 721 } 722 723 static struct bnxt_filter_info * 724 bnxt_find_matching_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf) 725 { 726 struct bnxt_filter_info *mf, *f0; 727 struct bnxt_vnic_info *vnic0; 728 int i; 729 730 vnic0 = bnxt_get_default_vnic(bp); 731 f0 = STAILQ_FIRST(&vnic0->filter); 732 733 /* This flow has same DST MAC as the port/l2 filter. 
*/ 734 if (memcmp(f0->l2_addr, nf->dst_macaddr, RTE_ETHER_ADDR_LEN) == 0) 735 return f0; 736 737 for (i = bp->max_vnics - 1; i >= 0; i--) { 738 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 739 740 if (vnic->fw_vnic_id == INVALID_VNIC_ID) 741 continue; 742 743 STAILQ_FOREACH(mf, &vnic->filter, next) { 744 745 if (mf->matching_l2_fltr_ptr) 746 continue; 747 748 if (mf->ethertype == nf->ethertype && 749 mf->l2_ovlan == nf->l2_ovlan && 750 mf->l2_ovlan_mask == nf->l2_ovlan_mask && 751 mf->l2_ivlan == nf->l2_ivlan && 752 mf->l2_ivlan_mask == nf->l2_ivlan_mask && 753 !memcmp(mf->src_macaddr, nf->src_macaddr, 754 RTE_ETHER_ADDR_LEN) && 755 !memcmp(mf->dst_macaddr, nf->dst_macaddr, 756 RTE_ETHER_ADDR_LEN)) 757 return mf; 758 } 759 } 760 return NULL; 761 } 762 763 static struct bnxt_filter_info * 764 bnxt_create_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf, 765 struct bnxt_vnic_info *vnic) 766 { 767 struct bnxt_filter_info *filter1; 768 int rc; 769 770 /* Alloc new L2 filter. 771 * This flow needs MAC filter which does not match any existing 772 * L2 filters. 773 */ 774 filter1 = bnxt_get_unused_filter(bp); 775 if (filter1 == NULL) 776 return NULL; 777 778 memcpy(filter1, nf, sizeof(*filter1)); 779 780 filter1->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_XDP_DISABLE; 781 filter1->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX; 782 if (nf->valid_flags & BNXT_FLOW_L2_SRC_VALID_FLAG || 783 nf->valid_flags & BNXT_FLOW_L2_DST_VALID_FLAG) { 784 filter1->flags |= 785 HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST; 786 PMD_DRV_LOG(DEBUG, "Create Outer filter\n"); 787 } 788 789 if (nf->filter_type == HWRM_CFA_L2_FILTER && 790 (nf->valid_flags & BNXT_FLOW_L2_SRC_VALID_FLAG || 791 nf->valid_flags & BNXT_FLOW_L2_INNER_SRC_VALID_FLAG)) { 792 PMD_DRV_LOG(DEBUG, "Create L2 filter for SRC MAC\n"); 793 filter1->flags |= 794 HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_SOURCE_VALID; 795 memcpy(filter1->l2_addr, nf->src_macaddr, RTE_ETHER_ADDR_LEN); 796 } else { 797 PMD_DRV_LOG(DEBUG, "Create L2 filter for DST MAC\n"); 798 memcpy(filter1->l2_addr, nf->dst_macaddr, RTE_ETHER_ADDR_LEN); 799 } 800 801 if (nf->priority && 802 (nf->valid_flags & BNXT_FLOW_L2_DST_VALID_FLAG || 803 nf->valid_flags & BNXT_FLOW_L2_INNER_DST_VALID_FLAG)) { 804 /* Tell the FW where to place the filter in the table. */ 805 if (nf->priority > 65535) { 806 filter1->pri_hint = 807 HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_BELOW_FILTER; 808 /* This will place the filter in TCAM */ 809 filter1->l2_filter_id_hint = (uint64_t)-1; 810 } 811 } 812 813 if (nf->valid_flags & (BNXT_FLOW_L2_DST_VALID_FLAG | 814 BNXT_FLOW_L2_SRC_VALID_FLAG | 815 BNXT_FLOW_L2_INNER_SRC_VALID_FLAG | 816 BNXT_FLOW_L2_INNER_DST_VALID_FLAG)) { 817 filter1->enables = 818 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR | 819 L2_FILTER_ALLOC_INPUT_EN_L2_ADDR_MASK; 820 memset(filter1->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN); 821 } 822 823 if (nf->valid_flags & BNXT_FLOW_L2_DROP_FLAG) { 824 filter1->flags |= 825 HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_DROP; 826 if (nf->ethertype == RTE_ETHER_TYPE_IPV4) { 827 /* Num VLANs for drop filter will/should be 0. 828 * If the req is memset to 0, then the count will 829 * be automatically set to 0. 
830 */ 831 if (nf->valid_flags & BNXT_FLOW_PARSE_INNER_FLAG) { 832 filter1->enables |= 833 L2_FILTER_ALLOC_INPUT_EN_T_NUM_VLANS; 834 } else { 835 filter1->enables |= 836 L2_FILTER_ALLOC_INPUT_EN_NUM_VLANS; 837 filter1->flags |= 838 HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST; 839 } 840 } 841 } 842 843 rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, 844 filter1); 845 if (rc) { 846 bnxt_free_filter(bp, filter1); 847 return NULL; 848 } 849 return filter1; 850 } 851 852 struct bnxt_filter_info * 853 bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf, 854 struct bnxt_vnic_info *vnic) 855 { 856 struct bnxt_filter_info *l2_filter = NULL; 857 858 l2_filter = bnxt_find_matching_l2_filter(bp, nf); 859 if (l2_filter) { 860 l2_filter->l2_ref_cnt++; 861 } else { 862 l2_filter = bnxt_create_l2_filter(bp, nf, vnic); 863 if (l2_filter) { 864 STAILQ_INSERT_TAIL(&vnic->filter, l2_filter, next); 865 l2_filter->vnic = vnic; 866 } 867 } 868 nf->matching_l2_fltr_ptr = l2_filter; 869 870 return l2_filter; 871 } 872 873 static void bnxt_vnic_cleanup(struct bnxt *bp, struct bnxt_vnic_info *vnic) 874 { 875 if (vnic->rx_queue_cnt > 1) 876 bnxt_hwrm_vnic_ctx_free(bp, vnic); 877 878 bnxt_hwrm_vnic_free(bp, vnic); 879 880 rte_free(vnic->fw_grp_ids); 881 vnic->fw_grp_ids = NULL; 882 883 vnic->rx_queue_cnt = 0; 884 vnic->hash_type = 0; 885 } 886 887 static int bnxt_vnic_prep(struct bnxt *bp, struct bnxt_vnic_info *vnic, 888 const struct rte_flow_action *act, 889 struct rte_flow_error *error) 890 { 891 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; 892 uint64_t rx_offloads = dev_conf->rxmode.offloads; 893 int rc; 894 895 if (bp->nr_vnics > bp->max_vnics - 1) 896 return rte_flow_error_set(error, EINVAL, 897 RTE_FLOW_ERROR_TYPE_ATTR_GROUP, 898 NULL, 899 "Group id is invalid"); 900 901 rc = bnxt_vnic_grp_alloc(bp, vnic); 902 if (rc) 903 return rte_flow_error_set(error, -rc, 904 RTE_FLOW_ERROR_TYPE_ACTION, 905 act, 906 "Failed to alloc VNIC group"); 907 908 /* populate the fw group table */ 909 bnxt_vnic_ring_grp_populate(bp, vnic); 910 bnxt_vnic_rules_init(vnic); 911 912 rc = bnxt_hwrm_vnic_alloc(bp, vnic); 913 if (rc) { 914 rte_flow_error_set(error, -rc, 915 RTE_FLOW_ERROR_TYPE_ACTION, 916 act, 917 "Failed to alloc VNIC"); 918 goto ret; 919 } 920 921 /* RSS context is required only when there is more than one RSS ring */ 922 if (vnic->rx_queue_cnt > 1) { 923 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0); 924 if (rc) { 925 rte_flow_error_set(error, -rc, 926 RTE_FLOW_ERROR_TYPE_ACTION, 927 act, 928 "Failed to alloc VNIC context"); 929 goto ret; 930 } 931 } 932 933 if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) 934 vnic->vlan_strip = true; 935 else 936 vnic->vlan_strip = false; 937 938 rc = bnxt_hwrm_vnic_cfg(bp, vnic); 939 if (rc) { 940 rte_flow_error_set(error, -rc, 941 RTE_FLOW_ERROR_TYPE_ACTION, 942 act, 943 "Failed to configure VNIC"); 944 goto ret; 945 } 946 947 rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic); 948 if (rc) { 949 rte_flow_error_set(error, -rc, 950 RTE_FLOW_ERROR_TYPE_ACTION, 951 act, 952 "Failed to configure VNIC plcmode"); 953 goto ret; 954 } 955 956 bp->nr_vnics++; 957 958 return 0; 959 960 ret: 961 bnxt_vnic_cleanup(bp, vnic); 962 return rc; 963 } 964 965 static int match_vnic_rss_cfg(struct bnxt *bp, 966 struct bnxt_vnic_info *vnic, 967 const struct rte_flow_action_rss *rss) 968 { 969 unsigned int match = 0, i; 970 971 if (vnic->rx_queue_cnt != rss->queue_num) 972 return -EINVAL; 973 974 for (i = 0; i < rss->queue_num; i++) { 975 if 
(!bp->rx_queues[rss->queue[i]]->vnic->rx_queue_cnt && 976 !bp->rx_queues[rss->queue[i]]->rx_started) 977 return -EINVAL; 978 } 979 980 for (i = 0; i < vnic->rx_queue_cnt; i++) { 981 int j; 982 983 for (j = 0; j < vnic->rx_queue_cnt; j++) { 984 if (bp->grp_info[rss->queue[i]].fw_grp_id == 985 vnic->fw_grp_ids[j]) 986 match++; 987 } 988 } 989 990 if (match != vnic->rx_queue_cnt) { 991 PMD_DRV_LOG(ERR, 992 "VNIC queue count %d vs queues matched %d\n", 993 match, vnic->rx_queue_cnt); 994 return -EINVAL; 995 } 996 997 return 0; 998 } 999 1000 static void 1001 bnxt_update_filter_flags_en(struct bnxt_filter_info *filter, 1002 struct bnxt_filter_info *filter1, 1003 int use_ntuple) 1004 { 1005 if (!use_ntuple && 1006 !(filter->valid_flags & 1007 ~(BNXT_FLOW_L2_DST_VALID_FLAG | 1008 BNXT_FLOW_L2_SRC_VALID_FLAG | 1009 BNXT_FLOW_L2_INNER_SRC_VALID_FLAG | 1010 BNXT_FLOW_L2_INNER_DST_VALID_FLAG | 1011 BNXT_FLOW_L2_DROP_FLAG | 1012 BNXT_FLOW_PARSE_INNER_FLAG))) { 1013 filter->flags = filter1->flags; 1014 filter->enables = filter1->enables; 1015 filter->filter_type = HWRM_CFA_L2_FILTER; 1016 memcpy(filter->l2_addr, filter1->l2_addr, RTE_ETHER_ADDR_LEN); 1017 memset(filter->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN); 1018 filter->pri_hint = filter1->pri_hint; 1019 filter->l2_filter_id_hint = filter1->l2_filter_id_hint; 1020 } 1021 filter->fw_l2_filter_id = filter1->fw_l2_filter_id; 1022 filter->l2_ref_cnt = filter1->l2_ref_cnt; 1023 filter->flow_id = filter1->flow_id; 1024 PMD_DRV_LOG(DEBUG, 1025 "l2_filter: %p fw_l2_filter_id %" PRIx64 " l2_ref_cnt %u\n", 1026 filter1, filter->fw_l2_filter_id, filter->l2_ref_cnt); 1027 } 1028 1029 static int 1030 bnxt_validate_rss_action(const struct rte_flow_action actions[]) 1031 { 1032 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 1033 switch (actions->type) { 1034 case RTE_FLOW_ACTION_TYPE_VOID: 1035 break; 1036 case RTE_FLOW_ACTION_TYPE_RSS: 1037 break; 1038 default: 1039 return -ENOTSUP; 1040 } 1041 } 1042 1043 return 0; 1044 } 1045 1046 static int 1047 bnxt_get_vnic(struct bnxt *bp, uint32_t group) 1048 { 1049 int vnic_id = 0; 1050 1051 /* For legacy NS3 based implementations, 1052 * group_id will be mapped to a VNIC ID. 1053 */ 1054 if (BNXT_STINGRAY(bp)) 1055 vnic_id = group; 1056 1057 /* Non NS3 cases, group_id will be ignored. 1058 * Setting will be configured on default VNIC. 
1059 */ 1060 return vnic_id; 1061 } 1062 1063 static int 1064 bnxt_vnic_rss_cfg_update(struct bnxt *bp, 1065 struct bnxt_vnic_info *vnic, 1066 const struct rte_flow_action *act, 1067 struct rte_flow_error *error) 1068 { 1069 const struct rte_flow_action_rss *rss; 1070 unsigned int rss_idx, i, j, fw_idx; 1071 uint32_t hash_type; 1072 uint64_t types; 1073 int rc; 1074 1075 rss = (const struct rte_flow_action_rss *)act->conf; 1076 1077 /* must specify either all the Rx queues created by application or zero queues */ 1078 if (rss->queue_num && vnic->rx_queue_cnt != rss->queue_num) { 1079 rte_flow_error_set(error, 1080 EINVAL, 1081 RTE_FLOW_ERROR_TYPE_ACTION, 1082 act, 1083 "Incorrect RXQ count"); 1084 rc = -rte_errno; 1085 goto ret; 1086 } 1087 1088 /* Validate Rx queues */ 1089 for (i = 0; i < rss->queue_num; i++) { 1090 PMD_DRV_LOG(DEBUG, "RSS action Queue %d\n", rss->queue[i]); 1091 1092 if (rss->queue[i] >= bp->rx_nr_rings || 1093 !bp->rx_queues[rss->queue[i]]) { 1094 rte_flow_error_set(error, 1095 EINVAL, 1096 RTE_FLOW_ERROR_TYPE_ACTION, 1097 act, 1098 "Invalid queue ID for RSS"); 1099 rc = -rte_errno; 1100 goto ret; 1101 } 1102 } 1103 1104 /* Duplicate queue ids are not supported. */ 1105 for (i = 0; i < rss->queue_num; i++) { 1106 for (j = i + 1; j < rss->queue_num; j++) { 1107 if (rss->queue[i] == rss->queue[j]) { 1108 rte_flow_error_set(error, 1109 EINVAL, 1110 RTE_FLOW_ERROR_TYPE_ACTION, 1111 act, 1112 "Duplicate queue ID for RSS"); 1113 rc = -rte_errno; 1114 goto ret; 1115 } 1116 } 1117 } 1118 1119 if (BNXT_IS_HASH_FUNC_DEFAULT(rss->func) && 1120 BNXT_IS_HASH_FUNC_TOEPLITZ(rss->func) && 1121 BNXT_IS_HASH_FUNC_SIMPLE_XOR(bp, rss->func)) { 1122 rte_flow_error_set(error, 1123 ENOTSUP, 1124 RTE_FLOW_ERROR_TYPE_ACTION, 1125 act, 1126 "Unsupported RSS hash function"); 1127 rc = -rte_errno; 1128 goto ret; 1129 } 1130 1131 /* key_len should match the hash key supported by hardware */ 1132 if (rss->key_len != 0 && rss->key_len != HW_HASH_KEY_SIZE) { 1133 rte_flow_error_set(error, 1134 EINVAL, 1135 RTE_FLOW_ERROR_TYPE_ACTION, 1136 act, 1137 "Incorrect hash key parameters"); 1138 rc = -rte_errno; 1139 goto ret; 1140 } 1141 1142 /* Currently RSS hash on inner and outer headers are supported. 1143 * 0 => Default (innermost RSS) setting 1144 * 1 => Outermost 1145 */ 1146 if (rss->level > 1) { 1147 rte_flow_error_set(error, 1148 ENOTSUP, 1149 RTE_FLOW_ERROR_TYPE_ACTION, 1150 act, 1151 "Unsupported hash level"); 1152 rc = -rte_errno; 1153 goto ret; 1154 } 1155 1156 if ((rss->queue_num == 0 && rss->queue != NULL) || 1157 (rss->queue_num != 0 && rss->queue == NULL)) { 1158 rte_flow_error_set(error, 1159 EINVAL, 1160 RTE_FLOW_ERROR_TYPE_ACTION, 1161 act, 1162 "Invalid queue config specified"); 1163 rc = -rte_errno; 1164 goto ret; 1165 } 1166 1167 /* If RSS types is 0, use a best effort configuration */ 1168 types = rss->types ? rss->types : RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6; 1169 1170 hash_type = bnxt_rte_to_hwrm_hash_types(types); 1171 1172 /* If requested types can't be supported, leave existing settings */ 1173 if (hash_type) 1174 vnic->hash_type = hash_type; 1175 1176 vnic->hash_mode = 1177 bnxt_rte_to_hwrm_hash_level(bp, rss->types, rss->level); 1178 1179 /* For P7 chips update the hash_type if hash_type not explicitly passed. 1180 * TODO: For P5 chips. 
1181 */ 1182 if (BNXT_CHIP_P7(bp) && 1183 vnic->hash_mode == BNXT_HASH_MODE_DEFAULT && !hash_type) 1184 vnic->hash_type = HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4 | 1185 HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6; 1186 1187 /* TODO: 1188 * hash will be performed on the L3 and L4 packet headers. 1189 * specific RSS hash types like IPv4-TCP etc... or L4-chksum or IPV4-chksum 1190 * will NOT have any bearing and will not be honored. 1191 * Check and reject flow create accordingly. TODO. 1192 */ 1193 1194 rc = bnxt_rte_flow_to_hwrm_ring_select_mode(rss->func, 1195 rss->types, 1196 bp, vnic); 1197 if (rc) { 1198 rte_flow_error_set(error, 1199 ENOTSUP, 1200 RTE_FLOW_ERROR_TYPE_ACTION, 1201 act, 1202 "Unsupported RSS hash parameters"); 1203 rc = -rte_errno; 1204 goto ret; 1205 } 1206 1207 /* Update RSS key only if key_len != 0 */ 1208 if (rss->key_len != 0) 1209 memcpy(vnic->rss_hash_key, rss->key, rss->key_len); 1210 1211 if (rss->queue_num == 0) 1212 goto skip_rss_table; 1213 1214 /* Prepare the indirection table */ 1215 for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE; 1216 rss_idx++, fw_idx++) { 1217 uint8_t *rxq_state = bp->eth_dev->data->rx_queue_state; 1218 struct bnxt_rx_queue *rxq; 1219 uint32_t idx; 1220 1221 for (i = 0; i < bp->rx_cp_nr_rings; i++) { 1222 idx = rss->queue[fw_idx % rss->queue_num]; 1223 if (rxq_state[idx] != RTE_ETH_QUEUE_STATE_STOPPED) 1224 break; 1225 fw_idx++; 1226 } 1227 1228 if (i == bp->rx_cp_nr_rings) 1229 return 0; 1230 1231 if (BNXT_CHIP_P5_P7(bp)) { 1232 rxq = bp->rx_queues[idx]; 1233 vnic->rss_table[rss_idx * 2] = 1234 rxq->rx_ring->rx_ring_struct->fw_ring_id; 1235 vnic->rss_table[rss_idx * 2 + 1] = 1236 rxq->cp_ring->cp_ring_struct->fw_ring_id; 1237 } else { 1238 vnic->rss_table[rss_idx] = vnic->fw_grp_ids[idx]; 1239 } 1240 } 1241 1242 skip_rss_table: 1243 rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic); 1244 if (rc != 0) { 1245 rte_flow_error_set(error, 1246 -rc, 1247 RTE_FLOW_ERROR_TYPE_ACTION, 1248 act, 1249 "VNIC RSS configure failed"); 1250 rc = -rte_errno; 1251 goto ret; 1252 } 1253 ret: 1254 return rc; 1255 } 1256 1257 static int 1258 bnxt_validate_and_parse_flow(struct rte_eth_dev *dev, 1259 const struct rte_flow_item pattern[], 1260 const struct rte_flow_action actions[], 1261 const struct rte_flow_attr *attr, 1262 struct rte_flow_error *error, 1263 struct bnxt_filter_info *filter) 1264 { 1265 const struct rte_flow_action *act = 1266 bnxt_flow_non_void_action(actions); 1267 struct bnxt *bp = dev->data->dev_private; 1268 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; 1269 struct bnxt_vnic_info *vnic = NULL, *vnic0 = NULL; 1270 const struct rte_flow_action_queue *act_q; 1271 const struct rte_flow_action_vf *act_vf; 1272 struct bnxt_filter_info *filter1 = NULL; 1273 const struct rte_flow_action_rss *rss; 1274 struct bnxt_rx_queue *rxq = NULL; 1275 int dflt_vnic, vnic_id; 1276 unsigned int rss_idx; 1277 uint32_t vf = 0, i; 1278 int rc, use_ntuple; 1279 1280 rc = 1281 bnxt_validate_and_parse_flow_type(attr, pattern, error, filter); 1282 if (rc != 0) 1283 goto ret; 1284 1285 rc = bnxt_flow_parse_attr(attr, error); 1286 if (rc != 0) 1287 goto ret; 1288 1289 /* Since we support ingress attribute only - right now. */ 1290 if (filter->filter_type == HWRM_CFA_EM_FILTER) 1291 filter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX; 1292 1293 use_ntuple = bnxt_filter_type_check(pattern, error); 1294 1295 start: 1296 switch (act->type) { 1297 case RTE_FLOW_ACTION_TYPE_QUEUE: 1298 /* Allow this flow. Redirect to a VNIC. 
*/ 1299 act_q = (const struct rte_flow_action_queue *)act->conf; 1300 if (!act_q->index || act_q->index >= bp->rx_nr_rings) { 1301 rte_flow_error_set(error, 1302 EINVAL, 1303 RTE_FLOW_ERROR_TYPE_ACTION, 1304 act, 1305 "Invalid queue ID."); 1306 rc = -rte_errno; 1307 goto ret; 1308 } 1309 PMD_DRV_LOG(DEBUG, "Queue index %d\n", act_q->index); 1310 1311 vnic_id = attr->group; 1312 if (!vnic_id) { 1313 PMD_DRV_LOG(DEBUG, "Group id is 0\n"); 1314 vnic_id = act_q->index; 1315 } 1316 1317 BNXT_VALID_VNIC_OR_RET(bp, vnic_id); 1318 1319 vnic = &bp->vnic_info[vnic_id]; 1320 if (vnic->rx_queue_cnt) { 1321 if (vnic->start_grp_id != act_q->index) { 1322 PMD_DRV_LOG(ERR, 1323 "VNIC already in use\n"); 1324 rte_flow_error_set(error, 1325 EINVAL, 1326 RTE_FLOW_ERROR_TYPE_ACTION, 1327 act, 1328 "VNIC already in use"); 1329 rc = -rte_errno; 1330 goto ret; 1331 } 1332 goto use_vnic; 1333 } 1334 1335 rxq = bp->rx_queues[act_q->index]; 1336 1337 if (!(dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) && rxq && 1338 vnic->fw_vnic_id != INVALID_HW_RING_ID) 1339 goto use_vnic; 1340 1341 if (!rxq) { 1342 PMD_DRV_LOG(ERR, 1343 "Queue invalid or used with other VNIC\n"); 1344 rte_flow_error_set(error, 1345 EINVAL, 1346 RTE_FLOW_ERROR_TYPE_ACTION, 1347 act, 1348 "Queue invalid queue or in use"); 1349 rc = -rte_errno; 1350 goto ret; 1351 } 1352 1353 rxq->vnic = vnic; 1354 rxq->rx_started = 1; 1355 vnic->rx_queue_cnt++; 1356 vnic->start_grp_id = act_q->index; 1357 vnic->end_grp_id = act_q->index; 1358 vnic->func_default = 0; //This is not a default VNIC. 1359 1360 PMD_DRV_LOG(DEBUG, "VNIC found\n"); 1361 1362 rc = bnxt_vnic_prep(bp, vnic, act, error); 1363 if (rc) 1364 goto ret; 1365 1366 PMD_DRV_LOG(DEBUG, 1367 "vnic[%d] = %p vnic->fw_grp_ids = %p\n", 1368 act_q->index, vnic, vnic->fw_grp_ids); 1369 1370 use_vnic: 1371 vnic->ff_pool_idx = vnic_id; 1372 PMD_DRV_LOG(DEBUG, 1373 "Setting vnic ff_idx %d\n", vnic->ff_pool_idx); 1374 filter->dst_id = vnic->fw_vnic_id; 1375 1376 /* For ntuple filter, create the L2 filter with default VNIC. 1377 * The user specified redirect queue will be set while creating 1378 * the ntuple filter in hardware. 
1379 */ 1380 vnic0 = bnxt_get_default_vnic(bp); 1381 if (use_ntuple) 1382 filter1 = bnxt_get_l2_filter(bp, filter, vnic0); 1383 else 1384 filter1 = bnxt_get_l2_filter(bp, filter, vnic); 1385 if (filter1 == NULL) { 1386 rte_flow_error_set(error, 1387 ENOSPC, 1388 RTE_FLOW_ERROR_TYPE_ACTION, 1389 act, 1390 "Filter not available"); 1391 rc = -rte_errno; 1392 goto ret; 1393 } 1394 1395 PMD_DRV_LOG(DEBUG, "new fltr: %p l2fltr: %p l2_ref_cnt: %d\n", 1396 filter, filter1, filter1->l2_ref_cnt); 1397 bnxt_update_filter_flags_en(filter, filter1, use_ntuple); 1398 break; 1399 case RTE_FLOW_ACTION_TYPE_DROP: 1400 vnic0 = &bp->vnic_info[0]; 1401 filter->dst_id = vnic0->fw_vnic_id; 1402 filter->valid_flags |= BNXT_FLOW_L2_DROP_FLAG; 1403 filter1 = bnxt_get_l2_filter(bp, filter, vnic0); 1404 if (filter1 == NULL) { 1405 rte_flow_error_set(error, 1406 ENOSPC, 1407 RTE_FLOW_ERROR_TYPE_ACTION, 1408 act, 1409 "Filter not available"); 1410 rc = -rte_errno; 1411 goto ret; 1412 } 1413 1414 if (filter->filter_type == HWRM_CFA_EM_FILTER) 1415 filter->flags = 1416 HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP; 1417 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) 1418 filter->flags = 1419 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP; 1420 1421 bnxt_update_filter_flags_en(filter, filter1, use_ntuple); 1422 break; 1423 case RTE_FLOW_ACTION_TYPE_VF: 1424 act_vf = (const struct rte_flow_action_vf *)act->conf; 1425 vf = act_vf->id; 1426 1427 if (filter->tunnel_type == 1428 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN || 1429 filter->tunnel_type == 1430 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE) { 1431 /* If issued on a VF, ensure id is 0 and is trusted */ 1432 if (BNXT_VF(bp)) { 1433 if (!BNXT_VF_IS_TRUSTED(bp) || vf) { 1434 rte_flow_error_set(error, EINVAL, 1435 RTE_FLOW_ERROR_TYPE_ACTION, 1436 act, 1437 "Incorrect VF"); 1438 rc = -rte_errno; 1439 goto ret; 1440 } 1441 } 1442 1443 filter->enables |= filter->tunnel_type; 1444 filter->filter_type = HWRM_CFA_TUNNEL_REDIRECT_FILTER; 1445 goto done; 1446 } 1447 1448 if (vf >= bp->pdev->max_vfs) { 1449 rte_flow_error_set(error, 1450 EINVAL, 1451 RTE_FLOW_ERROR_TYPE_ACTION, 1452 act, 1453 "Incorrect VF id!"); 1454 rc = -rte_errno; 1455 goto ret; 1456 } 1457 1458 filter->mirror_vnic_id = 1459 dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf); 1460 if (dflt_vnic < 0) { 1461 /* This simply indicates there's no driver loaded. 1462 * This is not an error. 
1463 */ 1464 rte_flow_error_set(error, 1465 EINVAL, 1466 RTE_FLOW_ERROR_TYPE_ACTION, 1467 act, 1468 "Unable to get default VNIC for VF"); 1469 rc = -rte_errno; 1470 goto ret; 1471 } 1472 1473 filter->mirror_vnic_id = dflt_vnic; 1474 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID; 1475 1476 vnic0 = &bp->vnic_info[0]; 1477 filter1 = bnxt_get_l2_filter(bp, filter, vnic0); 1478 if (filter1 == NULL) { 1479 rte_flow_error_set(error, 1480 ENOSPC, 1481 RTE_FLOW_ERROR_TYPE_ACTION, 1482 act, 1483 "New filter not available"); 1484 rc = -rte_errno; 1485 goto ret; 1486 } 1487 1488 filter->fw_l2_filter_id = filter1->fw_l2_filter_id; 1489 filter->flow_id = filter1->flow_id; 1490 break; 1491 case RTE_FLOW_ACTION_TYPE_RSS: 1492 rc = bnxt_validate_rss_action(actions); 1493 if (rc != 0) { 1494 rte_flow_error_set(error, 1495 EINVAL, 1496 RTE_FLOW_ERROR_TYPE_ACTION, 1497 act, 1498 "Invalid actions specified with RSS"); 1499 rc = -rte_errno; 1500 goto ret; 1501 } 1502 1503 rss = (const struct rte_flow_action_rss *)act->conf; 1504 1505 vnic_id = bnxt_get_vnic(bp, attr->group); 1506 1507 BNXT_VALID_VNIC_OR_RET(bp, vnic_id); 1508 vnic = &bp->vnic_info[vnic_id]; 1509 1510 /* 1511 * For non NS3 cases, rte_flow_items will not be considered 1512 * for RSS updates. 1513 */ 1514 if (filter->filter_type == HWRM_CFA_CONFIG) { 1515 /* RSS config update requested */ 1516 rc = bnxt_vnic_rss_cfg_update(bp, vnic, act, error); 1517 if (rc != 0) 1518 goto ret; 1519 1520 filter->dst_id = vnic->fw_vnic_id; 1521 break; 1522 } 1523 1524 /* Check if requested RSS config matches RSS config of VNIC 1525 * only if it is not a fresh VNIC configuration. 1526 * Otherwise the existing VNIC configuration can be used. 1527 */ 1528 if (vnic->rx_queue_cnt) { 1529 rc = match_vnic_rss_cfg(bp, vnic, rss); 1530 if (rc) { 1531 PMD_DRV_LOG(ERR, 1532 "VNIC and RSS config mismatch\n"); 1533 rte_flow_error_set(error, 1534 EINVAL, 1535 RTE_FLOW_ERROR_TYPE_ACTION, 1536 act, 1537 "VNIC and RSS cfg mismatch"); 1538 rc = -rte_errno; 1539 goto ret; 1540 } 1541 goto vnic_found; 1542 } 1543 1544 for (i = 0; i < rss->queue_num; i++) { 1545 PMD_DRV_LOG(DEBUG, "RSS action Queue %d\n", 1546 rss->queue[i]); 1547 1548 if (!rss->queue[i] || 1549 rss->queue[i] >= bp->rx_nr_rings || 1550 !bp->rx_queues[rss->queue[i]]) { 1551 rte_flow_error_set(error, 1552 EINVAL, 1553 RTE_FLOW_ERROR_TYPE_ACTION, 1554 act, 1555 "Invalid queue ID for RSS"); 1556 rc = -rte_errno; 1557 goto ret; 1558 } 1559 rxq = bp->rx_queues[rss->queue[i]]; 1560 1561 if (bp->vnic_info[0].fw_grp_ids[rss->queue[i]] != 1562 INVALID_HW_RING_ID) { 1563 PMD_DRV_LOG(ERR, 1564 "queue active with other VNIC\n"); 1565 rte_flow_error_set(error, 1566 EINVAL, 1567 RTE_FLOW_ERROR_TYPE_ACTION, 1568 act, 1569 "Invalid queue ID for RSS"); 1570 rc = -rte_errno; 1571 goto ret; 1572 } 1573 1574 rxq->vnic = vnic; 1575 rxq->rx_started = 1; 1576 vnic->rx_queue_cnt++; 1577 } 1578 1579 vnic->start_grp_id = rss->queue[0]; 1580 vnic->end_grp_id = rss->queue[rss->queue_num - 1]; 1581 vnic->func_default = 0; //This is not a default VNIC. 1582 1583 rc = bnxt_vnic_prep(bp, vnic, act, error); 1584 if (rc) 1585 goto ret; 1586 1587 PMD_DRV_LOG(DEBUG, 1588 "vnic[%d] = %p vnic->fw_grp_ids = %p\n", 1589 vnic_id, vnic, vnic->fw_grp_ids); 1590 1591 vnic->ff_pool_idx = vnic_id; 1592 PMD_DRV_LOG(DEBUG, 1593 "Setting vnic ff_pool_idx %d\n", vnic->ff_pool_idx); 1594 1595 /* This can be done only after vnic_grp_alloc is done. 
*/ 1596 for (i = 0; i < vnic->rx_queue_cnt; i++) { 1597 vnic->fw_grp_ids[i] = 1598 bp->grp_info[rss->queue[i]].fw_grp_id; 1599 /* Make sure vnic0 does not use these rings. */ 1600 bp->vnic_info[0].fw_grp_ids[rss->queue[i]] = 1601 INVALID_HW_RING_ID; 1602 } 1603 1604 for (rss_idx = 0; rss_idx < HW_HASH_INDEX_SIZE; ) { 1605 for (i = 0; i < vnic->rx_queue_cnt; i++) 1606 vnic->rss_table[rss_idx++] = 1607 vnic->fw_grp_ids[i]; 1608 } 1609 1610 /* Configure RSS only if the queue count is > 1 */ 1611 if (vnic->rx_queue_cnt > 1) { 1612 vnic->hash_type = 1613 bnxt_rte_to_hwrm_hash_types(rss->types); 1614 vnic->hash_mode = 1615 bnxt_rte_to_hwrm_hash_level(bp, rss->types, rss->level); 1616 1617 if (!rss->key_len) { 1618 /* If hash key has not been specified, 1619 * use random hash key. 1620 */ 1621 bnxt_prandom_bytes(vnic->rss_hash_key, 1622 HW_HASH_KEY_SIZE); 1623 } else { 1624 if (rss->key_len > HW_HASH_KEY_SIZE) 1625 memcpy(vnic->rss_hash_key, 1626 rss->key, 1627 HW_HASH_KEY_SIZE); 1628 else 1629 memcpy(vnic->rss_hash_key, 1630 rss->key, 1631 rss->key_len); 1632 } 1633 bnxt_hwrm_vnic_rss_cfg(bp, vnic); 1634 } else { 1635 PMD_DRV_LOG(DEBUG, "No RSS config required\n"); 1636 } 1637 1638 vnic_found: 1639 filter->dst_id = vnic->fw_vnic_id; 1640 filter1 = bnxt_get_l2_filter(bp, filter, vnic); 1641 if (filter1 == NULL) { 1642 rte_flow_error_set(error, 1643 ENOSPC, 1644 RTE_FLOW_ERROR_TYPE_ACTION, 1645 act, 1646 "New filter not available"); 1647 rc = -rte_errno; 1648 goto ret; 1649 } 1650 1651 PMD_DRV_LOG(DEBUG, "L2 filter created\n"); 1652 bnxt_update_filter_flags_en(filter, filter1, use_ntuple); 1653 break; 1654 case RTE_FLOW_ACTION_TYPE_MARK: 1655 if (bp->mark_table == NULL) { 1656 rte_flow_error_set(error, 1657 ENOMEM, 1658 RTE_FLOW_ERROR_TYPE_ACTION, 1659 act, 1660 "Mark table not allocated."); 1661 rc = -rte_errno; 1662 goto ret; 1663 } 1664 1665 if (bp->flags & BNXT_FLAG_RX_VECTOR_PKT_MODE) { 1666 PMD_DRV_LOG(DEBUG, 1667 "Disabling vector processing for mark\n"); 1668 bp->eth_dev->rx_pkt_burst = bnxt_recv_pkts; 1669 bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE; 1670 } 1671 1672 filter->valid_flags |= BNXT_FLOW_MARK_FLAG; 1673 filter->mark = ((const struct rte_flow_action_mark *) 1674 act->conf)->id; 1675 PMD_DRV_LOG(DEBUG, "Mark the flow %d\n", filter->mark); 1676 break; 1677 default: 1678 rte_flow_error_set(error, 1679 EINVAL, 1680 RTE_FLOW_ERROR_TYPE_ACTION, 1681 act, 1682 "Invalid action."); 1683 rc = -rte_errno; 1684 goto ret; 1685 } 1686 1687 done: 1688 act = bnxt_flow_non_void_action(++act); 1689 while (act->type != RTE_FLOW_ACTION_TYPE_END) 1690 goto start; 1691 1692 return rc; 1693 ret: 1694 1695 if (filter1) { 1696 bnxt_hwrm_clear_l2_filter(bp, filter1); 1697 bnxt_free_filter(bp, filter1); 1698 } 1699 1700 if (rte_errno) { 1701 if (vnic && STAILQ_EMPTY(&vnic->filter)) 1702 vnic->rx_queue_cnt = 0; 1703 1704 if (rxq && !vnic->rx_queue_cnt) 1705 rxq->vnic = &bp->vnic_info[0]; 1706 } 1707 return -rte_errno; 1708 } 1709 1710 static 1711 struct bnxt_vnic_info *find_matching_vnic(struct bnxt *bp, 1712 struct bnxt_filter_info *filter) 1713 { 1714 struct bnxt_vnic_info *vnic = NULL; 1715 unsigned int i; 1716 1717 for (i = 0; i < bp->max_vnics; i++) { 1718 vnic = &bp->vnic_info[i]; 1719 if (vnic->fw_vnic_id != INVALID_VNIC_ID && 1720 filter->dst_id == vnic->fw_vnic_id) { 1721 PMD_DRV_LOG(DEBUG, "Found matching VNIC Id %d\n", 1722 vnic->ff_pool_idx); 1723 return vnic; 1724 } 1725 } 1726 return NULL; 1727 } 1728 1729 static int 1730 bnxt_flow_validate(struct rte_eth_dev *dev, 1731 const struct 
rte_flow_attr *attr, 1732 const struct rte_flow_item pattern[], 1733 const struct rte_flow_action actions[], 1734 struct rte_flow_error *error) 1735 { 1736 struct bnxt *bp = dev->data->dev_private; 1737 struct bnxt_vnic_info *vnic = NULL; 1738 struct bnxt_filter_info *filter; 1739 int ret = 0; 1740 1741 bnxt_acquire_flow_lock(bp); 1742 ret = bnxt_flow_args_validate(attr, pattern, actions, error); 1743 if (ret != 0) { 1744 bnxt_release_flow_lock(bp); 1745 return ret; 1746 } 1747 1748 filter = bnxt_get_unused_filter(bp); 1749 if (filter == NULL) { 1750 rte_flow_error_set(error, ENOSPC, 1751 RTE_FLOW_ERROR_TYPE_HANDLE, NULL, 1752 "Not enough resources for a new flow"); 1753 bnxt_release_flow_lock(bp); 1754 return -ENOSPC; 1755 } 1756 1757 ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr, 1758 error, filter); 1759 if (ret) 1760 goto exit; 1761 1762 vnic = find_matching_vnic(bp, filter); 1763 if (vnic) { 1764 if (STAILQ_EMPTY(&vnic->filter)) { 1765 bnxt_vnic_cleanup(bp, vnic); 1766 bp->nr_vnics--; 1767 PMD_DRV_LOG(DEBUG, "Free VNIC\n"); 1768 } 1769 } 1770 1771 if (filter->filter_type == HWRM_CFA_EM_FILTER) 1772 bnxt_hwrm_clear_em_filter(bp, filter); 1773 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) 1774 bnxt_hwrm_clear_ntuple_filter(bp, filter); 1775 else 1776 bnxt_hwrm_clear_l2_filter(bp, filter); 1777 1778 exit: 1779 /* No need to hold on to this filter if we are just validating flow */ 1780 bnxt_free_filter(bp, filter); 1781 bnxt_release_flow_lock(bp); 1782 1783 return ret; 1784 } 1785 1786 static void 1787 bnxt_update_filter(struct bnxt *bp, struct bnxt_filter_info *old_filter, 1788 struct bnxt_filter_info *new_filter) 1789 { 1790 /* Clear the new L2 filter that was created in the previous step in 1791 * bnxt_validate_and_parse_flow. For L2 filters, we will use the new 1792 * filter which points to the new destination queue and so we clear 1793 * the previous L2 filter. For ntuple filters, we are going to reuse 1794 * the old L2 filter and create new NTUPLE filter with this new 1795 * destination queue subsequently during bnxt_flow_create. So we 1796 * decrement the ref cnt of the L2 filter that would've been bumped 1797 * up previously in bnxt_validate_and_parse_flow as the old n-tuple 1798 * filter that was referencing it will be deleted now. 
1799 */ 1800 bnxt_hwrm_clear_l2_filter(bp, old_filter); 1801 if (new_filter->filter_type == HWRM_CFA_L2_FILTER) { 1802 bnxt_hwrm_set_l2_filter(bp, new_filter->dst_id, new_filter); 1803 } else { 1804 if (new_filter->filter_type == HWRM_CFA_EM_FILTER) 1805 bnxt_hwrm_clear_em_filter(bp, old_filter); 1806 if (new_filter->filter_type == HWRM_CFA_NTUPLE_FILTER) 1807 bnxt_hwrm_clear_ntuple_filter(bp, old_filter); 1808 } 1809 } 1810 1811 static int 1812 bnxt_match_filter(struct bnxt *bp, struct bnxt_filter_info *nf) 1813 { 1814 struct bnxt_filter_info *mf; 1815 struct rte_flow *flow; 1816 int i; 1817 1818 for (i = bp->max_vnics - 1; i >= 0; i--) { 1819 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 1820 1821 if (vnic->fw_vnic_id == INVALID_VNIC_ID) 1822 continue; 1823 1824 STAILQ_FOREACH(flow, &vnic->flow_list, next) { 1825 mf = flow->filter; 1826 1827 if (mf->filter_type == nf->filter_type && 1828 mf->flags == nf->flags && 1829 mf->src_port == nf->src_port && 1830 mf->src_port_mask == nf->src_port_mask && 1831 mf->dst_port == nf->dst_port && 1832 mf->dst_port_mask == nf->dst_port_mask && 1833 mf->ip_protocol == nf->ip_protocol && 1834 mf->ip_addr_type == nf->ip_addr_type && 1835 mf->ethertype == nf->ethertype && 1836 mf->vni == nf->vni && 1837 mf->tunnel_type == nf->tunnel_type && 1838 mf->l2_ovlan == nf->l2_ovlan && 1839 mf->l2_ovlan_mask == nf->l2_ovlan_mask && 1840 mf->l2_ivlan == nf->l2_ivlan && 1841 mf->l2_ivlan_mask == nf->l2_ivlan_mask && 1842 !memcmp(mf->l2_addr, nf->l2_addr, 1843 RTE_ETHER_ADDR_LEN) && 1844 !memcmp(mf->l2_addr_mask, nf->l2_addr_mask, 1845 RTE_ETHER_ADDR_LEN) && 1846 !memcmp(mf->src_macaddr, nf->src_macaddr, 1847 RTE_ETHER_ADDR_LEN) && 1848 !memcmp(mf->dst_macaddr, nf->dst_macaddr, 1849 RTE_ETHER_ADDR_LEN) && 1850 !memcmp(mf->src_ipaddr, nf->src_ipaddr, 1851 sizeof(nf->src_ipaddr)) && 1852 !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask, 1853 sizeof(nf->src_ipaddr_mask)) && 1854 !memcmp(mf->dst_ipaddr, nf->dst_ipaddr, 1855 sizeof(nf->dst_ipaddr)) && 1856 !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask, 1857 sizeof(nf->dst_ipaddr_mask))) { 1858 if (mf->dst_id == nf->dst_id) 1859 return -EEXIST; 1860 /* Free the old filter, update flow 1861 * with new filter 1862 */ 1863 bnxt_update_filter(bp, mf, nf); 1864 STAILQ_REMOVE(&vnic->filter, mf, 1865 bnxt_filter_info, next); 1866 STAILQ_INSERT_TAIL(&vnic->filter, nf, next); 1867 bnxt_free_filter(bp, mf); 1868 flow->filter = nf; 1869 return -EXDEV; 1870 } 1871 } 1872 } 1873 return 0; 1874 } 1875 1876 static void 1877 bnxt_setup_flow_counter(struct bnxt *bp) 1878 { 1879 if (bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_COUNTERS && 1880 !(bp->flags & BNXT_FLAG_FC_THREAD) && BNXT_FLOW_XSTATS_EN(bp)) { 1881 rte_eal_alarm_set(US_PER_S * BNXT_FC_TIMER, 1882 bnxt_flow_cnt_alarm_cb, 1883 (void *)bp); 1884 bp->flags |= BNXT_FLAG_FC_THREAD; 1885 } 1886 } 1887 1888 void bnxt_flow_cnt_alarm_cb(void *arg) 1889 { 1890 int rc = 0; 1891 struct bnxt *bp = arg; 1892 1893 if (!bp->flow_stat->rx_fc_out_tbl.va) { 1894 PMD_DRV_LOG(ERR, "bp->flow_stat->rx_fc_out_tbl.va is NULL?\n"); 1895 bnxt_cancel_fc_thread(bp); 1896 return; 1897 } 1898 1899 if (!bp->flow_stat->flow_count) { 1900 bnxt_cancel_fc_thread(bp); 1901 return; 1902 } 1903 1904 if (!bp->eth_dev->data->dev_started) { 1905 bnxt_cancel_fc_thread(bp); 1906 return; 1907 } 1908 1909 rc = bnxt_flow_stats_req(bp); 1910 if (rc) { 1911 PMD_DRV_LOG(ERR, "Flow stat alarm not rescheduled.\n"); 1912 return; 1913 } 1914 1915 rte_eal_alarm_set(US_PER_S * BNXT_FC_TIMER, 1916 bnxt_flow_cnt_alarm_cb, 1917 (void *)bp); 
1918 } 1919 1920 /* Query an requested flow rule. */ 1921 static int 1922 bnxt_flow_query_all(struct rte_flow *flow, 1923 const struct rte_flow_action *actions, void *data, 1924 struct rte_flow_error *error) 1925 { 1926 struct rte_flow_action_rss *rss_conf; 1927 struct bnxt_vnic_info *vnic; 1928 1929 vnic = flow->vnic; 1930 if (vnic == NULL) 1931 return rte_flow_error_set(error, EINVAL, 1932 RTE_FLOW_ERROR_TYPE_HANDLE, flow, 1933 "Invalid flow: failed to query flow."); 1934 1935 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 1936 switch (actions->type) { 1937 case RTE_FLOW_ACTION_TYPE_VOID: 1938 break; 1939 case RTE_FLOW_ACTION_TYPE_COUNT: 1940 break; 1941 case RTE_FLOW_ACTION_TYPE_RSS: 1942 /* Full details of rte_flow_action_rss not available yet TBD*/ 1943 rss_conf = (struct rte_flow_action_rss *)data; 1944 1945 /* toeplitz is default */ 1946 if (vnic->ring_select_mode == 1947 HWRM_VNIC_RSS_CFG_INPUT_RING_SELECT_MODE_TOEPLITZ) 1948 rss_conf->func = vnic->hash_f_local; 1949 else 1950 rss_conf->func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR; 1951 1952 break; 1953 default: 1954 return rte_flow_error_set(error, ENOTSUP, 1955 RTE_FLOW_ERROR_TYPE_ACTION, actions, 1956 "action is not supported"); 1957 } 1958 } 1959 1960 return 0; 1961 } 1962 1963 static int 1964 bnxt_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow, 1965 const struct rte_flow_action *actions, void *data, 1966 struct rte_flow_error *error) 1967 { 1968 struct bnxt *bp = dev->data->dev_private; 1969 int ret = 0; 1970 1971 if (bp == NULL) 1972 return -ENODEV; 1973 1974 bnxt_acquire_flow_lock(bp); 1975 ret = bnxt_flow_query_all(flow, actions, data, error); 1976 bnxt_release_flow_lock(bp); 1977 1978 return ret; 1979 } 1980 1981 static struct rte_flow * 1982 bnxt_flow_create(struct rte_eth_dev *dev, 1983 const struct rte_flow_attr *attr, 1984 const struct rte_flow_item pattern[], 1985 const struct rte_flow_action actions[], 1986 struct rte_flow_error *error) 1987 { 1988 struct bnxt *bp = dev->data->dev_private; 1989 struct bnxt_vnic_info *vnic = NULL; 1990 struct bnxt_filter_info *filter; 1991 bool update_flow = false; 1992 struct rte_flow *flow; 1993 int ret = 0; 1994 uint32_t tun_type, flow_id; 1995 1996 if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) { 1997 rte_flow_error_set(error, EINVAL, 1998 RTE_FLOW_ERROR_TYPE_HANDLE, NULL, 1999 "Failed to create flow, Not a Trusted VF!"); 2000 return NULL; 2001 } 2002 2003 if (!dev->data->dev_started) { 2004 rte_flow_error_set(error, 2005 EINVAL, 2006 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 2007 NULL, 2008 "Device must be started"); 2009 return NULL; 2010 } 2011 2012 flow = rte_zmalloc("bnxt_flow", sizeof(struct rte_flow), 0); 2013 if (!flow) { 2014 rte_flow_error_set(error, ENOMEM, 2015 RTE_FLOW_ERROR_TYPE_HANDLE, NULL, 2016 "Failed to allocate memory"); 2017 return flow; 2018 } 2019 2020 bnxt_acquire_flow_lock(bp); 2021 ret = bnxt_flow_args_validate(attr, pattern, actions, error); 2022 if (ret != 0) { 2023 PMD_DRV_LOG(ERR, "Not a validate flow.\n"); 2024 goto free_flow; 2025 } 2026 2027 filter = bnxt_get_unused_filter(bp); 2028 if (filter == NULL) { 2029 rte_flow_error_set(error, ENOSPC, 2030 RTE_FLOW_ERROR_TYPE_HANDLE, NULL, 2031 "Not enough resources for a new flow"); 2032 goto free_flow; 2033 } 2034 2035 ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr, 2036 error, filter); 2037 if (ret != 0) 2038 goto free_filter; 2039 2040 ret = bnxt_match_filter(bp, filter); 2041 if (ret == -EEXIST) { 2042 PMD_DRV_LOG(DEBUG, "Flow already exists.\n"); 2043 /* Clear the filter that 
was created as part of 2044 * validate_and_parse_flow() above 2045 */ 2046 bnxt_hwrm_clear_l2_filter(bp, filter); 2047 goto free_filter; 2048 } else if (ret == -EXDEV) { 2049 PMD_DRV_LOG(DEBUG, "Flow with same pattern exists\n"); 2050 PMD_DRV_LOG(DEBUG, "Updating with different destination\n"); 2051 update_flow = true; 2052 } 2053 2054 /* If tunnel redirection to a VF/PF is specified then only tunnel_type 2055 * is set and enable is set to the tunnel type. Issue hwrm cmd directly 2056 * in such a case. 2057 */ 2058 if (filter->filter_type == HWRM_CFA_TUNNEL_REDIRECT_FILTER && 2059 (filter->enables == filter->tunnel_type || 2060 filter->tunnel_type == CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN || 2061 filter->tunnel_type == CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE)) { 2062 if (filter->enables & NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT) { 2063 struct rte_eth_udp_tunnel tunnel = {0}; 2064 2065 /* hwrm_tunnel_dst_port_alloc converts to Big Endian */ 2066 tunnel.udp_port = BNXT_NTOHS(filter->dst_port); 2067 if (filter->tunnel_type == 2068 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN) { 2069 tunnel.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN; 2070 } else if (filter->tunnel_type == 2071 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE) { 2072 tunnel.prot_type = RTE_ETH_TUNNEL_TYPE_GENEVE; 2073 } else { 2074 rte_flow_error_set(error, EINVAL, 2075 RTE_FLOW_ERROR_TYPE_HANDLE, 2076 NULL, 2077 "Invalid tunnel type"); 2078 ret = -EINVAL; 2079 goto free_filter; 2080 } 2081 ret = bnxt_udp_tunnel_port_add_op(bp->eth_dev, &tunnel); 2082 if (ret != 0) { 2083 rte_flow_error_set(error, -ret, 2084 RTE_FLOW_ERROR_TYPE_HANDLE, 2085 NULL, 2086 "Fail to add tunnel port"); 2087 goto free_filter; 2088 } 2089 } 2090 ret = bnxt_hwrm_tunnel_redirect_query(bp, &tun_type); 2091 if (ret) { 2092 rte_flow_error_set(error, -ret, 2093 RTE_FLOW_ERROR_TYPE_HANDLE, NULL, 2094 "Unable to query tunnel to VF"); 2095 goto free_filter; 2096 } 2097 if (tun_type == (1U << filter->tunnel_type)) { 2098 ret = 2099 bnxt_hwrm_tunnel_redirect_free(bp, 2100 filter->tunnel_type); 2101 if (ret) { 2102 PMD_DRV_LOG(ERR, 2103 "Unable to free existing tunnel\n"); 2104 rte_flow_error_set(error, -ret, 2105 RTE_FLOW_ERROR_TYPE_HANDLE, 2106 NULL, 2107 "Unable to free preexisting " 2108 "tunnel on VF"); 2109 goto free_filter; 2110 } 2111 } 2112 ret = bnxt_hwrm_tunnel_redirect(bp, filter->tunnel_type); 2113 if (ret) { 2114 rte_flow_error_set(error, -ret, 2115 RTE_FLOW_ERROR_TYPE_HANDLE, NULL, 2116 "Unable to redirect tunnel to VF"); 2117 goto free_filter; 2118 } 2119 vnic = &bp->vnic_info[0]; 2120 goto done; 2121 } 2122 2123 if (filter->filter_type == HWRM_CFA_EM_FILTER) { 2124 filter->enables |= 2125 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID; 2126 ret = bnxt_hwrm_set_em_filter(bp, filter->dst_id, filter); 2127 if (ret != 0) { 2128 rte_flow_error_set(error, -ret, 2129 RTE_FLOW_ERROR_TYPE_HANDLE, NULL, 2130 "Failed to create EM filter"); 2131 goto free_filter; 2132 } 2133 } 2134 2135 if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) { 2136 filter->enables |= 2137 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID; 2138 ret = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, filter); 2139 if (ret != 0) { 2140 rte_flow_error_set(error, -ret, 2141 RTE_FLOW_ERROR_TYPE_HANDLE, NULL, 2142 "Failed to create ntuple filter"); 2143 goto free_filter; 2144 } 2145 } 2146 2147 vnic = find_matching_vnic(bp, filter); 2148 done: 2149 if (!ret || update_flow) { 2150 flow->filter = filter; 2151 flow->vnic = vnic; 2152 if (update_flow) { 2153 ret = -EXDEV; 2154 goto 
static int bnxt_handle_tunnel_redirect_destroy(struct bnxt *bp,
					       struct bnxt_filter_info *filter,
					       struct rte_flow_error *error)
{
	uint16_t tun_dst_fid;
	uint32_t tun_type;
	int ret = 0;

	ret = bnxt_hwrm_tunnel_redirect_query(bp, &tun_type);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Unable to query tunnel to VF");
		return ret;
	}
	if (tun_type == (1U << filter->tunnel_type)) {
		ret = bnxt_hwrm_tunnel_redirect_info(bp, filter->tunnel_type,
						     &tun_dst_fid);
		if (ret) {
			rte_flow_error_set(error, -ret,
					   RTE_FLOW_ERROR_TYPE_HANDLE,
					   NULL,
					   "tunnel_redirect info cmd fail");
			return ret;
		}
		PMD_DRV_LOG(INFO, "Pre-existing tunnel fid = %x vf->fid = %x\n",
			    tun_dst_fid + bp->first_vf_id, bp->fw_fid);

		/* Tunnel doesn't belong to this VF, so don't send HWRM
		 * cmd, just delete the flow from driver
		 */
		if (bp->fw_fid != (tun_dst_fid + bp->first_vf_id)) {
			PMD_DRV_LOG(ERR,
				    "Tunnel does not belong to this VF, skip hwrm_tunnel_redirect_free\n");
		} else {
			ret = bnxt_hwrm_tunnel_redirect_free(bp,
							     filter->tunnel_type);
			if (ret) {
				rte_flow_error_set(error, -ret,
						   RTE_FLOW_ERROR_TYPE_HANDLE,
						   NULL,
						   "Unable to free tunnel redirection");
				return ret;
			}
		}
	}
	return ret;
}
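
/*
 * Internal flow destroy path, called with the flow lock already held.
 * Tunnel-redirect and config-only filters skip the HW filter teardown;
 * otherwise the mark table entry and the VNIC filter are cleared, the flow
 * is unlinked and freed, and a now-unused non-default VNIC is released.
 */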
static int
_bnxt_flow_destroy(struct bnxt *bp,
		   struct rte_flow *flow,
		   struct rte_flow_error *error)
{
	struct bnxt_filter_info *filter;
	struct bnxt_vnic_info *vnic;
	int ret = 0;
	uint32_t flow_id;

	filter = flow->filter;
	vnic = flow->vnic;

	/* If tunnel redirection to a VF/PF is specified then only tunnel_type
	 * is set and enable is set to the tunnel type. Issue hwrm cmd directly
	 * in such a case.
	 */
	if (filter->filter_type == HWRM_CFA_TUNNEL_REDIRECT_FILTER &&
	    (filter->enables == filter->tunnel_type ||
	     filter->tunnel_type == CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN ||
	     filter->tunnel_type == CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE)) {
		if (filter->enables & NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT) {
			struct rte_eth_udp_tunnel tunnel = {0};

			/* hwrm_tunnel_dst_port_free converts to Big Endian */
			tunnel.udp_port = BNXT_NTOHS(filter->dst_port);
			if (filter->tunnel_type ==
			    CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN) {
				tunnel.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN;
			} else if (filter->tunnel_type ==
				   CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE) {
				tunnel.prot_type = RTE_ETH_TUNNEL_TYPE_GENEVE;
			} else {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_HANDLE,
						   NULL,
						   "Invalid tunnel type");
				return ret;
			}

			ret = bnxt_udp_tunnel_port_del_op(bp->eth_dev,
							  &tunnel);
			if (ret)
				return ret;
		}
		ret = bnxt_handle_tunnel_redirect_destroy(bp, filter, error);
		if (!ret)
			goto done;
		else
			return ret;
	}

	/* For config type, there is no filter in HW. Finish cleanup here */
	if (filter->filter_type == HWRM_CFA_CONFIG)
		goto done;

	ret = bnxt_match_filter(bp, filter);
	if (ret == 0)
		PMD_DRV_LOG(ERR, "Could not find matching flow\n");

	if (filter->valid_flags & BNXT_FLOW_MARK_FLAG) {
		flow_id = filter->flow_id & BNXT_FLOW_ID_MASK;
		memset(&bp->mark_table[flow_id], 0,
		       sizeof(bp->mark_table[flow_id]));
		filter->flow_id = 0;
	}

	ret = bnxt_clear_one_vnic_filter(bp, filter);

done:
	if (!ret) {
		/* If it is an L2 drop filter, when the filter is created,
		 * the FW updates the BC/MC records.
		 * Once this filter is removed, issue the set_rx_mask command
		 * to reset the BC/MC records in the HW to the settings
		 * before the drop counter was created.
		 */
		if (filter->valid_flags & BNXT_FLOW_L2_DROP_FLAG)
			bnxt_set_rx_mask_no_vlan(bp, &bp->vnic_info[0]);

		STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
		bnxt_free_filter(bp, filter);
		STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
		rte_free(flow);
		if (BNXT_FLOW_XSTATS_EN(bp))
			bp->flow_stat->flow_count--;

		/* If this was the last flow associated with this vnic,
		 * switch the queue back to RSS pool.
		 */
		if (vnic && !vnic->func_default &&
		    STAILQ_EMPTY(&vnic->flow_list)) {
			bnxt_vnic_cleanup(bp, vnic);
			bp->nr_vnics--;
		}
	} else {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to destroy flow.");
	}

	return ret;
}
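
/*
 * rte_flow destroy callback: validate the flow handle, take the flow lock
 * and delegate the actual teardown to _bnxt_flow_destroy().
 */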
static int
bnxt_flow_destroy(struct rte_eth_dev *dev,
		  struct rte_flow *flow,
		  struct rte_flow_error *error)
{
	struct bnxt *bp = dev->data->dev_private;
	int ret = 0;

	bnxt_acquire_flow_lock(bp);
	if (!flow) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Invalid flow: failed to destroy flow.");
		bnxt_release_flow_lock(bp);
		return -EINVAL;
	}

	if (!flow->filter) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Invalid flow: failed to destroy flow.");
		bnxt_release_flow_lock(bp);
		return -EINVAL;
	}
	ret = _bnxt_flow_destroy(bp, flow, error);
	bnxt_release_flow_lock(bp);

	return ret;
}

void bnxt_cancel_fc_thread(struct bnxt *bp)
{
	bp->flags &= ~BNXT_FLAG_FC_THREAD;
	rte_eal_alarm_cancel(bnxt_flow_cnt_alarm_cb, (void *)bp);
}

static int
bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
	struct bnxt *bp = dev->data->dev_private;
	struct bnxt_vnic_info *vnic;
	struct rte_flow *flow;
	unsigned int i;
	int ret = 0;

	bnxt_acquire_flow_lock(bp);
	for (i = 0; i < bp->max_vnics; i++) {
		vnic = &bp->vnic_info[i];
		if (vnic && vnic->fw_vnic_id == INVALID_VNIC_ID)
			continue;

		while (!STAILQ_EMPTY(&vnic->flow_list)) {
			flow = STAILQ_FIRST(&vnic->flow_list);

			if (!flow->filter)
				continue;

			ret = _bnxt_flow_destroy(bp, flow, error);
			if (ret)
				break;
		}
	}

	bnxt_cancel_fc_thread(bp);
	bnxt_release_flow_lock(bp);

	return ret;
}

const struct rte_flow_ops bnxt_flow_ops = {
	.validate = bnxt_flow_validate,
	.create = bnxt_flow_create,
	.destroy = bnxt_flow_destroy,
	.flush = bnxt_flow_flush,
	.query = bnxt_flow_query,
};
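
/*
 * Illustrative only, not part of the driver: a minimal sketch (kept under
 * "#if 0" so it is never compiled) of how an application might exercise the
 * callbacks above through the generic rte_flow API, steering ingress UDP
 * traffic with destination port 4789 on one port to a given Rx queue.
 * The function name, the UDP port value and the port/queue ids are arbitrary
 * choices for the example; validate/create/destroy end up in
 * bnxt_flow_validate(), bnxt_flow_create() and bnxt_flow_destroy().
 */
#if 0
/* Assumes <rte_ethdev.h> and <rte_flow.h> are included by the application. */
static int
example_create_udp_flow(uint16_t port_id, uint16_t queue_id)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_udp udp_spec = {
		.hdr.dst_port = RTE_BE16(4789),
	};
	struct rte_flow_item_udp udp_mask = {
		.hdr.dst_port = RTE_BE16(0xffff),
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
		  .spec = &udp_spec, .mask = &udp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = queue_id };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;
	struct rte_flow *flow;

	/* Validate first, then create the rule on the given port. */
	if (rte_flow_validate(port_id, &attr, pattern, actions, &err))
		return -1;

	flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
	if (flow == NULL)
		return -1;

	/* Tear the rule down again through the destroy callback. */
	return rte_flow_destroy(port_id, flow, &err);
}
#endif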