1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2014-2023 Broadcom 3 * All rights reserved. 4 */ 5 6 #include <sys/queue.h> 7 8 #include <rte_log.h> 9 #include <rte_malloc.h> 10 #include <rte_flow.h> 11 #include <rte_flow_driver.h> 12 #include <rte_tailq.h> 13 #include <rte_alarm.h> 14 #include <rte_cycles.h> 15 16 #include "bnxt.h" 17 #include "bnxt_filter.h" 18 #include "bnxt_hwrm.h" 19 #include "bnxt_ring.h" 20 #include "bnxt_rxq.h" 21 #include "bnxt_rxr.h" 22 #include "bnxt_vnic.h" 23 #include "hsi_struct_def_dpdk.h" 24 25 static int 26 bnxt_flow_args_validate(const struct rte_flow_attr *attr, 27 const struct rte_flow_item pattern[], 28 const struct rte_flow_action actions[], 29 struct rte_flow_error *error) 30 { 31 if (!pattern) { 32 rte_flow_error_set(error, 33 EINVAL, 34 RTE_FLOW_ERROR_TYPE_ITEM_NUM, 35 NULL, 36 "NULL pattern."); 37 return -rte_errno; 38 } 39 40 if (!actions) { 41 rte_flow_error_set(error, 42 EINVAL, 43 RTE_FLOW_ERROR_TYPE_ACTION_NUM, 44 NULL, 45 "NULL action."); 46 return -rte_errno; 47 } 48 49 if (!attr) { 50 rte_flow_error_set(error, 51 EINVAL, 52 RTE_FLOW_ERROR_TYPE_ATTR, 53 NULL, 54 "NULL attribute."); 55 return -rte_errno; 56 } 57 58 return 0; 59 } 60 61 static const struct rte_flow_item * 62 bnxt_flow_non_void_item(const struct rte_flow_item *cur) 63 { 64 while (1) { 65 if (cur->type != RTE_FLOW_ITEM_TYPE_VOID) 66 return cur; 67 cur++; 68 } 69 } 70 71 static const struct rte_flow_action * 72 bnxt_flow_non_void_action(const struct rte_flow_action *cur) 73 { 74 while (1) { 75 if (cur->type != RTE_FLOW_ACTION_TYPE_VOID) 76 return cur; 77 cur++; 78 } 79 } 80 81 static int 82 bnxt_filter_type_check(const struct rte_flow_item pattern[], 83 struct rte_flow_error *error) 84 { 85 const struct rte_flow_item *item = 86 bnxt_flow_non_void_item(pattern); 87 int use_ntuple = 1; 88 bool has_vlan = 0; 89 90 while (item->type != RTE_FLOW_ITEM_TYPE_END) { 91 switch (item->type) { 92 case RTE_FLOW_ITEM_TYPE_ANY: 93 case RTE_FLOW_ITEM_TYPE_ETH: 94 use_ntuple = 0; 95 break; 96 case RTE_FLOW_ITEM_TYPE_VLAN: 97 use_ntuple = 0; 98 has_vlan = 1; 99 break; 100 case RTE_FLOW_ITEM_TYPE_IPV4: 101 case RTE_FLOW_ITEM_TYPE_IPV6: 102 case RTE_FLOW_ITEM_TYPE_TCP: 103 case RTE_FLOW_ITEM_TYPE_UDP: 104 /* FALLTHROUGH */ 105 /* need ntuple match, reset exact match */ 106 use_ntuple |= 1; 107 break; 108 default: 109 PMD_DRV_LOG_LINE(DEBUG, "Unknown Flow type"); 110 use_ntuple |= 0; 111 } 112 item++; 113 } 114 115 if (has_vlan && use_ntuple) { 116 PMD_DRV_LOG_LINE(ERR, 117 "VLAN flow cannot use NTUPLE filter"); 118 rte_flow_error_set(error, EINVAL, 119 RTE_FLOW_ERROR_TYPE_ITEM, 120 item, 121 "Cannot use VLAN with NTUPLE"); 122 return -rte_errno; 123 } 124 125 return use_ntuple; 126 } 127 128 static int 129 bnxt_validate_and_parse_flow_type(const struct rte_flow_attr *attr, 130 const struct rte_flow_item pattern[], 131 struct rte_flow_error *error, 132 struct bnxt_filter_info *filter) 133 { 134 const struct rte_flow_item *item = bnxt_flow_non_void_item(pattern); 135 const struct rte_flow_item_vlan *vlan_spec, *vlan_mask; 136 const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask; 137 const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask; 138 const struct rte_flow_item_tcp *tcp_spec, *tcp_mask; 139 const struct rte_flow_item_udp *udp_spec, *udp_mask; 140 const struct rte_flow_item_eth *eth_spec, *eth_mask; 141 const struct rte_ether_addr *dst, *src; 142 const struct rte_flow_item_nvgre *nvgre_spec; 143 const struct rte_flow_item_nvgre *nvgre_mask; 144 const struct rte_flow_item_gre 
*gre_spec;
	const struct rte_flow_item_gre *gre_mask;
	const struct rte_flow_item_vxlan *vxlan_spec;
	const struct rte_flow_item_vxlan *vxlan_mask;
	uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
	uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
	uint32_t tenant_id_be = 0, valid_flags = 0;
	bool vni_masked = 0;
	bool tni_masked = 0;
	uint32_t en_ethertype;
	uint8_t inner = 0;
	uint32_t en = 0;
	int use_ntuple;

	use_ntuple = bnxt_filter_type_check(pattern, error);
	if (use_ntuple < 0)
		return use_ntuple;
	PMD_DRV_LOG_LINE(DEBUG, "Use NTUPLE %d", use_ntuple);

	filter->filter_type = use_ntuple ?
		HWRM_CFA_NTUPLE_FILTER : HWRM_CFA_L2_FILTER;
	en_ethertype = use_ntuple ?
		NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE :
		EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE;

	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
		if (item->last) {
			/* last or range is NOT supported as match criteria */
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "No support for range");
			return -rte_errno;
		}

		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_ANY:
			inner =
			((const struct rte_flow_item_any *)item->spec)->num > 3;
			if (inner)
				PMD_DRV_LOG_LINE(DEBUG, "Parse inner header");
			break;
		case RTE_FLOW_ITEM_TYPE_ETH:
			if (!item->spec)
				break;

			eth_spec = item->spec;

			if (item->mask)
				eth_mask = item->mask;
			else
				eth_mask = &rte_flow_item_eth_mask;

			/* Source MAC address mask cannot be partially set.
			 * Should be All 0's or all 1's.
			 * Destination MAC address mask must not be partially
			 * set. Should be all 1's or all 0's.
			 */
			if ((!rte_is_zero_ether_addr(&eth_mask->hdr.src_addr) &&
			     !rte_is_broadcast_ether_addr(&eth_mask->hdr.src_addr)) ||
			    (!rte_is_zero_ether_addr(&eth_mask->hdr.dst_addr) &&
			     !rte_is_broadcast_ether_addr(&eth_mask->hdr.dst_addr))) {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "MAC_addr mask not valid");
				return -rte_errno;
			}

			/* Mask is not allowed. Only exact matches are */
			if (eth_mask->hdr.ether_type &&
			    eth_mask->hdr.ether_type != RTE_BE16(0xffff)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "ethertype mask not valid");
				return -rte_errno;
			}

			if (rte_is_broadcast_ether_addr(&eth_mask->hdr.dst_addr)) {
				dst = &eth_spec->hdr.dst_addr;
				if (!rte_is_valid_assigned_ether_addr(dst)) {
					rte_flow_error_set(error,
							   EINVAL,
							   RTE_FLOW_ERROR_TYPE_ITEM,
							   item,
							   "DMAC is invalid");
					PMD_DRV_LOG_LINE(ERR,
							 "DMAC is invalid!");
					return -rte_errno;
				}
				rte_memcpy(filter->dst_macaddr,
					   &eth_spec->hdr.dst_addr, RTE_ETHER_ADDR_LEN);
				en |= use_ntuple ?
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR :
					EM_FLOW_ALLOC_INPUT_EN_DST_MACADDR;
				valid_flags |= inner ?
					BNXT_FLOW_L2_INNER_DST_VALID_FLAG :
					BNXT_FLOW_L2_DST_VALID_FLAG;
				filter->priority = attr->priority;
				PMD_DRV_LOG_LINE(DEBUG,
						 "Creating a priority flow");
			}
			if (rte_is_broadcast_ether_addr(&eth_mask->hdr.src_addr)) {
				src = &eth_spec->hdr.src_addr;
				if (!rte_is_valid_assigned_ether_addr(src)) {
					rte_flow_error_set(error,
							   EINVAL,
							   RTE_FLOW_ERROR_TYPE_ITEM,
							   item,
							   "SMAC is invalid");
					PMD_DRV_LOG_LINE(ERR,
							 "SMAC is invalid!");
					return -rte_errno;
				}
				rte_memcpy(filter->src_macaddr,
					   &eth_spec->hdr.src_addr, RTE_ETHER_ADDR_LEN);
				en |= use_ntuple ?
263 NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR : 264 EM_FLOW_ALLOC_INPUT_EN_SRC_MACADDR; 265 valid_flags |= inner ? 266 BNXT_FLOW_L2_INNER_SRC_VALID_FLAG : 267 BNXT_FLOW_L2_SRC_VALID_FLAG; 268 } /* 269 * else { 270 * PMD_DRV_LOG_LINE(ERR, "Handle this condition"); 271 * } 272 */ 273 if (eth_mask->hdr.ether_type) { 274 filter->ethertype = 275 rte_be_to_cpu_16(eth_spec->hdr.ether_type); 276 en |= en_ethertype; 277 } 278 if (inner) 279 valid_flags |= BNXT_FLOW_PARSE_INNER_FLAG; 280 281 break; 282 case RTE_FLOW_ITEM_TYPE_VLAN: 283 vlan_spec = item->spec; 284 285 if (item->mask) 286 vlan_mask = item->mask; 287 else 288 vlan_mask = &rte_flow_item_vlan_mask; 289 290 if (en & en_ethertype) { 291 rte_flow_error_set(error, EINVAL, 292 RTE_FLOW_ERROR_TYPE_ITEM, 293 item, 294 "VLAN TPID matching is not" 295 " supported"); 296 return -rte_errno; 297 } 298 if (vlan_mask->hdr.vlan_tci && 299 vlan_mask->hdr.vlan_tci == RTE_BE16(0x0fff)) { 300 /* Only the VLAN ID can be matched. */ 301 filter->l2_ovlan = 302 rte_be_to_cpu_16(vlan_spec->hdr.vlan_tci & 303 RTE_BE16(0x0fff)); 304 en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID; 305 } else { 306 rte_flow_error_set(error, 307 EINVAL, 308 RTE_FLOW_ERROR_TYPE_ITEM, 309 item, 310 "VLAN mask is invalid"); 311 return -rte_errno; 312 } 313 if (vlan_mask->hdr.eth_proto && 314 vlan_mask->hdr.eth_proto != RTE_BE16(0xffff)) { 315 rte_flow_error_set(error, EINVAL, 316 RTE_FLOW_ERROR_TYPE_ITEM, 317 item, 318 "inner ethertype mask not" 319 " valid"); 320 return -rte_errno; 321 } 322 if (vlan_mask->hdr.eth_proto) { 323 filter->ethertype = 324 rte_be_to_cpu_16(vlan_spec->hdr.eth_proto); 325 en |= en_ethertype; 326 } 327 328 break; 329 case RTE_FLOW_ITEM_TYPE_IPV4: 330 /* If mask is not involved, we could use EM filters. */ 331 ipv4_spec = item->spec; 332 333 if (!item->spec) 334 break; 335 336 if (item->mask) 337 ipv4_mask = item->mask; 338 else 339 ipv4_mask = &rte_flow_item_ipv4_mask; 340 341 /* Only IP DST and SRC fields are maskable. */ 342 if (ipv4_mask->hdr.version_ihl || 343 ipv4_mask->hdr.type_of_service || 344 ipv4_mask->hdr.total_length || 345 ipv4_mask->hdr.packet_id || 346 ipv4_mask->hdr.fragment_offset || 347 ipv4_mask->hdr.time_to_live || 348 ipv4_mask->hdr.next_proto_id || 349 ipv4_mask->hdr.hdr_checksum) { 350 rte_flow_error_set(error, 351 EINVAL, 352 RTE_FLOW_ERROR_TYPE_ITEM, 353 item, 354 "Invalid IPv4 mask."); 355 return -rte_errno; 356 } 357 358 filter->dst_ipaddr[0] = ipv4_spec->hdr.dst_addr; 359 filter->src_ipaddr[0] = ipv4_spec->hdr.src_addr; 360 361 if (use_ntuple) 362 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR | 363 NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; 364 else 365 en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR | 366 EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR; 367 368 if (ipv4_mask->hdr.src_addr) { 369 filter->src_ipaddr_mask[0] = 370 ipv4_mask->hdr.src_addr; 371 en |= !use_ntuple ? 0 : 372 NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; 373 } 374 375 if (ipv4_mask->hdr.dst_addr) { 376 filter->dst_ipaddr_mask[0] = 377 ipv4_mask->hdr.dst_addr; 378 en |= !use_ntuple ? 0 : 379 NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; 380 } 381 382 filter->ip_addr_type = use_ntuple ? 
383 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 : 384 HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4; 385 386 if (ipv4_spec->hdr.next_proto_id) { 387 filter->ip_protocol = 388 ipv4_spec->hdr.next_proto_id; 389 if (use_ntuple) 390 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 391 else 392 en |= EM_FLOW_ALLOC_INPUT_EN_IP_PROTO; 393 } 394 break; 395 case RTE_FLOW_ITEM_TYPE_IPV6: 396 ipv6_spec = item->spec; 397 398 if (!item->spec) 399 break; 400 401 if (item->mask) 402 ipv6_mask = item->mask; 403 else 404 ipv6_mask = &rte_flow_item_ipv6_mask; 405 406 /* Only IP DST and SRC fields are maskable. */ 407 if (ipv6_mask->hdr.vtc_flow || 408 ipv6_mask->hdr.payload_len || 409 ipv6_mask->hdr.proto || 410 ipv6_mask->hdr.hop_limits) { 411 rte_flow_error_set(error, 412 EINVAL, 413 RTE_FLOW_ERROR_TYPE_ITEM, 414 item, 415 "Invalid IPv6 mask."); 416 return -rte_errno; 417 } 418 419 if (use_ntuple) 420 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR | 421 NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; 422 else 423 en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR | 424 EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR; 425 426 rte_memcpy(filter->src_ipaddr, 427 &ipv6_spec->hdr.src_addr, 16); 428 rte_memcpy(filter->dst_ipaddr, 429 &ipv6_spec->hdr.dst_addr, 16); 430 431 if (!bnxt_check_zero_bytes(ipv6_mask->hdr.src_addr.a, 432 16)) { 433 rte_memcpy(filter->src_ipaddr_mask, 434 &ipv6_mask->hdr.src_addr, 16); 435 en |= !use_ntuple ? 0 : 436 NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; 437 } 438 439 if (!bnxt_check_zero_bytes(ipv6_mask->hdr.dst_addr.a, 440 16)) { 441 rte_memcpy(filter->dst_ipaddr_mask, 442 &ipv6_mask->hdr.dst_addr, 16); 443 en |= !use_ntuple ? 0 : 444 NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; 445 } 446 447 filter->ip_addr_type = use_ntuple ? 448 NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 : 449 EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6; 450 break; 451 case RTE_FLOW_ITEM_TYPE_TCP: 452 tcp_spec = item->spec; 453 454 if (!item->spec) 455 break; 456 457 if (item->mask) 458 tcp_mask = item->mask; 459 else 460 tcp_mask = &rte_flow_item_tcp_mask; 461 462 /* Check TCP mask. Only DST & SRC ports are maskable */ 463 if (tcp_mask->hdr.sent_seq || 464 tcp_mask->hdr.recv_ack || 465 tcp_mask->hdr.data_off || 466 tcp_mask->hdr.tcp_flags || 467 tcp_mask->hdr.rx_win || 468 tcp_mask->hdr.cksum || 469 tcp_mask->hdr.tcp_urp) { 470 rte_flow_error_set(error, 471 EINVAL, 472 RTE_FLOW_ERROR_TYPE_ITEM, 473 item, 474 "Invalid TCP mask"); 475 return -rte_errno; 476 } 477 478 filter->src_port = tcp_spec->hdr.src_port; 479 filter->dst_port = tcp_spec->hdr.dst_port; 480 481 if (use_ntuple) 482 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT | 483 NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT; 484 else 485 en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT | 486 EM_FLOW_ALLOC_INPUT_EN_DST_PORT; 487 488 if (tcp_mask->hdr.dst_port) { 489 filter->dst_port_mask = tcp_mask->hdr.dst_port; 490 en |= !use_ntuple ? 0 : 491 NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; 492 } 493 494 if (tcp_mask->hdr.src_port) { 495 filter->src_port_mask = tcp_mask->hdr.src_port; 496 en |= !use_ntuple ? 
0 : 497 NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; 498 } 499 break; 500 case RTE_FLOW_ITEM_TYPE_UDP: 501 udp_spec = item->spec; 502 503 if (!item->spec) 504 break; 505 506 if (item->mask) 507 udp_mask = item->mask; 508 else 509 udp_mask = &rte_flow_item_udp_mask; 510 511 if (udp_mask->hdr.dgram_len || 512 udp_mask->hdr.dgram_cksum) { 513 rte_flow_error_set(error, 514 EINVAL, 515 RTE_FLOW_ERROR_TYPE_ITEM, 516 item, 517 "Invalid UDP mask"); 518 return -rte_errno; 519 } 520 521 filter->src_port = udp_spec->hdr.src_port; 522 filter->dst_port = udp_spec->hdr.dst_port; 523 524 if (use_ntuple) 525 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT | 526 NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT; 527 else 528 en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT | 529 EM_FLOW_ALLOC_INPUT_EN_DST_PORT; 530 531 if (udp_mask->hdr.dst_port) { 532 filter->dst_port_mask = udp_mask->hdr.dst_port; 533 en |= !use_ntuple ? 0 : 534 NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; 535 } 536 537 if (udp_mask->hdr.src_port) { 538 filter->src_port_mask = udp_mask->hdr.src_port; 539 en |= !use_ntuple ? 0 : 540 NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; 541 } 542 break; 543 case RTE_FLOW_ITEM_TYPE_VXLAN: 544 vxlan_spec = item->spec; 545 vxlan_mask = item->mask; 546 /* Check if VXLAN item is used to describe protocol. 547 * If yes, both spec and mask should be NULL. 548 * If no, both spec and mask shouldn't be NULL. 549 */ 550 if ((!vxlan_spec && vxlan_mask) || 551 (vxlan_spec && !vxlan_mask)) { 552 rte_flow_error_set(error, 553 EINVAL, 554 RTE_FLOW_ERROR_TYPE_ITEM, 555 item, 556 "Invalid VXLAN item"); 557 return -rte_errno; 558 } 559 560 if (!vxlan_spec && !vxlan_mask) { 561 filter->tunnel_type = 562 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN; 563 break; 564 } 565 566 if ((vxlan_spec->hdr.rsvd0[0] != 0) || 567 (vxlan_spec->hdr.rsvd0[1] != 0) || 568 (vxlan_spec->hdr.rsvd0[2] != 0) || 569 (vxlan_spec->hdr.rsvd1 != 0) || 570 (vxlan_spec->hdr.flags != 8)) { 571 rte_flow_error_set(error, 572 EINVAL, 573 RTE_FLOW_ERROR_TYPE_ITEM, 574 item, 575 "Invalid VXLAN item"); 576 return -rte_errno; 577 } 578 579 /* Check if VNI is masked. */ 580 if (vxlan_mask != NULL) { 581 vni_masked = 582 !!memcmp(vxlan_mask->hdr.vni, vni_mask, 583 RTE_DIM(vni_mask)); 584 if (vni_masked) { 585 rte_flow_error_set 586 (error, 587 EINVAL, 588 RTE_FLOW_ERROR_TYPE_ITEM, 589 item, 590 "Invalid VNI mask"); 591 return -rte_errno; 592 } 593 594 rte_memcpy(((uint8_t *)&tenant_id_be + 1), 595 vxlan_spec->hdr.vni, 3); 596 filter->vni = 597 rte_be_to_cpu_32(tenant_id_be); 598 filter->tunnel_type = 599 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN; 600 } 601 break; 602 case RTE_FLOW_ITEM_TYPE_NVGRE: 603 nvgre_spec = item->spec; 604 nvgre_mask = item->mask; 605 /* Check if NVGRE item is used to describe protocol. 606 * If yes, both spec and mask should be NULL. 607 * If no, both spec and mask shouldn't be NULL. 
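			 * For example (illustrative only), a testpmd pattern such as
			 * "eth / ipv4 / nvgre / end" that merely names the tunnel
			 * carries neither spec nor mask, whereas a pattern matching a
			 * specific TNI supplies both.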
608 */ 609 if ((!nvgre_spec && nvgre_mask) || 610 (nvgre_spec && !nvgre_mask)) { 611 rte_flow_error_set(error, 612 EINVAL, 613 RTE_FLOW_ERROR_TYPE_ITEM, 614 item, 615 "Invalid NVGRE item"); 616 return -rte_errno; 617 } 618 619 if (!nvgre_spec && !nvgre_mask) { 620 filter->tunnel_type = 621 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE; 622 break; 623 } 624 625 if (nvgre_spec->c_k_s_rsvd0_ver != 0x2000 || 626 nvgre_spec->protocol != 0x6558) { 627 rte_flow_error_set(error, 628 EINVAL, 629 RTE_FLOW_ERROR_TYPE_ITEM, 630 item, 631 "Invalid NVGRE item"); 632 return -rte_errno; 633 } 634 635 if (nvgre_spec && nvgre_mask) { 636 tni_masked = 637 !!memcmp(nvgre_mask->tni, tni_mask, 638 RTE_DIM(tni_mask)); 639 if (tni_masked) { 640 rte_flow_error_set 641 (error, 642 EINVAL, 643 RTE_FLOW_ERROR_TYPE_ITEM, 644 item, 645 "Invalid TNI mask"); 646 return -rte_errno; 647 } 648 rte_memcpy(((uint8_t *)&tenant_id_be + 1), 649 nvgre_spec->tni, 3); 650 filter->vni = 651 rte_be_to_cpu_32(tenant_id_be); 652 filter->tunnel_type = 653 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE; 654 } 655 break; 656 657 case RTE_FLOW_ITEM_TYPE_GRE: 658 gre_spec = (const struct rte_flow_item_gre *)item->spec; 659 gre_mask = (const struct rte_flow_item_gre *)item->mask; 660 661 /* 662 *Check if GRE item is used to describe protocol. 663 * If yes, both spec and mask should be NULL. 664 * If no, both spec and mask shouldn't be NULL. 665 */ 666 if (!!gre_spec ^ !!gre_mask) { 667 rte_flow_error_set(error, EINVAL, 668 RTE_FLOW_ERROR_TYPE_ITEM, 669 item, 670 "Invalid GRE item"); 671 return -rte_errno; 672 } 673 674 if (!gre_spec && !gre_mask) { 675 filter->tunnel_type = 676 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE; 677 break; 678 } 679 break; 680 default: 681 break; 682 } 683 item++; 684 } 685 filter->enables = en; 686 filter->valid_flags = valid_flags; 687 688 /* Items parsed but no filter to create in HW. */ 689 if (filter->enables == 0 && filter->valid_flags == 0) 690 filter->filter_type = HWRM_CFA_CONFIG; 691 692 return 0; 693 } 694 695 /* Parse attributes */ 696 static int 697 bnxt_flow_parse_attr(const struct rte_flow_attr *attr, 698 struct rte_flow_error *error) 699 { 700 /* Must be input direction */ 701 if (!attr->ingress) { 702 rte_flow_error_set(error, 703 EINVAL, 704 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, 705 attr, 706 "Only support ingress."); 707 return -rte_errno; 708 } 709 710 /* Not supported */ 711 if (attr->egress) { 712 rte_flow_error_set(error, 713 EINVAL, 714 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, 715 attr, 716 "No support for egress."); 717 return -rte_errno; 718 } 719 720 return 0; 721 } 722 723 static struct bnxt_filter_info * 724 bnxt_find_matching_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf) 725 { 726 struct bnxt_filter_info *mf, *f0; 727 struct bnxt_vnic_info *vnic0; 728 int i; 729 730 vnic0 = bnxt_get_default_vnic(bp); 731 f0 = STAILQ_FIRST(&vnic0->filter); 732 733 /* This flow has same DST MAC as the port/l2 filter. 
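	 * For instance, a flow whose parsed dst_macaddr equals the address
	 * already programmed in f0 (typically the port MAC) can reuse that
	 * filter instead of allocating a new one (explanatory note).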
*/ 734 if (memcmp(f0->l2_addr, nf->dst_macaddr, RTE_ETHER_ADDR_LEN) == 0) 735 return f0; 736 737 for (i = bp->max_vnics - 1; i >= 0; i--) { 738 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 739 740 if (vnic->fw_vnic_id == INVALID_VNIC_ID) 741 continue; 742 743 STAILQ_FOREACH(mf, &vnic->filter, next) { 744 745 if (mf->matching_l2_fltr_ptr) 746 continue; 747 748 if (mf->ethertype == nf->ethertype && 749 mf->l2_ovlan == nf->l2_ovlan && 750 mf->l2_ovlan_mask == nf->l2_ovlan_mask && 751 mf->l2_ivlan == nf->l2_ivlan && 752 mf->l2_ivlan_mask == nf->l2_ivlan_mask && 753 !memcmp(mf->src_macaddr, nf->src_macaddr, 754 RTE_ETHER_ADDR_LEN) && 755 !memcmp(mf->dst_macaddr, nf->dst_macaddr, 756 RTE_ETHER_ADDR_LEN)) 757 return mf; 758 } 759 } 760 return NULL; 761 } 762 763 static struct bnxt_filter_info * 764 bnxt_create_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf, 765 struct bnxt_vnic_info *vnic) 766 { 767 struct bnxt_filter_info *filter1; 768 int rc; 769 770 /* Alloc new L2 filter. 771 * This flow needs MAC filter which does not match any existing 772 * L2 filters. 773 */ 774 filter1 = bnxt_get_unused_filter(bp); 775 if (filter1 == NULL) 776 return NULL; 777 778 memcpy(filter1, nf, sizeof(*filter1)); 779 780 filter1->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_XDP_DISABLE; 781 filter1->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX; 782 if (nf->valid_flags & BNXT_FLOW_L2_SRC_VALID_FLAG || 783 nf->valid_flags & BNXT_FLOW_L2_DST_VALID_FLAG) { 784 filter1->flags |= 785 HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST; 786 PMD_DRV_LOG_LINE(DEBUG, "Create Outer filter"); 787 } 788 789 if (nf->filter_type == HWRM_CFA_L2_FILTER && 790 (nf->valid_flags & BNXT_FLOW_L2_SRC_VALID_FLAG || 791 nf->valid_flags & BNXT_FLOW_L2_INNER_SRC_VALID_FLAG)) { 792 PMD_DRV_LOG_LINE(DEBUG, "Create L2 filter for SRC MAC"); 793 filter1->flags |= 794 HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_SOURCE_VALID; 795 memcpy(filter1->l2_addr, nf->src_macaddr, RTE_ETHER_ADDR_LEN); 796 } else { 797 PMD_DRV_LOG_LINE(DEBUG, "Create L2 filter for DST MAC"); 798 memcpy(filter1->l2_addr, nf->dst_macaddr, RTE_ETHER_ADDR_LEN); 799 } 800 801 if (nf->priority && 802 (nf->valid_flags & BNXT_FLOW_L2_DST_VALID_FLAG || 803 nf->valid_flags & BNXT_FLOW_L2_INNER_DST_VALID_FLAG)) { 804 /* Tell the FW where to place the filter in the table. */ 805 if (nf->priority > 65535) { 806 filter1->pri_hint = 807 HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_BELOW_FILTER; 808 /* This will place the filter in TCAM */ 809 filter1->l2_filter_id_hint = (uint64_t)-1; 810 } 811 } 812 813 if (nf->valid_flags & (BNXT_FLOW_L2_DST_VALID_FLAG | 814 BNXT_FLOW_L2_SRC_VALID_FLAG | 815 BNXT_FLOW_L2_INNER_SRC_VALID_FLAG | 816 BNXT_FLOW_L2_INNER_DST_VALID_FLAG)) { 817 filter1->enables = 818 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR | 819 L2_FILTER_ALLOC_INPUT_EN_L2_ADDR_MASK; 820 memset(filter1->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN); 821 } 822 823 if (nf->valid_flags & BNXT_FLOW_L2_DROP_FLAG) { 824 filter1->flags |= 825 HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_DROP; 826 if (nf->ethertype == RTE_ETHER_TYPE_IPV4) { 827 /* Num VLANs for drop filter will/should be 0. 828 * If the req is memset to 0, then the count will 829 * be automatically set to 0. 
830 */ 831 if (nf->valid_flags & BNXT_FLOW_PARSE_INNER_FLAG) { 832 filter1->enables |= 833 L2_FILTER_ALLOC_INPUT_EN_T_NUM_VLANS; 834 } else { 835 filter1->enables |= 836 L2_FILTER_ALLOC_INPUT_EN_NUM_VLANS; 837 filter1->flags |= 838 HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST; 839 } 840 } 841 } 842 843 rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, 844 filter1); 845 if (rc) { 846 bnxt_free_filter(bp, filter1); 847 return NULL; 848 } 849 return filter1; 850 } 851 852 struct bnxt_filter_info * 853 bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf, 854 struct bnxt_vnic_info *vnic) 855 { 856 struct bnxt_filter_info *l2_filter = NULL; 857 858 l2_filter = bnxt_find_matching_l2_filter(bp, nf); 859 if (l2_filter) { 860 l2_filter->l2_ref_cnt++; 861 } else { 862 l2_filter = bnxt_create_l2_filter(bp, nf, vnic); 863 if (l2_filter) { 864 STAILQ_INSERT_TAIL(&vnic->filter, l2_filter, next); 865 l2_filter->vnic = vnic; 866 } 867 } 868 nf->matching_l2_fltr_ptr = l2_filter; 869 870 return l2_filter; 871 } 872 873 static void bnxt_vnic_cleanup(struct bnxt *bp, struct bnxt_vnic_info *vnic) 874 { 875 if (vnic->rx_queue_cnt > 1) 876 bnxt_hwrm_vnic_ctx_free(bp, vnic); 877 878 bnxt_hwrm_vnic_free(bp, vnic); 879 880 rte_free(vnic->fw_grp_ids); 881 vnic->fw_grp_ids = NULL; 882 883 vnic->rx_queue_cnt = 0; 884 vnic->hash_type = 0; 885 } 886 887 static int bnxt_vnic_prep(struct bnxt *bp, struct bnxt_vnic_info *vnic, 888 const struct rte_flow_action *act, 889 struct rte_flow_error *error) 890 { 891 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; 892 uint64_t rx_offloads = dev_conf->rxmode.offloads; 893 int rc; 894 895 if (bp->nr_vnics > bp->max_vnics - 1) 896 return rte_flow_error_set(error, EINVAL, 897 RTE_FLOW_ERROR_TYPE_ATTR_GROUP, 898 NULL, 899 "Group id is invalid"); 900 901 rc = bnxt_vnic_grp_alloc(bp, vnic); 902 if (rc) 903 return rte_flow_error_set(error, -rc, 904 RTE_FLOW_ERROR_TYPE_ACTION, 905 act, 906 "Failed to alloc VNIC group"); 907 908 /* populate the fw group table */ 909 bnxt_vnic_ring_grp_populate(bp, vnic); 910 bnxt_vnic_rules_init(vnic); 911 912 rc = bnxt_hwrm_vnic_alloc(bp, vnic); 913 if (rc) { 914 rte_flow_error_set(error, -rc, 915 RTE_FLOW_ERROR_TYPE_ACTION, 916 act, 917 "Failed to alloc VNIC"); 918 goto ret; 919 } 920 921 /* RSS context is required only when there is more than one RSS ring */ 922 if (vnic->rx_queue_cnt > 1) { 923 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0); 924 if (rc) { 925 rte_flow_error_set(error, -rc, 926 RTE_FLOW_ERROR_TYPE_ACTION, 927 act, 928 "Failed to alloc VNIC context"); 929 goto ret; 930 } 931 } 932 933 if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) 934 vnic->vlan_strip = true; 935 else 936 vnic->vlan_strip = false; 937 938 rc = bnxt_hwrm_vnic_cfg(bp, vnic); 939 if (rc) { 940 rte_flow_error_set(error, -rc, 941 RTE_FLOW_ERROR_TYPE_ACTION, 942 act, 943 "Failed to configure VNIC"); 944 goto ret; 945 } 946 947 rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic); 948 if (rc) { 949 rte_flow_error_set(error, -rc, 950 RTE_FLOW_ERROR_TYPE_ACTION, 951 act, 952 "Failed to configure VNIC plcmode"); 953 goto ret; 954 } 955 956 bp->nr_vnics++; 957 958 return 0; 959 960 ret: 961 bnxt_vnic_cleanup(bp, vnic); 962 return rc; 963 } 964 965 static int match_vnic_rss_cfg(struct bnxt *bp, 966 struct bnxt_vnic_info *vnic, 967 const struct rte_flow_action_rss *rss) 968 { 969 unsigned int match = 0, i; 970 971 if (vnic->rx_queue_cnt != rss->queue_num) 972 return -EINVAL; 973 974 for (i = 0; i < rss->queue_num; i++) { 975 if 
(!bp->rx_queues[rss->queue[i]]->vnic->rx_queue_cnt && 976 !bp->rx_queues[rss->queue[i]]->rx_started) 977 return -EINVAL; 978 } 979 980 for (i = 0; i < vnic->rx_queue_cnt; i++) { 981 int j; 982 983 for (j = 0; j < vnic->rx_queue_cnt; j++) { 984 if (bp->grp_info[rss->queue[i]].fw_grp_id == 985 vnic->fw_grp_ids[j]) 986 match++; 987 } 988 } 989 990 if (match != vnic->rx_queue_cnt) { 991 PMD_DRV_LOG_LINE(ERR, 992 "VNIC queue count %d vs queues matched %d", 993 match, vnic->rx_queue_cnt); 994 return -EINVAL; 995 } 996 997 return 0; 998 } 999 1000 static void 1001 bnxt_update_filter_flags_en(struct bnxt_filter_info *filter, 1002 struct bnxt_filter_info *filter1, 1003 int use_ntuple) 1004 { 1005 if (!use_ntuple && 1006 !(filter->valid_flags & 1007 ~(BNXT_FLOW_L2_DST_VALID_FLAG | 1008 BNXT_FLOW_L2_SRC_VALID_FLAG | 1009 BNXT_FLOW_L2_INNER_SRC_VALID_FLAG | 1010 BNXT_FLOW_L2_INNER_DST_VALID_FLAG | 1011 BNXT_FLOW_L2_DROP_FLAG | 1012 BNXT_FLOW_PARSE_INNER_FLAG))) { 1013 filter->flags = filter1->flags; 1014 filter->enables = filter1->enables; 1015 filter->filter_type = HWRM_CFA_L2_FILTER; 1016 memcpy(filter->l2_addr, filter1->l2_addr, RTE_ETHER_ADDR_LEN); 1017 memset(filter->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN); 1018 filter->pri_hint = filter1->pri_hint; 1019 filter->l2_filter_id_hint = filter1->l2_filter_id_hint; 1020 } 1021 filter->fw_l2_filter_id = filter1->fw_l2_filter_id; 1022 filter->l2_ref_cnt = filter1->l2_ref_cnt; 1023 filter->flow_id = filter1->flow_id; 1024 PMD_DRV_LOG_LINE(DEBUG, 1025 "l2_filter: %p fw_l2_filter_id %" PRIx64 " l2_ref_cnt %u", 1026 filter1, filter->fw_l2_filter_id, filter->l2_ref_cnt); 1027 } 1028 1029 static int 1030 bnxt_validate_rss_action(const struct rte_flow_action actions[]) 1031 { 1032 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 1033 switch (actions->type) { 1034 case RTE_FLOW_ACTION_TYPE_VOID: 1035 break; 1036 case RTE_FLOW_ACTION_TYPE_RSS: 1037 break; 1038 default: 1039 return -ENOTSUP; 1040 } 1041 } 1042 1043 return 0; 1044 } 1045 1046 static int 1047 bnxt_get_vnic(struct bnxt *bp, uint32_t group) 1048 { 1049 int vnic_id = 0; 1050 1051 /* For legacy NS3 based implementations, 1052 * group_id will be mapped to a VNIC ID. 1053 */ 1054 if (BNXT_STINGRAY(bp)) 1055 vnic_id = group; 1056 1057 /* Non NS3 cases, group_id will be ignored. 1058 * Setting will be configured on default VNIC. 
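	 * e.g. on a non-Stingray adapter, a flow created with "group 1"
	 * still resolves to VNIC 0 here (illustrative note).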
 */
	return vnic_id;
}

static int
bnxt_vnic_rss_cfg_update(struct bnxt *bp,
			 struct bnxt_vnic_info *vnic,
			 const struct rte_flow_action *act,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_rss *rss;
	unsigned int rss_idx, i, j, fw_idx;
	uint32_t hash_type;
	uint64_t types;
	int rc;

	rss = (const struct rte_flow_action_rss *)act->conf;

	/* must specify either all the Rx queues created by application or zero queues */
	if (rss->queue_num && vnic->rx_queue_cnt != rss->queue_num) {
		rte_flow_error_set(error,
				   EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION,
				   act,
				   "Incorrect RXQ count");
		rc = -rte_errno;
		goto ret;
	}

	/* Validate Rx queues */
	for (i = 0; i < rss->queue_num; i++) {
		PMD_DRV_LOG_LINE(DEBUG, "RSS action Queue %d", rss->queue[i]);

		if (rss->queue[i] >= bp->rx_nr_rings ||
		    !bp->rx_queues[rss->queue[i]]) {
			rte_flow_error_set(error,
					   EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "Invalid queue ID for RSS");
			rc = -rte_errno;
			goto ret;
		}
	}

	/* Duplicate queue ids are not supported. */
	for (i = 0; i < rss->queue_num; i++) {
		for (j = i + 1; j < rss->queue_num; j++) {
			if (rss->queue[i] == rss->queue[j]) {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   act,
						   "Duplicate queue ID for RSS");
				rc = -rte_errno;
				goto ret;
			}
		}
	}

	/* Reject hash functions other than default, Toeplitz or simple XOR. */
	if (!BNXT_IS_HASH_FUNC_DEFAULT(rss->func) &&
	    !BNXT_IS_HASH_FUNC_TOEPLITZ(rss->func) &&
	    !BNXT_IS_HASH_FUNC_SIMPLE_XOR(bp, rss->func)) {
		rte_flow_error_set(error,
				   ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ACTION,
				   act,
				   "Unsupported RSS hash function");
		rc = -rte_errno;
		goto ret;
	}

	/* key_len should match the hash key supported by hardware */
	if (rss->key_len != 0 && rss->key_len != HW_HASH_KEY_SIZE) {
		rte_flow_error_set(error,
				   EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION,
				   act,
				   "Incorrect hash key parameters");
		rc = -rte_errno;
		goto ret;
	}

	/* Currently RSS hash on inner and outer headers is supported.
	 * 0 => Default (innermost RSS) setting
	 * 1 => Outermost
	 */
	if (rss->level > 1) {
		rte_flow_error_set(error,
				   ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ACTION,
				   act,
				   "Unsupported hash level");
		rc = -rte_errno;
		goto ret;
	}

	if ((rss->queue_num == 0 && rss->queue != NULL) ||
	    (rss->queue_num != 0 && rss->queue == NULL)) {
		rte_flow_error_set(error,
				   EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION,
				   act,
				   "Invalid queue config specified");
		rc = -rte_errno;
		goto ret;
	}

	/* If RSS types is 0, use a best effort configuration */
	types = rss->types ? rss->types : RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6;

	hash_type = bnxt_rte_to_hwrm_hash_types(types);

	/* If requested types can't be supported, leave existing settings */
	if (hash_type)
		vnic->hash_type = hash_type;

	vnic->hash_mode =
		bnxt_rte_to_hwrm_hash_level(bp, rss->types, rss->level);

	/* For P7 chips update the hash_type if hash_type not explicitly passed.
	 * TODO: For P5 chips.
1181 */ 1182 if (BNXT_CHIP_P7(bp) && 1183 vnic->hash_mode == BNXT_HASH_MODE_DEFAULT && !hash_type) 1184 vnic->hash_type = HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4 | 1185 HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6; 1186 1187 /* TODO: 1188 * hash will be performed on the L3 and L4 packet headers. 1189 * specific RSS hash types like IPv4-TCP etc... or L4-chksum or IPV4-chksum 1190 * will NOT have any bearing and will not be honored. 1191 * Check and reject flow create accordingly. TODO. 1192 */ 1193 1194 rc = bnxt_rte_flow_to_hwrm_ring_select_mode(rss->func, 1195 rss->types, 1196 bp, vnic); 1197 if (rc) { 1198 rte_flow_error_set(error, 1199 ENOTSUP, 1200 RTE_FLOW_ERROR_TYPE_ACTION, 1201 act, 1202 "Unsupported RSS hash parameters"); 1203 rc = -rte_errno; 1204 goto ret; 1205 } 1206 1207 /* Update RSS key only if key_len != 0 */ 1208 if (rss->key_len != 0) 1209 memcpy(vnic->rss_hash_key, rss->key, rss->key_len); 1210 1211 if (rss->queue_num == 0) 1212 goto skip_rss_table; 1213 1214 /* Prepare the indirection table */ 1215 for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE; 1216 rss_idx++, fw_idx++) { 1217 uint8_t *rxq_state = bp->eth_dev->data->rx_queue_state; 1218 struct bnxt_rx_queue *rxq; 1219 uint32_t idx; 1220 1221 for (i = 0; i < bp->rx_cp_nr_rings; i++) { 1222 idx = rss->queue[fw_idx % rss->queue_num]; 1223 if (rxq_state[idx] != RTE_ETH_QUEUE_STATE_STOPPED) 1224 break; 1225 fw_idx++; 1226 } 1227 1228 if (i == bp->rx_cp_nr_rings) 1229 return 0; 1230 1231 if (BNXT_CHIP_P5_P7(bp)) { 1232 rxq = bp->rx_queues[idx]; 1233 vnic->rss_table[rss_idx * 2] = 1234 rxq->rx_ring->rx_ring_struct->fw_ring_id; 1235 vnic->rss_table[rss_idx * 2 + 1] = 1236 rxq->cp_ring->cp_ring_struct->fw_ring_id; 1237 } else { 1238 vnic->rss_table[rss_idx] = vnic->fw_grp_ids[idx]; 1239 } 1240 } 1241 1242 skip_rss_table: 1243 rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic); 1244 if (rc != 0) { 1245 rte_flow_error_set(error, 1246 -rc, 1247 RTE_FLOW_ERROR_TYPE_ACTION, 1248 act, 1249 "VNIC RSS configure failed"); 1250 vnic->rss_types_local = 0; 1251 rc = -rte_errno; 1252 goto ret; 1253 } 1254 ret: 1255 return rc; 1256 } 1257 1258 static int 1259 bnxt_validate_and_parse_flow(struct rte_eth_dev *dev, 1260 const struct rte_flow_item pattern[], 1261 const struct rte_flow_action actions[], 1262 const struct rte_flow_attr *attr, 1263 struct rte_flow_error *error, 1264 struct bnxt_filter_info *filter) 1265 { 1266 const struct rte_flow_action *act = 1267 bnxt_flow_non_void_action(actions); 1268 struct bnxt *bp = dev->data->dev_private; 1269 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; 1270 struct bnxt_vnic_info *vnic = NULL, *vnic0 = NULL; 1271 const struct rte_flow_action_queue *act_q; 1272 const struct rte_flow_action_vf *act_vf; 1273 struct bnxt_filter_info *filter1 = NULL; 1274 const struct rte_flow_action_rss *rss; 1275 struct bnxt_rx_queue *rxq = NULL; 1276 int dflt_vnic, vnic_id; 1277 unsigned int rss_idx; 1278 uint32_t vf = 0, i; 1279 int rc, use_ntuple; 1280 1281 rc = 1282 bnxt_validate_and_parse_flow_type(attr, pattern, error, filter); 1283 if (rc != 0) 1284 goto ret; 1285 1286 rc = bnxt_flow_parse_attr(attr, error); 1287 if (rc != 0) 1288 goto ret; 1289 1290 /* Since we support ingress attribute only - right now. */ 1291 if (filter->filter_type == HWRM_CFA_EM_FILTER) 1292 filter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX; 1293 1294 use_ntuple = bnxt_filter_type_check(pattern, error); 1295 1296 start: 1297 switch (act->type) { 1298 case RTE_FLOW_ACTION_TYPE_QUEUE: 1299 /* Allow this flow. Redirect to a VNIC. 
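		 * Illustrative testpmd usage, assuming Rx queue 1 exists:
		 *   flow create 0 ingress pattern eth / ipv4 / tcp / end
		 *        actions queue index 1 / end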
*/ 1300 act_q = (const struct rte_flow_action_queue *)act->conf; 1301 if (!act_q->index || act_q->index >= bp->rx_nr_rings) { 1302 rte_flow_error_set(error, 1303 EINVAL, 1304 RTE_FLOW_ERROR_TYPE_ACTION, 1305 act, 1306 "Invalid queue ID."); 1307 rc = -rte_errno; 1308 goto ret; 1309 } 1310 PMD_DRV_LOG_LINE(DEBUG, "Queue index %d", act_q->index); 1311 1312 vnic_id = attr->group; 1313 if (!vnic_id) { 1314 PMD_DRV_LOG_LINE(DEBUG, "Group id is 0"); 1315 vnic_id = act_q->index; 1316 } 1317 1318 BNXT_VALID_VNIC_OR_RET(bp, vnic_id); 1319 1320 vnic = &bp->vnic_info[vnic_id]; 1321 if (vnic->rx_queue_cnt) { 1322 if (vnic->start_grp_id != act_q->index) { 1323 PMD_DRV_LOG_LINE(ERR, 1324 "VNIC already in use"); 1325 rte_flow_error_set(error, 1326 EINVAL, 1327 RTE_FLOW_ERROR_TYPE_ACTION, 1328 act, 1329 "VNIC already in use"); 1330 rc = -rte_errno; 1331 goto ret; 1332 } 1333 goto use_vnic; 1334 } 1335 1336 rxq = bp->rx_queues[act_q->index]; 1337 1338 if (!(dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) && rxq && 1339 vnic->fw_vnic_id != INVALID_HW_RING_ID) 1340 goto use_vnic; 1341 1342 if (!rxq) { 1343 PMD_DRV_LOG_LINE(ERR, 1344 "Queue invalid or used with other VNIC"); 1345 rte_flow_error_set(error, 1346 EINVAL, 1347 RTE_FLOW_ERROR_TYPE_ACTION, 1348 act, 1349 "Queue invalid queue or in use"); 1350 rc = -rte_errno; 1351 goto ret; 1352 } 1353 1354 rxq->vnic = vnic; 1355 rxq->rx_started = 1; 1356 vnic->rx_queue_cnt++; 1357 vnic->start_grp_id = act_q->index; 1358 vnic->end_grp_id = act_q->index; 1359 vnic->func_default = 0; //This is not a default VNIC. 1360 1361 PMD_DRV_LOG_LINE(DEBUG, "VNIC found"); 1362 1363 rc = bnxt_vnic_prep(bp, vnic, act, error); 1364 if (rc) 1365 goto ret; 1366 1367 PMD_DRV_LOG_LINE(DEBUG, 1368 "vnic[%d] = %p vnic->fw_grp_ids = %p", 1369 act_q->index, vnic, vnic->fw_grp_ids); 1370 1371 use_vnic: 1372 vnic->ff_pool_idx = vnic_id; 1373 PMD_DRV_LOG_LINE(DEBUG, 1374 "Setting vnic ff_idx %d", vnic->ff_pool_idx); 1375 filter->dst_id = vnic->fw_vnic_id; 1376 1377 /* For ntuple filter, create the L2 filter with default VNIC. 1378 * The user specified redirect queue will be set while creating 1379 * the ntuple filter in hardware. 
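		 * e.g. an IPv4/TCP 5-tuple flow gets its backing L2 filter on
		 * the default VNIC (vnic0) below, and the user's queue is
		 * applied later when the ntuple filter itself is programmed.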
1380 */ 1381 vnic0 = bnxt_get_default_vnic(bp); 1382 if (use_ntuple) 1383 filter1 = bnxt_get_l2_filter(bp, filter, vnic0); 1384 else 1385 filter1 = bnxt_get_l2_filter(bp, filter, vnic); 1386 if (filter1 == NULL) { 1387 rte_flow_error_set(error, 1388 ENOSPC, 1389 RTE_FLOW_ERROR_TYPE_ACTION, 1390 act, 1391 "Filter not available"); 1392 rc = -rte_errno; 1393 goto ret; 1394 } 1395 1396 PMD_DRV_LOG_LINE(DEBUG, "new fltr: %p l2fltr: %p l2_ref_cnt: %d", 1397 filter, filter1, filter1->l2_ref_cnt); 1398 bnxt_update_filter_flags_en(filter, filter1, use_ntuple); 1399 break; 1400 case RTE_FLOW_ACTION_TYPE_DROP: 1401 vnic0 = &bp->vnic_info[0]; 1402 filter->dst_id = vnic0->fw_vnic_id; 1403 filter->valid_flags |= BNXT_FLOW_L2_DROP_FLAG; 1404 filter1 = bnxt_get_l2_filter(bp, filter, vnic0); 1405 if (filter1 == NULL) { 1406 rte_flow_error_set(error, 1407 ENOSPC, 1408 RTE_FLOW_ERROR_TYPE_ACTION, 1409 act, 1410 "Filter not available"); 1411 rc = -rte_errno; 1412 goto ret; 1413 } 1414 1415 if (filter->filter_type == HWRM_CFA_EM_FILTER) 1416 filter->flags = 1417 HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP; 1418 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) 1419 filter->flags = 1420 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP; 1421 1422 bnxt_update_filter_flags_en(filter, filter1, use_ntuple); 1423 break; 1424 case RTE_FLOW_ACTION_TYPE_VF: 1425 act_vf = (const struct rte_flow_action_vf *)act->conf; 1426 vf = act_vf->id; 1427 1428 if (filter->tunnel_type == 1429 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN || 1430 filter->tunnel_type == 1431 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE) { 1432 /* If issued on a VF, ensure id is 0 and is trusted */ 1433 if (BNXT_VF(bp)) { 1434 if (!BNXT_VF_IS_TRUSTED(bp) || vf) { 1435 rte_flow_error_set(error, EINVAL, 1436 RTE_FLOW_ERROR_TYPE_ACTION, 1437 act, 1438 "Incorrect VF"); 1439 rc = -rte_errno; 1440 goto ret; 1441 } 1442 } 1443 1444 filter->enables |= filter->tunnel_type; 1445 filter->filter_type = HWRM_CFA_TUNNEL_REDIRECT_FILTER; 1446 goto done; 1447 } 1448 1449 if (vf >= bp->pdev->max_vfs) { 1450 rte_flow_error_set(error, 1451 EINVAL, 1452 RTE_FLOW_ERROR_TYPE_ACTION, 1453 act, 1454 "Incorrect VF id!"); 1455 rc = -rte_errno; 1456 goto ret; 1457 } 1458 1459 filter->mirror_vnic_id = 1460 dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf); 1461 if (dflt_vnic < 0) { 1462 /* This simply indicates there's no driver loaded. 1463 * This is not an error. 
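			 * The flow is still rejected below, though, because the
			 * VF's default VNIC cannot be resolved without a driver.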
1464 */ 1465 rte_flow_error_set(error, 1466 EINVAL, 1467 RTE_FLOW_ERROR_TYPE_ACTION, 1468 act, 1469 "Unable to get default VNIC for VF"); 1470 rc = -rte_errno; 1471 goto ret; 1472 } 1473 1474 filter->mirror_vnic_id = dflt_vnic; 1475 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID; 1476 1477 vnic0 = &bp->vnic_info[0]; 1478 filter1 = bnxt_get_l2_filter(bp, filter, vnic0); 1479 if (filter1 == NULL) { 1480 rte_flow_error_set(error, 1481 ENOSPC, 1482 RTE_FLOW_ERROR_TYPE_ACTION, 1483 act, 1484 "New filter not available"); 1485 rc = -rte_errno; 1486 goto ret; 1487 } 1488 1489 filter->fw_l2_filter_id = filter1->fw_l2_filter_id; 1490 filter->flow_id = filter1->flow_id; 1491 break; 1492 case RTE_FLOW_ACTION_TYPE_RSS: 1493 rc = bnxt_validate_rss_action(actions); 1494 if (rc != 0) { 1495 rte_flow_error_set(error, 1496 EINVAL, 1497 RTE_FLOW_ERROR_TYPE_ACTION, 1498 act, 1499 "Invalid actions specified with RSS"); 1500 rc = -rte_errno; 1501 goto ret; 1502 } 1503 1504 rss = (const struct rte_flow_action_rss *)act->conf; 1505 1506 vnic_id = bnxt_get_vnic(bp, attr->group); 1507 1508 BNXT_VALID_VNIC_OR_RET(bp, vnic_id); 1509 vnic = &bp->vnic_info[vnic_id]; 1510 1511 /* 1512 * For non NS3 cases, rte_flow_items will not be considered 1513 * for RSS updates. 1514 */ 1515 if (filter->filter_type == HWRM_CFA_CONFIG) { 1516 /* RSS config update requested */ 1517 rc = bnxt_vnic_rss_cfg_update(bp, vnic, act, error); 1518 if (rc != 0) 1519 goto ret; 1520 1521 filter->dst_id = vnic->fw_vnic_id; 1522 break; 1523 } 1524 1525 /* Check if requested RSS config matches RSS config of VNIC 1526 * only if it is not a fresh VNIC configuration. 1527 * Otherwise the existing VNIC configuration can be used. 1528 */ 1529 if (vnic->rx_queue_cnt) { 1530 rc = match_vnic_rss_cfg(bp, vnic, rss); 1531 if (rc) { 1532 PMD_DRV_LOG_LINE(ERR, 1533 "VNIC and RSS config mismatch"); 1534 rte_flow_error_set(error, 1535 EINVAL, 1536 RTE_FLOW_ERROR_TYPE_ACTION, 1537 act, 1538 "VNIC and RSS cfg mismatch"); 1539 rc = -rte_errno; 1540 goto ret; 1541 } 1542 goto vnic_found; 1543 } 1544 1545 for (i = 0; i < rss->queue_num; i++) { 1546 PMD_DRV_LOG_LINE(DEBUG, "RSS action Queue %d", 1547 rss->queue[i]); 1548 1549 if (!rss->queue[i] || 1550 rss->queue[i] >= bp->rx_nr_rings || 1551 !bp->rx_queues[rss->queue[i]]) { 1552 rte_flow_error_set(error, 1553 EINVAL, 1554 RTE_FLOW_ERROR_TYPE_ACTION, 1555 act, 1556 "Invalid queue ID for RSS"); 1557 rc = -rte_errno; 1558 goto ret; 1559 } 1560 rxq = bp->rx_queues[rss->queue[i]]; 1561 1562 if (bp->vnic_info[0].fw_grp_ids[rss->queue[i]] != 1563 INVALID_HW_RING_ID) { 1564 PMD_DRV_LOG_LINE(ERR, 1565 "queue active with other VNIC"); 1566 rte_flow_error_set(error, 1567 EINVAL, 1568 RTE_FLOW_ERROR_TYPE_ACTION, 1569 act, 1570 "Invalid queue ID for RSS"); 1571 rc = -rte_errno; 1572 goto ret; 1573 } 1574 1575 rxq->vnic = vnic; 1576 rxq->rx_started = 1; 1577 vnic->rx_queue_cnt++; 1578 } 1579 1580 vnic->start_grp_id = rss->queue[0]; 1581 vnic->end_grp_id = rss->queue[rss->queue_num - 1]; 1582 vnic->func_default = 0; //This is not a default VNIC. 1583 1584 rc = bnxt_vnic_prep(bp, vnic, act, error); 1585 if (rc) 1586 goto ret; 1587 1588 PMD_DRV_LOG_LINE(DEBUG, 1589 "vnic[%d] = %p vnic->fw_grp_ids = %p", 1590 vnic_id, vnic, vnic->fw_grp_ids); 1591 1592 vnic->ff_pool_idx = vnic_id; 1593 PMD_DRV_LOG_LINE(DEBUG, 1594 "Setting vnic ff_pool_idx %d", vnic->ff_pool_idx); 1595 1596 /* This can be done only after vnic_grp_alloc is done. 
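		 * (bnxt_vnic_prep() above called bnxt_vnic_grp_alloc(), so
		 * vnic->fw_grp_ids is now valid and can be populated.)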
*/ 1597 for (i = 0; i < vnic->rx_queue_cnt; i++) { 1598 vnic->fw_grp_ids[i] = 1599 bp->grp_info[rss->queue[i]].fw_grp_id; 1600 /* Make sure vnic0 does not use these rings. */ 1601 bp->vnic_info[0].fw_grp_ids[rss->queue[i]] = 1602 INVALID_HW_RING_ID; 1603 } 1604 1605 for (rss_idx = 0; rss_idx < HW_HASH_INDEX_SIZE; ) { 1606 for (i = 0; i < vnic->rx_queue_cnt; i++) 1607 vnic->rss_table[rss_idx++] = 1608 vnic->fw_grp_ids[i]; 1609 } 1610 1611 /* Configure RSS only if the queue count is > 1 */ 1612 if (vnic->rx_queue_cnt > 1) { 1613 vnic->hash_type = 1614 bnxt_rte_to_hwrm_hash_types(rss->types); 1615 vnic->hash_mode = 1616 bnxt_rte_to_hwrm_hash_level(bp, rss->types, rss->level); 1617 1618 if (!rss->key_len) { 1619 /* If hash key has not been specified, 1620 * use random hash key. 1621 */ 1622 bnxt_prandom_bytes(vnic->rss_hash_key, 1623 HW_HASH_KEY_SIZE); 1624 } else { 1625 if (rss->key_len > HW_HASH_KEY_SIZE) 1626 memcpy(vnic->rss_hash_key, 1627 rss->key, 1628 HW_HASH_KEY_SIZE); 1629 else 1630 memcpy(vnic->rss_hash_key, 1631 rss->key, 1632 rss->key_len); 1633 } 1634 bnxt_hwrm_vnic_rss_cfg(bp, vnic); 1635 } else { 1636 PMD_DRV_LOG_LINE(DEBUG, "No RSS config required"); 1637 } 1638 1639 vnic_found: 1640 filter->dst_id = vnic->fw_vnic_id; 1641 filter1 = bnxt_get_l2_filter(bp, filter, vnic); 1642 if (filter1 == NULL) { 1643 rte_flow_error_set(error, 1644 ENOSPC, 1645 RTE_FLOW_ERROR_TYPE_ACTION, 1646 act, 1647 "New filter not available"); 1648 rc = -rte_errno; 1649 goto ret; 1650 } 1651 1652 PMD_DRV_LOG_LINE(DEBUG, "L2 filter created"); 1653 bnxt_update_filter_flags_en(filter, filter1, use_ntuple); 1654 break; 1655 case RTE_FLOW_ACTION_TYPE_MARK: 1656 if (bp->mark_table == NULL) { 1657 rte_flow_error_set(error, 1658 ENOMEM, 1659 RTE_FLOW_ERROR_TYPE_ACTION, 1660 act, 1661 "Mark table not allocated."); 1662 rc = -rte_errno; 1663 goto ret; 1664 } 1665 1666 if (bp->flags & BNXT_FLAG_RX_VECTOR_PKT_MODE) { 1667 PMD_DRV_LOG_LINE(DEBUG, 1668 "Disabling vector processing for mark"); 1669 bp->eth_dev->rx_pkt_burst = bnxt_recv_pkts; 1670 bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE; 1671 } 1672 1673 filter->valid_flags |= BNXT_FLOW_MARK_FLAG; 1674 filter->mark = ((const struct rte_flow_action_mark *) 1675 act->conf)->id; 1676 PMD_DRV_LOG_LINE(DEBUG, "Mark the flow %d", filter->mark); 1677 break; 1678 default: 1679 rte_flow_error_set(error, 1680 EINVAL, 1681 RTE_FLOW_ERROR_TYPE_ACTION, 1682 act, 1683 "Invalid action."); 1684 rc = -rte_errno; 1685 goto ret; 1686 } 1687 1688 done: 1689 act = bnxt_flow_non_void_action(++act); 1690 while (act->type != RTE_FLOW_ACTION_TYPE_END) 1691 goto start; 1692 1693 return rc; 1694 ret: 1695 1696 if (filter1) { 1697 bnxt_hwrm_clear_l2_filter(bp, filter1); 1698 bnxt_free_filter(bp, filter1); 1699 } 1700 1701 if (rte_errno) { 1702 if (vnic && STAILQ_EMPTY(&vnic->filter)) { 1703 vnic->rx_queue_cnt = 0; 1704 vnic->rss_types_local = 0; 1705 } 1706 1707 if (rxq && !vnic->rx_queue_cnt) 1708 rxq->vnic = &bp->vnic_info[0]; 1709 } 1710 return -rte_errno; 1711 } 1712 1713 static 1714 struct bnxt_vnic_info *find_matching_vnic(struct bnxt *bp, 1715 struct bnxt_filter_info *filter) 1716 { 1717 struct bnxt_vnic_info *vnic = NULL; 1718 unsigned int i; 1719 1720 for (i = 0; i < bp->max_vnics; i++) { 1721 vnic = &bp->vnic_info[i]; 1722 if (vnic->fw_vnic_id != INVALID_VNIC_ID && 1723 filter->dst_id == vnic->fw_vnic_id) { 1724 PMD_DRV_LOG_LINE(DEBUG, "Found matching VNIC Id %d", 1725 vnic->ff_pool_idx); 1726 return vnic; 1727 } 1728 } 1729 return NULL; 1730 } 1731 1732 static int 1733 
bnxt_flow_validate(struct rte_eth_dev *dev, 1734 const struct rte_flow_attr *attr, 1735 const struct rte_flow_item pattern[], 1736 const struct rte_flow_action actions[], 1737 struct rte_flow_error *error) 1738 { 1739 struct bnxt *bp = dev->data->dev_private; 1740 struct bnxt_vnic_info *vnic = NULL; 1741 struct bnxt_filter_info *filter; 1742 int ret = 0; 1743 1744 bnxt_acquire_flow_lock(bp); 1745 ret = bnxt_flow_args_validate(attr, pattern, actions, error); 1746 if (ret != 0) { 1747 bnxt_release_flow_lock(bp); 1748 return ret; 1749 } 1750 1751 filter = bnxt_get_unused_filter(bp); 1752 if (filter == NULL) { 1753 rte_flow_error_set(error, ENOSPC, 1754 RTE_FLOW_ERROR_TYPE_HANDLE, NULL, 1755 "Not enough resources for a new flow"); 1756 bnxt_release_flow_lock(bp); 1757 return -ENOSPC; 1758 } 1759 1760 ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr, 1761 error, filter); 1762 if (ret) 1763 goto exit; 1764 1765 vnic = find_matching_vnic(bp, filter); 1766 if (vnic) { 1767 if (STAILQ_EMPTY(&vnic->filter)) { 1768 bnxt_vnic_cleanup(bp, vnic); 1769 bp->nr_vnics--; 1770 PMD_DRV_LOG_LINE(DEBUG, "Free VNIC"); 1771 } 1772 } 1773 1774 if (filter->filter_type == HWRM_CFA_EM_FILTER) 1775 bnxt_hwrm_clear_em_filter(bp, filter); 1776 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) 1777 bnxt_hwrm_clear_ntuple_filter(bp, filter); 1778 else 1779 bnxt_hwrm_clear_l2_filter(bp, filter); 1780 1781 exit: 1782 /* No need to hold on to this filter if we are just validating flow */ 1783 bnxt_free_filter(bp, filter); 1784 bnxt_release_flow_lock(bp); 1785 1786 return ret; 1787 } 1788 1789 static void 1790 bnxt_update_filter(struct bnxt *bp, struct bnxt_filter_info *old_filter, 1791 struct bnxt_filter_info *new_filter) 1792 { 1793 /* Clear the new L2 filter that was created in the previous step in 1794 * bnxt_validate_and_parse_flow. For L2 filters, we will use the new 1795 * filter which points to the new destination queue and so we clear 1796 * the previous L2 filter. For ntuple filters, we are going to reuse 1797 * the old L2 filter and create new NTUPLE filter with this new 1798 * destination queue subsequently during bnxt_flow_create. So we 1799 * decrement the ref cnt of the L2 filter that would've been bumped 1800 * up previously in bnxt_validate_and_parse_flow as the old n-tuple 1801 * filter that was referencing it will be deleted now. 
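 * For example (illustrative), moving an existing IPv4/TCP ntuple flow from
 * queue 1 to queue 2 keeps the old L2 filter and only re-programs the
 * ntuple entry with the new destination.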
1802 */ 1803 bnxt_hwrm_clear_l2_filter(bp, old_filter); 1804 if (new_filter->filter_type == HWRM_CFA_L2_FILTER) { 1805 bnxt_hwrm_set_l2_filter(bp, new_filter->dst_id, new_filter); 1806 } else { 1807 if (new_filter->filter_type == HWRM_CFA_EM_FILTER) 1808 bnxt_hwrm_clear_em_filter(bp, old_filter); 1809 if (new_filter->filter_type == HWRM_CFA_NTUPLE_FILTER) 1810 bnxt_hwrm_clear_ntuple_filter(bp, old_filter); 1811 } 1812 } 1813 1814 static int 1815 bnxt_match_filter(struct bnxt *bp, struct bnxt_filter_info *nf) 1816 { 1817 struct bnxt_filter_info *mf; 1818 struct rte_flow *flow; 1819 int i; 1820 1821 for (i = bp->max_vnics - 1; i >= 0; i--) { 1822 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 1823 1824 if (vnic->fw_vnic_id == INVALID_VNIC_ID) 1825 continue; 1826 1827 STAILQ_FOREACH(flow, &vnic->flow_list, next) { 1828 mf = flow->filter; 1829 1830 if (mf->filter_type == nf->filter_type && 1831 mf->flags == nf->flags && 1832 mf->src_port == nf->src_port && 1833 mf->src_port_mask == nf->src_port_mask && 1834 mf->dst_port == nf->dst_port && 1835 mf->dst_port_mask == nf->dst_port_mask && 1836 mf->ip_protocol == nf->ip_protocol && 1837 mf->ip_addr_type == nf->ip_addr_type && 1838 mf->ethertype == nf->ethertype && 1839 mf->vni == nf->vni && 1840 mf->tunnel_type == nf->tunnel_type && 1841 mf->l2_ovlan == nf->l2_ovlan && 1842 mf->l2_ovlan_mask == nf->l2_ovlan_mask && 1843 mf->l2_ivlan == nf->l2_ivlan && 1844 mf->l2_ivlan_mask == nf->l2_ivlan_mask && 1845 !memcmp(mf->l2_addr, nf->l2_addr, 1846 RTE_ETHER_ADDR_LEN) && 1847 !memcmp(mf->l2_addr_mask, nf->l2_addr_mask, 1848 RTE_ETHER_ADDR_LEN) && 1849 !memcmp(mf->src_macaddr, nf->src_macaddr, 1850 RTE_ETHER_ADDR_LEN) && 1851 !memcmp(mf->dst_macaddr, nf->dst_macaddr, 1852 RTE_ETHER_ADDR_LEN) && 1853 !memcmp(mf->src_ipaddr, nf->src_ipaddr, 1854 sizeof(nf->src_ipaddr)) && 1855 !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask, 1856 sizeof(nf->src_ipaddr_mask)) && 1857 !memcmp(mf->dst_ipaddr, nf->dst_ipaddr, 1858 sizeof(nf->dst_ipaddr)) && 1859 !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask, 1860 sizeof(nf->dst_ipaddr_mask))) { 1861 if (mf->dst_id == nf->dst_id) 1862 return -EEXIST; 1863 /* Free the old filter, update flow 1864 * with new filter 1865 */ 1866 bnxt_update_filter(bp, mf, nf); 1867 STAILQ_REMOVE(&vnic->filter, mf, 1868 bnxt_filter_info, next); 1869 STAILQ_INSERT_TAIL(&vnic->filter, nf, next); 1870 bnxt_free_filter(bp, mf); 1871 flow->filter = nf; 1872 return -EXDEV; 1873 } 1874 } 1875 } 1876 return 0; 1877 } 1878 1879 static void 1880 bnxt_setup_flow_counter(struct bnxt *bp) 1881 { 1882 if (bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_COUNTERS && 1883 !(bp->flags & BNXT_FLAG_FC_THREAD) && BNXT_FLOW_XSTATS_EN(bp)) { 1884 rte_eal_alarm_set(US_PER_S * BNXT_FC_TIMER, 1885 bnxt_flow_cnt_alarm_cb, 1886 (void *)bp); 1887 bp->flags |= BNXT_FLAG_FC_THREAD; 1888 } 1889 } 1890 1891 void bnxt_flow_cnt_alarm_cb(void *arg) 1892 { 1893 int rc = 0; 1894 struct bnxt *bp = arg; 1895 1896 if (!bp->flow_stat->rx_fc_out_tbl.va) { 1897 PMD_DRV_LOG_LINE(ERR, "bp->flow_stat->rx_fc_out_tbl.va is NULL?"); 1898 bnxt_cancel_fc_thread(bp); 1899 return; 1900 } 1901 1902 if (!bp->flow_stat->flow_count) { 1903 bnxt_cancel_fc_thread(bp); 1904 return; 1905 } 1906 1907 if (!bp->eth_dev->data->dev_started) { 1908 bnxt_cancel_fc_thread(bp); 1909 return; 1910 } 1911 1912 rc = bnxt_flow_stats_req(bp); 1913 if (rc) { 1914 PMD_DRV_LOG_LINE(ERR, "Flow stat alarm not rescheduled."); 1915 return; 1916 } 1917 1918 rte_eal_alarm_set(US_PER_S * BNXT_FC_TIMER, 1919 bnxt_flow_cnt_alarm_cb, 1920 (void 
*)bp); 1921 } 1922 1923 /* Query an requested flow rule. */ 1924 static int 1925 bnxt_flow_query_all(struct rte_flow *flow, 1926 const struct rte_flow_action *actions, void *data, 1927 struct rte_flow_error *error) 1928 { 1929 struct rte_flow_action_rss *rss_conf; 1930 struct bnxt_vnic_info *vnic; 1931 1932 vnic = flow->vnic; 1933 if (vnic == NULL) 1934 return rte_flow_error_set(error, EINVAL, 1935 RTE_FLOW_ERROR_TYPE_HANDLE, flow, 1936 "Invalid flow: failed to query flow."); 1937 1938 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 1939 switch (actions->type) { 1940 case RTE_FLOW_ACTION_TYPE_VOID: 1941 break; 1942 case RTE_FLOW_ACTION_TYPE_COUNT: 1943 break; 1944 case RTE_FLOW_ACTION_TYPE_RSS: 1945 /* Full details of rte_flow_action_rss not available yet TBD*/ 1946 rss_conf = (struct rte_flow_action_rss *)data; 1947 1948 /* toeplitz is default */ 1949 if (vnic->ring_select_mode == 1950 HWRM_VNIC_RSS_CFG_INPUT_RING_SELECT_MODE_TOEPLITZ) 1951 rss_conf->func = vnic->hash_f_local; 1952 else 1953 rss_conf->func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR; 1954 1955 break; 1956 default: 1957 return rte_flow_error_set(error, ENOTSUP, 1958 RTE_FLOW_ERROR_TYPE_ACTION, actions, 1959 "action is not supported"); 1960 } 1961 } 1962 1963 return 0; 1964 } 1965 1966 static int 1967 bnxt_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow, 1968 const struct rte_flow_action *actions, void *data, 1969 struct rte_flow_error *error) 1970 { 1971 struct bnxt *bp = dev->data->dev_private; 1972 int ret = 0; 1973 1974 if (bp == NULL) 1975 return -ENODEV; 1976 1977 bnxt_acquire_flow_lock(bp); 1978 ret = bnxt_flow_query_all(flow, actions, data, error); 1979 bnxt_release_flow_lock(bp); 1980 1981 return ret; 1982 } 1983 1984 static struct rte_flow * 1985 bnxt_flow_create(struct rte_eth_dev *dev, 1986 const struct rte_flow_attr *attr, 1987 const struct rte_flow_item pattern[], 1988 const struct rte_flow_action actions[], 1989 struct rte_flow_error *error) 1990 { 1991 struct bnxt *bp = dev->data->dev_private; 1992 struct bnxt_vnic_info *vnic = NULL; 1993 struct bnxt_filter_info *filter; 1994 bool update_flow = false; 1995 struct rte_flow *flow; 1996 int ret = 0; 1997 uint32_t tun_type, flow_id; 1998 1999 if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) { 2000 rte_flow_error_set(error, EINVAL, 2001 RTE_FLOW_ERROR_TYPE_HANDLE, NULL, 2002 "Failed to create flow, Not a Trusted VF!"); 2003 return NULL; 2004 } 2005 2006 if (!dev->data->dev_started) { 2007 rte_flow_error_set(error, 2008 EINVAL, 2009 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 2010 NULL, 2011 "Device must be started"); 2012 return NULL; 2013 } 2014 2015 flow = rte_zmalloc("bnxt_flow", sizeof(struct rte_flow), 0); 2016 if (!flow) { 2017 rte_flow_error_set(error, ENOMEM, 2018 RTE_FLOW_ERROR_TYPE_HANDLE, NULL, 2019 "Failed to allocate memory"); 2020 return flow; 2021 } 2022 2023 bnxt_acquire_flow_lock(bp); 2024 ret = bnxt_flow_args_validate(attr, pattern, actions, error); 2025 if (ret != 0) { 2026 PMD_DRV_LOG_LINE(ERR, "Not a validate flow."); 2027 goto free_flow; 2028 } 2029 2030 filter = bnxt_get_unused_filter(bp); 2031 if (filter == NULL) { 2032 rte_flow_error_set(error, ENOSPC, 2033 RTE_FLOW_ERROR_TYPE_HANDLE, NULL, 2034 "Not enough resources for a new flow"); 2035 goto free_flow; 2036 } 2037 2038 ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr, 2039 error, filter); 2040 if (ret != 0) 2041 goto free_filter; 2042 2043 ret = bnxt_match_filter(bp, filter); 2044 if (ret == -EEXIST) { 2045 PMD_DRV_LOG_LINE(DEBUG, "Flow already exists."); 2046 /* Clear 
the filter that was created as part of 2047 * validate_and_parse_flow() above 2048 */ 2049 bnxt_hwrm_clear_l2_filter(bp, filter); 2050 goto free_filter; 2051 } else if (ret == -EXDEV) { 2052 PMD_DRV_LOG_LINE(DEBUG, "Flow with same pattern exists"); 2053 PMD_DRV_LOG_LINE(DEBUG, "Updating with different destination"); 2054 update_flow = true; 2055 } 2056 2057 /* If tunnel redirection to a VF/PF is specified then only tunnel_type 2058 * is set and enable is set to the tunnel type. Issue hwrm cmd directly 2059 * in such a case. 2060 */ 2061 if (filter->filter_type == HWRM_CFA_TUNNEL_REDIRECT_FILTER && 2062 (filter->enables == filter->tunnel_type || 2063 filter->tunnel_type == CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN || 2064 filter->tunnel_type == CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE)) { 2065 if (filter->enables & NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT) { 2066 struct rte_eth_udp_tunnel tunnel = {0}; 2067 2068 /* hwrm_tunnel_dst_port_alloc converts to Big Endian */ 2069 tunnel.udp_port = BNXT_NTOHS(filter->dst_port); 2070 if (filter->tunnel_type == 2071 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN) { 2072 tunnel.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN; 2073 } else if (filter->tunnel_type == 2074 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE) { 2075 tunnel.prot_type = RTE_ETH_TUNNEL_TYPE_GENEVE; 2076 } else { 2077 rte_flow_error_set(error, EINVAL, 2078 RTE_FLOW_ERROR_TYPE_HANDLE, 2079 NULL, 2080 "Invalid tunnel type"); 2081 ret = -EINVAL; 2082 goto free_filter; 2083 } 2084 ret = bnxt_udp_tunnel_port_add_op(bp->eth_dev, &tunnel); 2085 if (ret != 0) { 2086 rte_flow_error_set(error, -ret, 2087 RTE_FLOW_ERROR_TYPE_HANDLE, 2088 NULL, 2089 "Fail to add tunnel port"); 2090 goto free_filter; 2091 } 2092 } 2093 ret = bnxt_hwrm_tunnel_redirect_query(bp, &tun_type); 2094 if (ret) { 2095 rte_flow_error_set(error, -ret, 2096 RTE_FLOW_ERROR_TYPE_HANDLE, NULL, 2097 "Unable to query tunnel to VF"); 2098 goto free_filter; 2099 } 2100 if (tun_type == (1U << filter->tunnel_type)) { 2101 ret = 2102 bnxt_hwrm_tunnel_redirect_free(bp, 2103 filter->tunnel_type); 2104 if (ret) { 2105 PMD_DRV_LOG_LINE(ERR, 2106 "Unable to free existing tunnel"); 2107 rte_flow_error_set(error, -ret, 2108 RTE_FLOW_ERROR_TYPE_HANDLE, 2109 NULL, 2110 "Unable to free preexisting " 2111 "tunnel on VF"); 2112 goto free_filter; 2113 } 2114 } 2115 ret = bnxt_hwrm_tunnel_redirect(bp, filter->tunnel_type); 2116 if (ret) { 2117 rte_flow_error_set(error, -ret, 2118 RTE_FLOW_ERROR_TYPE_HANDLE, NULL, 2119 "Unable to redirect tunnel to VF"); 2120 goto free_filter; 2121 } 2122 vnic = &bp->vnic_info[0]; 2123 goto done; 2124 } 2125 2126 if (filter->filter_type == HWRM_CFA_EM_FILTER) { 2127 filter->enables |= 2128 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID; 2129 ret = bnxt_hwrm_set_em_filter(bp, filter->dst_id, filter); 2130 if (ret != 0) { 2131 rte_flow_error_set(error, -ret, 2132 RTE_FLOW_ERROR_TYPE_HANDLE, NULL, 2133 "Failed to create EM filter"); 2134 goto free_filter; 2135 } 2136 } 2137 2138 if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) { 2139 filter->enables |= 2140 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID; 2141 ret = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, filter); 2142 if (ret != 0) { 2143 rte_flow_error_set(error, -ret, 2144 RTE_FLOW_ERROR_TYPE_HANDLE, NULL, 2145 "Failed to create ntuple filter"); 2146 goto free_filter; 2147 } 2148 } 2149 2150 vnic = find_matching_vnic(bp, filter); 2151 done: 2152 if (!ret || update_flow) { 2153 flow->filter = filter; 2154 flow->vnic = vnic; 2155 if (update_flow) { 
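			/* An existing flow with the same pattern was updated with a
			 * new destination; -EXDEV is reported so the caller sees the
			 * informational error set under free_flow below.
			 */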
			ret = -EXDEV;
			goto free_flow;
		}

		if (filter->valid_flags & BNXT_FLOW_MARK_FLAG) {
			PMD_DRV_LOG_LINE(DEBUG,
					 "Mark action: mark id 0x%x, flow id 0x%x",
					 filter->mark, filter->flow_id);

			/* TCAM and EM should be 16-bit only.
			 * Other modes not supported.
			 */
			flow_id = filter->flow_id & BNXT_FLOW_ID_MASK;
			if (bp->mark_table[flow_id].valid) {
				rte_flow_error_set(error, EEXIST,
						   RTE_FLOW_ERROR_TYPE_HANDLE,
						   NULL,
						   "Flow with mark id exists");
				bnxt_clear_one_vnic_filter(bp, filter);
				goto free_filter;
			}
			bp->mark_table[flow_id].valid = true;
			bp->mark_table[flow_id].mark_id = filter->mark;
		}

		STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
		STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);

		if (BNXT_FLOW_XSTATS_EN(bp))
			bp->flow_stat->flow_count++;
		bnxt_release_flow_lock(bp);
		bnxt_setup_flow_counter(bp);
		PMD_DRV_LOG_LINE(DEBUG, "Successfully created flow.");
		return flow;
	}

free_filter:
	bnxt_free_filter(bp, filter);
free_flow:
	if (ret == -EEXIST)
		rte_flow_error_set(error, ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Matching flow exists.");
	else if (ret == -EXDEV)
		rte_flow_error_set(error, 0,
				   RTE_FLOW_ERROR_TYPE_NONE, NULL,
				   "Flow with pattern exists, updating destination queue");
	else if (!rte_errno)
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to create flow.");
	rte_free(flow);
	flow = NULL;
	bnxt_release_flow_lock(bp);
	return flow;
}

static int bnxt_handle_tunnel_redirect_destroy(struct bnxt *bp,
					       struct bnxt_filter_info *filter,
					       struct rte_flow_error *error)
{
	uint16_t tun_dst_fid;
	uint32_t tun_type;
	int ret = 0;

	ret = bnxt_hwrm_tunnel_redirect_query(bp, &tun_type);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Unable to query tunnel to VF");
		return ret;
	}
	if (tun_type == (1U << filter->tunnel_type)) {
		ret = bnxt_hwrm_tunnel_redirect_info(bp, filter->tunnel_type,
						     &tun_dst_fid);
		if (ret) {
			rte_flow_error_set(error, -ret,
					   RTE_FLOW_ERROR_TYPE_HANDLE,
					   NULL,
					   "tunnel_redirect info cmd fail");
			return ret;
		}
		PMD_DRV_LOG_LINE(INFO, "Pre-existing tunnel fid = %x vf->fid = %x",
				 tun_dst_fid + bp->first_vf_id, bp->fw_fid);

		/* Tunnel doesn't belong to this VF, so don't send HWRM
		 * cmd, just delete the flow from driver
		 */
		if (bp->fw_fid != (tun_dst_fid + bp->first_vf_id)) {
			PMD_DRV_LOG_LINE(ERR,
					 "Tunnel does not belong to this VF, skip hwrm_tunnel_redirect_free");
		} else {
			ret = bnxt_hwrm_tunnel_redirect_free(bp,
							     filter->tunnel_type);
			if (ret) {
				rte_flow_error_set(error, -ret,
						   RTE_FLOW_ERROR_TYPE_HANDLE,
						   NULL,
						   "Unable to free tunnel redirection");
				return ret;
			}
		}
	}
	return ret;
}

/* Tear down one flow. The caller must hold the flow lock. */
static int
_bnxt_flow_destroy(struct bnxt *bp,
		   struct rte_flow *flow,
		   struct rte_flow_error *error)
{
	struct bnxt_filter_info *filter;
	struct bnxt_vnic_info *vnic;
	int ret = 0;
	uint32_t flow_id;

	filter = flow->filter;
	vnic = flow->vnic;

	/* If tunnel redirection to a VF/PF is specified then only tunnel_type
	 * is set and enable is set to the tunnel type. Issue hwrm cmd directly
	 * in such a case.
	 */
	if (filter->filter_type == HWRM_CFA_TUNNEL_REDIRECT_FILTER &&
	    (filter->enables == filter->tunnel_type ||
	     filter->tunnel_type == CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN ||
	     filter->tunnel_type == CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE)) {
		if (filter->enables & NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT) {
			struct rte_eth_udp_tunnel tunnel = {0};

			/* hwrm_tunnel_dst_port_free converts to Big Endian */
			tunnel.udp_port = BNXT_NTOHS(filter->dst_port);
			if (filter->tunnel_type ==
			    CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN) {
				tunnel.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN;
			} else if (filter->tunnel_type ==
				   CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE) {
				tunnel.prot_type = RTE_ETH_TUNNEL_TYPE_GENEVE;
			} else {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_HANDLE,
						   NULL,
						   "Invalid tunnel type");
				return ret;
			}

			ret = bnxt_udp_tunnel_port_del_op(bp->eth_dev,
							  &tunnel);
			if (ret)
				return ret;
		}
		ret = bnxt_handle_tunnel_redirect_destroy(bp, filter, error);
		if (!ret)
			goto done;
		else
			return ret;
	}

	/* For config type, there is no filter in HW. Finish cleanup here */
	if (filter->filter_type == HWRM_CFA_CONFIG)
		goto done;

	ret = bnxt_match_filter(bp, filter);
	if (ret == 0)
		PMD_DRV_LOG_LINE(ERR, "Could not find matching flow");

	if (filter->valid_flags & BNXT_FLOW_MARK_FLAG) {
		flow_id = filter->flow_id & BNXT_FLOW_ID_MASK;
		memset(&bp->mark_table[flow_id], 0,
		       sizeof(bp->mark_table[flow_id]));
		filter->flow_id = 0;
	}

	ret = bnxt_clear_one_vnic_filter(bp, filter);

done:
	if (!ret) {
		/* If it is an L2 drop filter, the FW updates the BC/MC
		 * records when the filter is created. Once this filter is
		 * removed, issue the set_rx_mask command to reset the BC/MC
		 * records in the HW to the settings in place before the drop
		 * filter was created.
		 */
		if (filter->valid_flags & BNXT_FLOW_L2_DROP_FLAG)
			bnxt_set_rx_mask_no_vlan(bp, &bp->vnic_info[0]);

		STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
		bnxt_free_filter(bp, filter);
		STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
		rte_free(flow);
		if (BNXT_FLOW_XSTATS_EN(bp))
			bp->flow_stat->flow_count--;

		/* If this was the last flow associated with this vnic,
		 * switch the queue back to RSS pool.
		 */
		if (vnic && !vnic->func_default &&
		    STAILQ_EMPTY(&vnic->flow_list)) {
			bnxt_vnic_cleanup(bp, vnic);
			bp->nr_vnics--;
		}
	} else {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to destroy flow.");
	}

	return ret;
}

static int
bnxt_flow_destroy(struct rte_eth_dev *dev,
		  struct rte_flow *flow,
		  struct rte_flow_error *error)
{
	struct bnxt *bp = dev->data->dev_private;
	int ret = 0;

	bnxt_acquire_flow_lock(bp);
	if (!flow) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Invalid flow: failed to destroy flow.");
		bnxt_release_flow_lock(bp);
		return -EINVAL;
	}

	if (!flow->filter) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Invalid flow: failed to destroy flow.");
		bnxt_release_flow_lock(bp);
		return -EINVAL;
	}
	ret = _bnxt_flow_destroy(bp, flow, error);
	bnxt_release_flow_lock(bp);

	return ret;
}

void bnxt_cancel_fc_thread(struct bnxt *bp)
{
	bp->flags &= ~BNXT_FLAG_FC_THREAD;
	rte_eal_alarm_cancel(bnxt_flow_cnt_alarm_cb, (void *)bp);
}

/* Destroy every flow on every VNIC and stop the flow counter alarm. */
static int
bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
	struct bnxt *bp = dev->data->dev_private;
	struct bnxt_vnic_info *vnic;
	struct rte_flow *flow;
	unsigned int i;
	int ret = 0;

	bnxt_acquire_flow_lock(bp);
	for (i = 0; i < bp->max_vnics; i++) {
		vnic = &bp->vnic_info[i];
		if (vnic && vnic->fw_vnic_id == INVALID_VNIC_ID)
			continue;

		while (!STAILQ_EMPTY(&vnic->flow_list)) {
			flow = STAILQ_FIRST(&vnic->flow_list);

			if (!flow->filter)
				continue;

			ret = _bnxt_flow_destroy(bp, flow, error);
			if (ret)
				break;
		}
	}

	bnxt_cancel_fc_thread(bp);
	bnxt_release_flow_lock(bp);

	return ret;
}

const struct rte_flow_ops bnxt_flow_ops = {
	.validate = bnxt_flow_validate,
	.create = bnxt_flow_create,
	.destroy = bnxt_flow_destroy,
	.flush = bnxt_flow_flush,
	.query = bnxt_flow_query,
};
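/*
 * Illustrative usage sketch (editor's addition, not part of the driver):
 * applications do not call bnxt_flow_ops directly. They use the generic
 * rte_flow API, and ethdev dispatches to the .validate/.create/.destroy/
 * .flush/.query callbacks registered above. Assuming a started port
 * "port_id", a minimal flow that steers ingress IPv4/UDP traffic with
 * destination port 4789 to Rx queue 1 could look like this (the queue index
 * and UDP port are example values):
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_udp udp_spec = {
 *		.hdr = { .dst_port = RTE_BE16(4789) },
 *	};
 *	struct rte_flow_item_udp udp_mask = {
 *		.hdr = { .dst_port = RTE_BE16(0xffff) },
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error error;
 *	struct rte_flow *flow = NULL;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &error) == 0)
 *		flow = rte_flow_create(port_id, &attr, pattern, actions, &error);
 *	...
 *	if (flow)
 *		rte_flow_destroy(port_id, flow, &error);
 */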