1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2014-2021 Broadcom 3 * All rights reserved. 4 */ 5 6 #include <sys/queue.h> 7 8 #include <rte_log.h> 9 #include <rte_malloc.h> 10 #include <rte_flow.h> 11 #include <rte_flow_driver.h> 12 #include <rte_tailq.h> 13 #include <rte_alarm.h> 14 #include <rte_cycles.h> 15 16 #include "bnxt.h" 17 #include "bnxt_filter.h" 18 #include "bnxt_hwrm.h" 19 #include "bnxt_ring.h" 20 #include "bnxt_rxq.h" 21 #include "bnxt_rxr.h" 22 #include "bnxt_vnic.h" 23 #include "hsi_struct_def_dpdk.h" 24 25 static int 26 bnxt_flow_args_validate(const struct rte_flow_attr *attr, 27 const struct rte_flow_item pattern[], 28 const struct rte_flow_action actions[], 29 struct rte_flow_error *error) 30 { 31 if (!pattern) { 32 rte_flow_error_set(error, 33 EINVAL, 34 RTE_FLOW_ERROR_TYPE_ITEM_NUM, 35 NULL, 36 "NULL pattern."); 37 return -rte_errno; 38 } 39 40 if (!actions) { 41 rte_flow_error_set(error, 42 EINVAL, 43 RTE_FLOW_ERROR_TYPE_ACTION_NUM, 44 NULL, 45 "NULL action."); 46 return -rte_errno; 47 } 48 49 if (!attr) { 50 rte_flow_error_set(error, 51 EINVAL, 52 RTE_FLOW_ERROR_TYPE_ATTR, 53 NULL, 54 "NULL attribute."); 55 return -rte_errno; 56 } 57 58 return 0; 59 } 60 61 static const struct rte_flow_item * 62 bnxt_flow_non_void_item(const struct rte_flow_item *cur) 63 { 64 while (1) { 65 if (cur->type != RTE_FLOW_ITEM_TYPE_VOID) 66 return cur; 67 cur++; 68 } 69 } 70 71 static const struct rte_flow_action * 72 bnxt_flow_non_void_action(const struct rte_flow_action *cur) 73 { 74 while (1) { 75 if (cur->type != RTE_FLOW_ACTION_TYPE_VOID) 76 return cur; 77 cur++; 78 } 79 } 80 81 static int 82 bnxt_filter_type_check(const struct rte_flow_item pattern[], 83 struct rte_flow_error *error) 84 { 85 const struct rte_flow_item *item = 86 bnxt_flow_non_void_item(pattern); 87 int use_ntuple = 1; 88 bool has_vlan = 0; 89 90 while (item->type != RTE_FLOW_ITEM_TYPE_END) { 91 switch (item->type) { 92 case RTE_FLOW_ITEM_TYPE_ANY: 93 case RTE_FLOW_ITEM_TYPE_ETH: 94 use_ntuple = 0; 95 break; 96 case RTE_FLOW_ITEM_TYPE_VLAN: 97 use_ntuple = 0; 98 has_vlan = 1; 99 break; 100 case RTE_FLOW_ITEM_TYPE_IPV4: 101 case RTE_FLOW_ITEM_TYPE_IPV6: 102 case RTE_FLOW_ITEM_TYPE_TCP: 103 case RTE_FLOW_ITEM_TYPE_UDP: 104 /* FALLTHROUGH */ 105 /* need ntuple match, reset exact match */ 106 use_ntuple |= 1; 107 break; 108 default: 109 PMD_DRV_LOG(DEBUG, "Unknown Flow type\n"); 110 use_ntuple |= 0; 111 } 112 item++; 113 } 114 115 if (has_vlan && use_ntuple) { 116 PMD_DRV_LOG(ERR, 117 "VLAN flow cannot use NTUPLE filter\n"); 118 rte_flow_error_set(error, EINVAL, 119 RTE_FLOW_ERROR_TYPE_ITEM, 120 item, 121 "Cannot use VLAN with NTUPLE"); 122 return -rte_errno; 123 } 124 125 return use_ntuple; 126 } 127 128 static int 129 bnxt_validate_and_parse_flow_type(struct bnxt *bp, 130 const struct rte_flow_attr *attr, 131 const struct rte_flow_item pattern[], 132 struct rte_flow_error *error, 133 struct bnxt_filter_info *filter) 134 { 135 const struct rte_flow_item *item = bnxt_flow_non_void_item(pattern); 136 const struct rte_flow_item_vlan *vlan_spec, *vlan_mask; 137 const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask; 138 const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask; 139 const struct rte_flow_item_tcp *tcp_spec, *tcp_mask; 140 const struct rte_flow_item_udp *udp_spec, *udp_mask; 141 const struct rte_flow_item_eth *eth_spec, *eth_mask; 142 const struct rte_ether_addr *dst, *src; 143 const struct rte_flow_item_nvgre *nvgre_spec; 144 const struct rte_flow_item_nvgre *nvgre_mask; 145 const struct 
rte_flow_item_gre *gre_spec; 146 const struct rte_flow_item_gre *gre_mask; 147 const struct rte_flow_item_vxlan *vxlan_spec; 148 const struct rte_flow_item_vxlan *vxlan_mask; 149 uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF}; 150 uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF}; 151 const struct rte_flow_item_vf *vf_spec; 152 uint32_t tenant_id_be = 0, valid_flags = 0; 153 bool vni_masked = 0; 154 bool tni_masked = 0; 155 uint32_t en_ethertype; 156 uint8_t inner = 0; 157 uint32_t vf = 0; 158 uint32_t en = 0; 159 int use_ntuple; 160 int dflt_vnic; 161 162 use_ntuple = bnxt_filter_type_check(pattern, error); 163 if (use_ntuple < 0) 164 return use_ntuple; 165 PMD_DRV_LOG(DEBUG, "Use NTUPLE %d\n", use_ntuple); 166 167 filter->filter_type = use_ntuple ? 168 HWRM_CFA_NTUPLE_FILTER : HWRM_CFA_L2_FILTER; 169 en_ethertype = use_ntuple ? 170 NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE : 171 EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE; 172 173 while (item->type != RTE_FLOW_ITEM_TYPE_END) { 174 if (item->last) { 175 /* last or range is NOT supported as match criteria */ 176 rte_flow_error_set(error, EINVAL, 177 RTE_FLOW_ERROR_TYPE_ITEM, 178 item, 179 "No support for range"); 180 return -rte_errno; 181 } 182 183 switch (item->type) { 184 case RTE_FLOW_ITEM_TYPE_ANY: 185 inner = 186 ((const struct rte_flow_item_any *)item->spec)->num > 3; 187 if (inner) 188 PMD_DRV_LOG(DEBUG, "Parse inner header\n"); 189 break; 190 case RTE_FLOW_ITEM_TYPE_ETH: 191 if (!item->spec) 192 break; 193 194 eth_spec = item->spec; 195 196 if (item->mask) 197 eth_mask = item->mask; 198 else 199 eth_mask = &rte_flow_item_eth_mask; 200 201 /* Source MAC address mask cannot be partially set. 202 * Should be all 0's or all 1's. 203 * Destination MAC address mask must not be partially 204 * set. Should be all 1's or all 0's. 205 */ 206 if ((!rte_is_zero_ether_addr(&eth_mask->src) && 207 !rte_is_broadcast_ether_addr(&eth_mask->src)) || 208 (!rte_is_zero_ether_addr(&eth_mask->dst) && 209 !rte_is_broadcast_ether_addr(&eth_mask->dst))) { 210 rte_flow_error_set(error, 211 EINVAL, 212 RTE_FLOW_ERROR_TYPE_ITEM, 213 item, 214 "MAC_addr mask not valid"); 215 return -rte_errno; 216 } 217 218 /* Mask is not allowed. Only exact matches are */ 219 if (eth_mask->type && 220 eth_mask->type != RTE_BE16(0xffff)) { 221 rte_flow_error_set(error, EINVAL, 222 RTE_FLOW_ERROR_TYPE_ITEM, 223 item, 224 "ethertype mask not valid"); 225 return -rte_errno; 226 } 227 228 if (rte_is_broadcast_ether_addr(&eth_mask->dst)) { 229 dst = &eth_spec->dst; 230 if (!rte_is_valid_assigned_ether_addr(dst)) { 231 rte_flow_error_set(error, 232 EINVAL, 233 RTE_FLOW_ERROR_TYPE_ITEM, 234 item, 235 "DMAC is invalid"); 236 PMD_DRV_LOG(ERR, 237 "DMAC is invalid!\n"); 238 return -rte_errno; 239 } 240 rte_memcpy(filter->dst_macaddr, 241 &eth_spec->dst, RTE_ETHER_ADDR_LEN); 242 en |= use_ntuple ? 243 NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR : 244 EM_FLOW_ALLOC_INPUT_EN_DST_MACADDR; 245 valid_flags |= inner ? 246 BNXT_FLOW_L2_INNER_DST_VALID_FLAG : 247 BNXT_FLOW_L2_DST_VALID_FLAG; 248 filter->priority = attr->priority; 249 PMD_DRV_LOG(DEBUG, 250 "Creating a priority flow\n"); 251 } 252 if (rte_is_broadcast_ether_addr(&eth_mask->src)) { 253 src = &eth_spec->src; 254 if (!rte_is_valid_assigned_ether_addr(src)) { 255 rte_flow_error_set(error, 256 EINVAL, 257 RTE_FLOW_ERROR_TYPE_ITEM, 258 item, 259 "SMAC is invalid"); 260 PMD_DRV_LOG(ERR, 261 "SMAC is invalid!\n"); 262 return -rte_errno; 263 } 264 rte_memcpy(filter->src_macaddr, 265 &eth_spec->src, RTE_ETHER_ADDR_LEN); 266 en |= use_ntuple ?
267 NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR : 268 EM_FLOW_ALLOC_INPUT_EN_SRC_MACADDR; 269 valid_flags |= inner ? 270 BNXT_FLOW_L2_INNER_SRC_VALID_FLAG : 271 BNXT_FLOW_L2_SRC_VALID_FLAG; 272 } /* 273 * else { 274 * PMD_DRV_LOG(ERR, "Handle this condition\n"); 275 * } 276 */ 277 if (eth_mask->type) { 278 filter->ethertype = 279 rte_be_to_cpu_16(eth_spec->type); 280 en |= en_ethertype; 281 } 282 if (inner) 283 valid_flags |= BNXT_FLOW_PARSE_INNER_FLAG; 284 285 break; 286 case RTE_FLOW_ITEM_TYPE_VLAN: 287 vlan_spec = item->spec; 288 289 if (item->mask) 290 vlan_mask = item->mask; 291 else 292 vlan_mask = &rte_flow_item_vlan_mask; 293 294 if (en & en_ethertype) { 295 rte_flow_error_set(error, EINVAL, 296 RTE_FLOW_ERROR_TYPE_ITEM, 297 item, 298 "VLAN TPID matching is not" 299 " supported"); 300 return -rte_errno; 301 } 302 if (vlan_mask->tci && 303 vlan_mask->tci == RTE_BE16(0x0fff)) { 304 /* Only the VLAN ID can be matched. */ 305 filter->l2_ovlan = 306 rte_be_to_cpu_16(vlan_spec->tci & 307 RTE_BE16(0x0fff)); 308 en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID; 309 } else { 310 rte_flow_error_set(error, 311 EINVAL, 312 RTE_FLOW_ERROR_TYPE_ITEM, 313 item, 314 "VLAN mask is invalid"); 315 return -rte_errno; 316 } 317 if (vlan_mask->inner_type && 318 vlan_mask->inner_type != RTE_BE16(0xffff)) { 319 rte_flow_error_set(error, EINVAL, 320 RTE_FLOW_ERROR_TYPE_ITEM, 321 item, 322 "inner ethertype mask not" 323 " valid"); 324 return -rte_errno; 325 } 326 if (vlan_mask->inner_type) { 327 filter->ethertype = 328 rte_be_to_cpu_16(vlan_spec->inner_type); 329 en |= en_ethertype; 330 } 331 332 break; 333 case RTE_FLOW_ITEM_TYPE_IPV4: 334 /* If mask is not involved, we could use EM filters. */ 335 ipv4_spec = item->spec; 336 337 if (!item->spec) 338 break; 339 340 if (item->mask) 341 ipv4_mask = item->mask; 342 else 343 ipv4_mask = &rte_flow_item_ipv4_mask; 344 345 /* Only IP DST and SRC fields are maskable. */ 346 if (ipv4_mask->hdr.version_ihl || 347 ipv4_mask->hdr.type_of_service || 348 ipv4_mask->hdr.total_length || 349 ipv4_mask->hdr.packet_id || 350 ipv4_mask->hdr.fragment_offset || 351 ipv4_mask->hdr.time_to_live || 352 ipv4_mask->hdr.next_proto_id || 353 ipv4_mask->hdr.hdr_checksum) { 354 rte_flow_error_set(error, 355 EINVAL, 356 RTE_FLOW_ERROR_TYPE_ITEM, 357 item, 358 "Invalid IPv4 mask."); 359 return -rte_errno; 360 } 361 362 filter->dst_ipaddr[0] = ipv4_spec->hdr.dst_addr; 363 filter->src_ipaddr[0] = ipv4_spec->hdr.src_addr; 364 365 if (use_ntuple) 366 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR | 367 NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; 368 else 369 en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR | 370 EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR; 371 372 if (ipv4_mask->hdr.src_addr) { 373 filter->src_ipaddr_mask[0] = 374 ipv4_mask->hdr.src_addr; 375 en |= !use_ntuple ? 0 : 376 NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; 377 } 378 379 if (ipv4_mask->hdr.dst_addr) { 380 filter->dst_ipaddr_mask[0] = 381 ipv4_mask->hdr.dst_addr; 382 en |= !use_ntuple ? 0 : 383 NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; 384 } 385 386 filter->ip_addr_type = use_ntuple ? 
387 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 : 388 HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4; 389 390 if (ipv4_spec->hdr.next_proto_id) { 391 filter->ip_protocol = 392 ipv4_spec->hdr.next_proto_id; 393 if (use_ntuple) 394 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 395 else 396 en |= EM_FLOW_ALLOC_INPUT_EN_IP_PROTO; 397 } 398 break; 399 case RTE_FLOW_ITEM_TYPE_IPV6: 400 ipv6_spec = item->spec; 401 402 if (!item->spec) 403 break; 404 405 if (item->mask) 406 ipv6_mask = item->mask; 407 else 408 ipv6_mask = &rte_flow_item_ipv6_mask; 409 410 /* Only IP DST and SRC fields are maskable. */ 411 if (ipv6_mask->hdr.vtc_flow || 412 ipv6_mask->hdr.payload_len || 413 ipv6_mask->hdr.proto || 414 ipv6_mask->hdr.hop_limits) { 415 rte_flow_error_set(error, 416 EINVAL, 417 RTE_FLOW_ERROR_TYPE_ITEM, 418 item, 419 "Invalid IPv6 mask."); 420 return -rte_errno; 421 } 422 423 if (use_ntuple) 424 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR | 425 NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; 426 else 427 en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR | 428 EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR; 429 430 rte_memcpy(filter->src_ipaddr, 431 ipv6_spec->hdr.src_addr, 16); 432 rte_memcpy(filter->dst_ipaddr, 433 ipv6_spec->hdr.dst_addr, 16); 434 435 if (!bnxt_check_zero_bytes(ipv6_mask->hdr.src_addr, 436 16)) { 437 rte_memcpy(filter->src_ipaddr_mask, 438 ipv6_mask->hdr.src_addr, 16); 439 en |= !use_ntuple ? 0 : 440 NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; 441 } 442 443 if (!bnxt_check_zero_bytes(ipv6_mask->hdr.dst_addr, 444 16)) { 445 rte_memcpy(filter->dst_ipaddr_mask, 446 ipv6_mask->hdr.dst_addr, 16); 447 en |= !use_ntuple ? 0 : 448 NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; 449 } 450 451 filter->ip_addr_type = use_ntuple ? 452 NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 : 453 EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6; 454 break; 455 case RTE_FLOW_ITEM_TYPE_TCP: 456 tcp_spec = item->spec; 457 458 if (!item->spec) 459 break; 460 461 if (item->mask) 462 tcp_mask = item->mask; 463 else 464 tcp_mask = &rte_flow_item_tcp_mask; 465 466 /* Check TCP mask. Only DST & SRC ports are maskable */ 467 if (tcp_mask->hdr.sent_seq || 468 tcp_mask->hdr.recv_ack || 469 tcp_mask->hdr.data_off || 470 tcp_mask->hdr.tcp_flags || 471 tcp_mask->hdr.rx_win || 472 tcp_mask->hdr.cksum || 473 tcp_mask->hdr.tcp_urp) { 474 rte_flow_error_set(error, 475 EINVAL, 476 RTE_FLOW_ERROR_TYPE_ITEM, 477 item, 478 "Invalid TCP mask"); 479 return -rte_errno; 480 } 481 482 filter->src_port = tcp_spec->hdr.src_port; 483 filter->dst_port = tcp_spec->hdr.dst_port; 484 485 if (use_ntuple) 486 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT | 487 NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT; 488 else 489 en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT | 490 EM_FLOW_ALLOC_INPUT_EN_DST_PORT; 491 492 if (tcp_mask->hdr.dst_port) { 493 filter->dst_port_mask = tcp_mask->hdr.dst_port; 494 en |= !use_ntuple ? 0 : 495 NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; 496 } 497 498 if (tcp_mask->hdr.src_port) { 499 filter->src_port_mask = tcp_mask->hdr.src_port; 500 en |= !use_ntuple ? 
0 : 501 NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; 502 } 503 break; 504 case RTE_FLOW_ITEM_TYPE_UDP: 505 udp_spec = item->spec; 506 507 if (!item->spec) 508 break; 509 510 if (item->mask) 511 udp_mask = item->mask; 512 else 513 udp_mask = &rte_flow_item_udp_mask; 514 515 if (udp_mask->hdr.dgram_len || 516 udp_mask->hdr.dgram_cksum) { 517 rte_flow_error_set(error, 518 EINVAL, 519 RTE_FLOW_ERROR_TYPE_ITEM, 520 item, 521 "Invalid UDP mask"); 522 return -rte_errno; 523 } 524 525 filter->src_port = udp_spec->hdr.src_port; 526 filter->dst_port = udp_spec->hdr.dst_port; 527 528 if (use_ntuple) 529 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT | 530 NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT; 531 else 532 en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT | 533 EM_FLOW_ALLOC_INPUT_EN_DST_PORT; 534 535 if (udp_mask->hdr.dst_port) { 536 filter->dst_port_mask = udp_mask->hdr.dst_port; 537 en |= !use_ntuple ? 0 : 538 NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; 539 } 540 541 if (udp_mask->hdr.src_port) { 542 filter->src_port_mask = udp_mask->hdr.src_port; 543 en |= !use_ntuple ? 0 : 544 NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; 545 } 546 break; 547 case RTE_FLOW_ITEM_TYPE_VXLAN: 548 vxlan_spec = item->spec; 549 vxlan_mask = item->mask; 550 /* Check if VXLAN item is used to describe protocol. 551 * If yes, both spec and mask should be NULL. 552 * If no, both spec and mask shouldn't be NULL. 553 */ 554 if ((!vxlan_spec && vxlan_mask) || 555 (vxlan_spec && !vxlan_mask)) { 556 rte_flow_error_set(error, 557 EINVAL, 558 RTE_FLOW_ERROR_TYPE_ITEM, 559 item, 560 "Invalid VXLAN item"); 561 return -rte_errno; 562 } 563 564 if (!vxlan_spec && !vxlan_mask) { 565 filter->tunnel_type = 566 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN; 567 break; 568 } 569 570 if (vxlan_spec->rsvd1 || vxlan_spec->rsvd0[0] || 571 vxlan_spec->rsvd0[1] || vxlan_spec->rsvd0[2] || 572 vxlan_spec->flags != 0x8) { 573 rte_flow_error_set(error, 574 EINVAL, 575 RTE_FLOW_ERROR_TYPE_ITEM, 576 item, 577 "Invalid VXLAN item"); 578 return -rte_errno; 579 } 580 581 /* Check if VNI is masked. */ 582 if (vxlan_mask != NULL) { 583 vni_masked = 584 !!memcmp(vxlan_mask->vni, vni_mask, 585 RTE_DIM(vni_mask)); 586 if (vni_masked) { 587 rte_flow_error_set 588 (error, 589 EINVAL, 590 RTE_FLOW_ERROR_TYPE_ITEM, 591 item, 592 "Invalid VNI mask"); 593 return -rte_errno; 594 } 595 596 rte_memcpy(((uint8_t *)&tenant_id_be + 1), 597 vxlan_spec->vni, 3); 598 filter->vni = 599 rte_be_to_cpu_32(tenant_id_be); 600 filter->tunnel_type = 601 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN; 602 } 603 break; 604 case RTE_FLOW_ITEM_TYPE_NVGRE: 605 nvgre_spec = item->spec; 606 nvgre_mask = item->mask; 607 /* Check if NVGRE item is used to describe protocol. 608 * If yes, both spec and mask should be NULL. 609 * If no, both spec and mask shouldn't be NULL. 
610 */ 611 if ((!nvgre_spec && nvgre_mask) || 612 (nvgre_spec && !nvgre_mask)) { 613 rte_flow_error_set(error, 614 EINVAL, 615 RTE_FLOW_ERROR_TYPE_ITEM, 616 item, 617 "Invalid NVGRE item"); 618 return -rte_errno; 619 } 620 621 if (!nvgre_spec && !nvgre_mask) { 622 filter->tunnel_type = 623 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE; 624 break; 625 } 626 627 if (nvgre_spec->c_k_s_rsvd0_ver != 0x2000 || 628 nvgre_spec->protocol != 0x6558) { 629 rte_flow_error_set(error, 630 EINVAL, 631 RTE_FLOW_ERROR_TYPE_ITEM, 632 item, 633 "Invalid NVGRE item"); 634 return -rte_errno; 635 } 636 637 if (nvgre_spec && nvgre_mask) { 638 tni_masked = 639 !!memcmp(nvgre_mask->tni, tni_mask, 640 RTE_DIM(tni_mask)); 641 if (tni_masked) { 642 rte_flow_error_set 643 (error, 644 EINVAL, 645 RTE_FLOW_ERROR_TYPE_ITEM, 646 item, 647 "Invalid TNI mask"); 648 return -rte_errno; 649 } 650 rte_memcpy(((uint8_t *)&tenant_id_be + 1), 651 nvgre_spec->tni, 3); 652 filter->vni = 653 rte_be_to_cpu_32(tenant_id_be); 654 filter->tunnel_type = 655 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE; 656 } 657 break; 658 659 case RTE_FLOW_ITEM_TYPE_GRE: 660 gre_spec = (const struct rte_flow_item_gre *)item->spec; 661 gre_mask = (const struct rte_flow_item_gre *)item->mask; 662 663 /* 664 *Check if GRE item is used to describe protocol. 665 * If yes, both spec and mask should be NULL. 666 * If no, both spec and mask shouldn't be NULL. 667 */ 668 if (!!gre_spec ^ !!gre_mask) { 669 rte_flow_error_set(error, EINVAL, 670 RTE_FLOW_ERROR_TYPE_ITEM, 671 item, 672 "Invalid GRE item"); 673 return -rte_errno; 674 } 675 676 if (!gre_spec && !gre_mask) { 677 filter->tunnel_type = 678 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE; 679 break; 680 } 681 break; 682 683 case RTE_FLOW_ITEM_TYPE_VF: 684 vf_spec = item->spec; 685 vf = vf_spec->id; 686 if (!BNXT_PF(bp)) { 687 rte_flow_error_set(error, 688 EINVAL, 689 RTE_FLOW_ERROR_TYPE_ITEM, 690 item, 691 "Configuring on a VF!"); 692 return -rte_errno; 693 } 694 695 if (vf >= bp->pdev->max_vfs) { 696 rte_flow_error_set(error, 697 EINVAL, 698 RTE_FLOW_ERROR_TYPE_ITEM, 699 item, 700 "Incorrect VF id!"); 701 return -rte_errno; 702 } 703 704 if (!attr->transfer) { 705 rte_flow_error_set(error, 706 ENOTSUP, 707 RTE_FLOW_ERROR_TYPE_ITEM, 708 item, 709 "Matching VF traffic without" 710 " affecting it (transfer attribute)" 711 " is unsupported"); 712 return -rte_errno; 713 } 714 715 filter->mirror_vnic_id = 716 dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf); 717 if (dflt_vnic < 0) { 718 /* This simply indicates there's no driver 719 * loaded. This is not an error. 720 */ 721 rte_flow_error_set 722 (error, 723 EINVAL, 724 RTE_FLOW_ERROR_TYPE_ITEM, 725 item, 726 "Unable to get default VNIC for VF"); 727 return -rte_errno; 728 } 729 730 filter->mirror_vnic_id = dflt_vnic; 731 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID; 732 break; 733 default: 734 break; 735 } 736 item++; 737 } 738 filter->enables = en; 739 filter->valid_flags = valid_flags; 740 741 /* Items parsed but no filter to create in HW. 
*/ 742 if (filter->enables == 0 && filter->valid_flags == 0) 743 filter->filter_type = HWRM_CFA_CONFIG; 744 745 return 0; 746 } 747 748 /* Parse attributes */ 749 static int 750 bnxt_flow_parse_attr(const struct rte_flow_attr *attr, 751 struct rte_flow_error *error) 752 { 753 /* Must be input direction */ 754 if (!attr->ingress) { 755 rte_flow_error_set(error, 756 EINVAL, 757 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, 758 attr, 759 "Only support ingress."); 760 return -rte_errno; 761 } 762 763 /* Not supported */ 764 if (attr->egress) { 765 rte_flow_error_set(error, 766 EINVAL, 767 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, 768 attr, 769 "No support for egress."); 770 return -rte_errno; 771 } 772 773 return 0; 774 } 775 776 static struct bnxt_filter_info * 777 bnxt_find_matching_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf) 778 { 779 struct bnxt_filter_info *mf, *f0; 780 struct bnxt_vnic_info *vnic0; 781 int i; 782 783 vnic0 = BNXT_GET_DEFAULT_VNIC(bp); 784 f0 = STAILQ_FIRST(&vnic0->filter); 785 786 /* This flow has same DST MAC as the port/l2 filter. */ 787 if (memcmp(f0->l2_addr, nf->dst_macaddr, RTE_ETHER_ADDR_LEN) == 0) 788 return f0; 789 790 for (i = bp->max_vnics - 1; i >= 0; i--) { 791 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 792 793 if (vnic->fw_vnic_id == INVALID_VNIC_ID) 794 continue; 795 796 STAILQ_FOREACH(mf, &vnic->filter, next) { 797 798 if (mf->matching_l2_fltr_ptr) 799 continue; 800 801 if (mf->ethertype == nf->ethertype && 802 mf->l2_ovlan == nf->l2_ovlan && 803 mf->l2_ovlan_mask == nf->l2_ovlan_mask && 804 mf->l2_ivlan == nf->l2_ivlan && 805 mf->l2_ivlan_mask == nf->l2_ivlan_mask && 806 !memcmp(mf->src_macaddr, nf->src_macaddr, 807 RTE_ETHER_ADDR_LEN) && 808 !memcmp(mf->dst_macaddr, nf->dst_macaddr, 809 RTE_ETHER_ADDR_LEN)) 810 return mf; 811 } 812 } 813 return NULL; 814 } 815 816 static struct bnxt_filter_info * 817 bnxt_create_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf, 818 struct bnxt_vnic_info *vnic) 819 { 820 struct bnxt_filter_info *filter1; 821 int rc; 822 823 /* Alloc new L2 filter. 824 * This flow needs MAC filter which does not match any existing 825 * L2 filters. 826 */ 827 filter1 = bnxt_get_unused_filter(bp); 828 if (filter1 == NULL) 829 return NULL; 830 831 memcpy(filter1, nf, sizeof(*filter1)); 832 833 filter1->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_XDP_DISABLE; 834 filter1->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX; 835 if (nf->valid_flags & BNXT_FLOW_L2_SRC_VALID_FLAG || 836 nf->valid_flags & BNXT_FLOW_L2_DST_VALID_FLAG) { 837 filter1->flags |= 838 HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST; 839 PMD_DRV_LOG(DEBUG, "Create Outer filter\n"); 840 } 841 842 if (nf->filter_type == HWRM_CFA_L2_FILTER && 843 (nf->valid_flags & BNXT_FLOW_L2_SRC_VALID_FLAG || 844 nf->valid_flags & BNXT_FLOW_L2_INNER_SRC_VALID_FLAG)) { 845 PMD_DRV_LOG(DEBUG, "Create L2 filter for SRC MAC\n"); 846 filter1->flags |= 847 HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_SOURCE_VALID; 848 memcpy(filter1->l2_addr, nf->src_macaddr, RTE_ETHER_ADDR_LEN); 849 } else { 850 PMD_DRV_LOG(DEBUG, "Create L2 filter for DST MAC\n"); 851 memcpy(filter1->l2_addr, nf->dst_macaddr, RTE_ETHER_ADDR_LEN); 852 } 853 854 if (nf->priority && 855 (nf->valid_flags & BNXT_FLOW_L2_DST_VALID_FLAG || 856 nf->valid_flags & BNXT_FLOW_L2_INNER_DST_VALID_FLAG)) { 857 /* Tell the FW where to place the filter in the table. 
*/ 858 if (nf->priority > 65535) { 859 filter1->pri_hint = 860 HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_BELOW_FILTER; 861 /* This will place the filter in TCAM */ 862 filter1->l2_filter_id_hint = (uint64_t)-1; 863 } 864 } 865 866 if (nf->valid_flags & (BNXT_FLOW_L2_DST_VALID_FLAG | 867 BNXT_FLOW_L2_SRC_VALID_FLAG | 868 BNXT_FLOW_L2_INNER_SRC_VALID_FLAG | 869 BNXT_FLOW_L2_INNER_DST_VALID_FLAG)) { 870 filter1->enables = 871 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR | 872 L2_FILTER_ALLOC_INPUT_EN_L2_ADDR_MASK; 873 memset(filter1->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN); 874 } 875 876 if (nf->valid_flags & BNXT_FLOW_L2_DROP_FLAG) { 877 filter1->flags |= 878 HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_DROP; 879 if (nf->ethertype == RTE_ETHER_TYPE_IPV4) { 880 /* Num VLANs for drop filter will/should be 0. 881 * If the req is memset to 0, then the count will 882 * be automatically set to 0. 883 */ 884 if (nf->valid_flags & BNXT_FLOW_PARSE_INNER_FLAG) { 885 filter1->enables |= 886 L2_FILTER_ALLOC_INPUT_EN_T_NUM_VLANS; 887 } else { 888 filter1->enables |= 889 L2_FILTER_ALLOC_INPUT_EN_NUM_VLANS; 890 filter1->flags |= 891 HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST; 892 } 893 } 894 } 895 896 rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, 897 filter1); 898 if (rc) { 899 bnxt_free_filter(bp, filter1); 900 return NULL; 901 } 902 return filter1; 903 } 904 905 struct bnxt_filter_info * 906 bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf, 907 struct bnxt_vnic_info *vnic) 908 { 909 struct bnxt_filter_info *l2_filter = NULL; 910 911 l2_filter = bnxt_find_matching_l2_filter(bp, nf); 912 if (l2_filter) { 913 l2_filter->l2_ref_cnt++; 914 } else { 915 l2_filter = bnxt_create_l2_filter(bp, nf, vnic); 916 if (l2_filter) { 917 STAILQ_INSERT_TAIL(&vnic->filter, l2_filter, next); 918 l2_filter->vnic = vnic; 919 } 920 } 921 nf->matching_l2_fltr_ptr = l2_filter; 922 923 return l2_filter; 924 } 925 926 static void bnxt_vnic_cleanup(struct bnxt *bp, struct bnxt_vnic_info *vnic) 927 { 928 if (vnic->rx_queue_cnt > 1) 929 bnxt_hwrm_vnic_ctx_free(bp, vnic); 930 931 bnxt_hwrm_vnic_free(bp, vnic); 932 933 rte_free(vnic->fw_grp_ids); 934 vnic->fw_grp_ids = NULL; 935 936 vnic->rx_queue_cnt = 0; 937 } 938 939 static int bnxt_vnic_prep(struct bnxt *bp, struct bnxt_vnic_info *vnic, 940 const struct rte_flow_action *act, 941 struct rte_flow_error *error) 942 { 943 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; 944 uint64_t rx_offloads = dev_conf->rxmode.offloads; 945 int rc; 946 947 if (bp->nr_vnics > bp->max_vnics - 1) 948 return rte_flow_error_set(error, EINVAL, 949 RTE_FLOW_ERROR_TYPE_ATTR_GROUP, 950 NULL, 951 "Group id is invalid"); 952 953 rc = bnxt_vnic_grp_alloc(bp, vnic); 954 if (rc) 955 return rte_flow_error_set(error, -rc, 956 RTE_FLOW_ERROR_TYPE_ACTION, 957 act, 958 "Failed to alloc VNIC group"); 959 960 rc = bnxt_hwrm_vnic_alloc(bp, vnic); 961 if (rc) { 962 rte_flow_error_set(error, -rc, 963 RTE_FLOW_ERROR_TYPE_ACTION, 964 act, 965 "Failed to alloc VNIC"); 966 goto ret; 967 } 968 969 /* RSS context is required only when there is more than one RSS ring */ 970 if (vnic->rx_queue_cnt > 1) { 971 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0); 972 if (rc) { 973 rte_flow_error_set(error, -rc, 974 RTE_FLOW_ERROR_TYPE_ACTION, 975 act, 976 "Failed to alloc VNIC context"); 977 goto ret; 978 } 979 } 980 981 if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP) 982 vnic->vlan_strip = true; 983 else 984 vnic->vlan_strip = false; 985 986 rc = bnxt_hwrm_vnic_cfg(bp, vnic); 987 if (rc) { 988 
rte_flow_error_set(error, -rc, 989 RTE_FLOW_ERROR_TYPE_ACTION, 990 act, 991 "Failed to configure VNIC"); 992 goto ret; 993 } 994 995 rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic); 996 if (rc) { 997 rte_flow_error_set(error, -rc, 998 RTE_FLOW_ERROR_TYPE_ACTION, 999 act, 1000 "Failed to configure VNIC plcmode"); 1001 goto ret; 1002 } 1003 1004 bp->nr_vnics++; 1005 1006 return 0; 1007 1008 ret: 1009 bnxt_vnic_cleanup(bp, vnic); 1010 return rc; 1011 } 1012 1013 static int match_vnic_rss_cfg(struct bnxt *bp, 1014 struct bnxt_vnic_info *vnic, 1015 const struct rte_flow_action_rss *rss) 1016 { 1017 unsigned int match = 0, i; 1018 1019 if (vnic->rx_queue_cnt != rss->queue_num) 1020 return -EINVAL; 1021 1022 for (i = 0; i < rss->queue_num; i++) { 1023 if (!bp->rx_queues[rss->queue[i]]->vnic->rx_queue_cnt && 1024 !bp->rx_queues[rss->queue[i]]->rx_started) 1025 return -EINVAL; 1026 } 1027 1028 for (i = 0; i < vnic->rx_queue_cnt; i++) { 1029 int j; 1030 1031 for (j = 0; j < vnic->rx_queue_cnt; j++) { 1032 if (bp->grp_info[rss->queue[i]].fw_grp_id == 1033 vnic->fw_grp_ids[j]) 1034 match++; 1035 } 1036 } 1037 1038 if (match != vnic->rx_queue_cnt) { 1039 PMD_DRV_LOG(ERR, 1040 "VNIC queue count %d vs queues matched %d\n", 1041 match, vnic->rx_queue_cnt); 1042 return -EINVAL; 1043 } 1044 1045 return 0; 1046 } 1047 1048 static void 1049 bnxt_update_filter_flags_en(struct bnxt_filter_info *filter, 1050 struct bnxt_filter_info *filter1, 1051 int use_ntuple) 1052 { 1053 if (!use_ntuple && 1054 !(filter->valid_flags & 1055 ~(BNXT_FLOW_L2_DST_VALID_FLAG | 1056 BNXT_FLOW_L2_SRC_VALID_FLAG | 1057 BNXT_FLOW_L2_INNER_SRC_VALID_FLAG | 1058 BNXT_FLOW_L2_INNER_DST_VALID_FLAG | 1059 BNXT_FLOW_L2_DROP_FLAG | 1060 BNXT_FLOW_PARSE_INNER_FLAG))) { 1061 filter->flags = filter1->flags; 1062 filter->enables = filter1->enables; 1063 filter->filter_type = HWRM_CFA_L2_FILTER; 1064 memcpy(filter->l2_addr, filter1->l2_addr, RTE_ETHER_ADDR_LEN); 1065 memset(filter->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN); 1066 filter->pri_hint = filter1->pri_hint; 1067 filter->l2_filter_id_hint = filter1->l2_filter_id_hint; 1068 } 1069 filter->fw_l2_filter_id = filter1->fw_l2_filter_id; 1070 filter->l2_ref_cnt = filter1->l2_ref_cnt; 1071 filter->flow_id = filter1->flow_id; 1072 PMD_DRV_LOG(DEBUG, 1073 "l2_filter: %p fw_l2_filter_id %" PRIx64 " l2_ref_cnt %u\n", 1074 filter1, filter->fw_l2_filter_id, filter->l2_ref_cnt); 1075 } 1076 1077 /* Valid actions supported along with RSS are count and mark. */ 1078 static int 1079 bnxt_validate_rss_action(const struct rte_flow_action actions[]) 1080 { 1081 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 1082 switch (actions->type) { 1083 case RTE_FLOW_ACTION_TYPE_VOID: 1084 break; 1085 case RTE_FLOW_ACTION_TYPE_RSS: 1086 break; 1087 case RTE_FLOW_ACTION_TYPE_MARK: 1088 break; 1089 case RTE_FLOW_ACTION_TYPE_COUNT: 1090 break; 1091 default: 1092 return -ENOTSUP; 1093 } 1094 } 1095 1096 return 0; 1097 } 1098 1099 static int 1100 bnxt_get_vnic(struct bnxt *bp, uint32_t group) 1101 { 1102 int vnic_id = 0; 1103 1104 /* For legacy NS3 based implementations, 1105 * group_id will be mapped to a VNIC ID. 1106 */ 1107 if (BNXT_STINGRAY(bp)) 1108 vnic_id = group; 1109 1110 /* Non NS3 cases, group_id will be ignored. 1111 * Setting will be configured on default VNIC. 
1112 */ 1113 return vnic_id; 1114 } 1115 1116 static int 1117 bnxt_vnic_rss_cfg_update(struct bnxt *bp, 1118 struct bnxt_vnic_info *vnic, 1119 const struct rte_flow_action *act, 1120 struct rte_flow_error *error) 1121 { 1122 const struct rte_flow_action_rss *rss; 1123 unsigned int rss_idx, i; 1124 uint16_t hash_type; 1125 uint64_t types; 1126 int rc; 1127 1128 rss = (const struct rte_flow_action_rss *)act->conf; 1129 1130 /* Currently only Toeplitz hash is supported. */ 1131 if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT && 1132 rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ) { 1133 rte_flow_error_set(error, 1134 ENOTSUP, 1135 RTE_FLOW_ERROR_TYPE_ACTION, 1136 act, 1137 "Unsupported RSS hash function"); 1138 rc = -rte_errno; 1139 goto ret; 1140 } 1141 1142 /* key_len should match the hash key supported by hardware */ 1143 if (rss->key_len != 0 && rss->key_len != HW_HASH_KEY_SIZE) { 1144 rte_flow_error_set(error, 1145 EINVAL, 1146 RTE_FLOW_ERROR_TYPE_ACTION, 1147 act, 1148 "Incorrect hash key parameters"); 1149 rc = -rte_errno; 1150 goto ret; 1151 } 1152 1153 /* Currently RSS hash on inner and outer headers are supported. 1154 * 0 => Default setting 1155 * 1 => Inner 1156 * 2 => Outer 1157 */ 1158 if (rss->level > 2) { 1159 rte_flow_error_set(error, 1160 ENOTSUP, 1161 RTE_FLOW_ERROR_TYPE_ACTION, 1162 act, 1163 "Unsupported hash level"); 1164 rc = -rte_errno; 1165 goto ret; 1166 } 1167 1168 if ((rss->queue_num == 0 && rss->queue != NULL) || 1169 (rss->queue_num != 0 && rss->queue == NULL)) { 1170 rte_flow_error_set(error, 1171 EINVAL, 1172 RTE_FLOW_ERROR_TYPE_ACTION, 1173 act, 1174 "Invalid queue config specified"); 1175 rc = -rte_errno; 1176 goto ret; 1177 } 1178 1179 /* If RSS types is 0, use a best effort configuration */ 1180 types = rss->types ? rss->types : ETH_RSS_IPV4; 1181 1182 hash_type = bnxt_rte_to_hwrm_hash_types(types); 1183 1184 /* If requested types can't be supported, leave existing settings */ 1185 if (hash_type) 1186 vnic->hash_type = hash_type; 1187 1188 vnic->hash_mode = 1189 bnxt_rte_to_hwrm_hash_level(bp, rss->types, rss->level); 1190 1191 /* Update RSS key only if key_len != 0 */ 1192 if (rss->key_len != 0) 1193 memcpy(vnic->rss_hash_key, rss->key, rss->key_len); 1194 1195 if (rss->queue_num == 0) 1196 goto skip_rss_table; 1197 1198 /* Validate Rx queues */ 1199 for (i = 0; i < rss->queue_num; i++) { 1200 PMD_DRV_LOG(DEBUG, "RSS action Queue %d\n", rss->queue[i]); 1201 1202 if (rss->queue[i] >= bp->rx_nr_rings || 1203 !bp->rx_queues[rss->queue[i]]) { 1204 rte_flow_error_set(error, 1205 EINVAL, 1206 RTE_FLOW_ERROR_TYPE_ACTION, 1207 act, 1208 "Invalid queue ID for RSS"); 1209 rc = -rte_errno; 1210 goto ret; 1211 } 1212 } 1213 1214 /* Prepare the indirection table */ 1215 for (rss_idx = 0; rss_idx < HW_HASH_INDEX_SIZE; rss_idx++) { 1216 struct bnxt_rx_queue *rxq; 1217 uint32_t idx; 1218 1219 idx = rss->queue[rss_idx % rss->queue_num]; 1220 1221 if (BNXT_CHIP_P5(bp)) { 1222 rxq = bp->rx_queues[idx]; 1223 vnic->rss_table[rss_idx * 2] = 1224 rxq->rx_ring->rx_ring_struct->fw_ring_id; 1225 vnic->rss_table[rss_idx * 2 + 1] = 1226 rxq->cp_ring->cp_ring_struct->fw_ring_id; 1227 } else { 1228 vnic->rss_table[rss_idx] = vnic->fw_grp_ids[idx]; 1229 } 1230 } 1231 1232 skip_rss_table: 1233 rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic); 1234 ret: 1235 return rc; 1236 } 1237 1238 static int 1239 bnxt_validate_and_parse_flow(struct rte_eth_dev *dev, 1240 const struct rte_flow_item pattern[], 1241 const struct rte_flow_action actions[], 1242 const struct rte_flow_attr *attr, 1243 struct 
rte_flow_error *error, 1244 struct bnxt_filter_info *filter) 1245 { 1246 const struct rte_flow_action *act = 1247 bnxt_flow_non_void_action(actions); 1248 struct bnxt *bp = dev->data->dev_private; 1249 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; 1250 struct bnxt_vnic_info *vnic = NULL, *vnic0 = NULL; 1251 const struct rte_flow_action_queue *act_q; 1252 const struct rte_flow_action_vf *act_vf; 1253 struct bnxt_filter_info *filter1 = NULL; 1254 const struct rte_flow_action_rss *rss; 1255 struct bnxt_rx_queue *rxq = NULL; 1256 int dflt_vnic, vnic_id; 1257 unsigned int rss_idx; 1258 uint32_t vf = 0, i; 1259 int rc, use_ntuple; 1260 1261 rc = 1262 bnxt_validate_and_parse_flow_type(bp, attr, pattern, error, filter); 1263 if (rc != 0) 1264 goto ret; 1265 1266 rc = bnxt_flow_parse_attr(attr, error); 1267 if (rc != 0) 1268 goto ret; 1269 1270 /* Since we support ingress attribute only - right now. */ 1271 if (filter->filter_type == HWRM_CFA_EM_FILTER) 1272 filter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX; 1273 1274 use_ntuple = bnxt_filter_type_check(pattern, error); 1275 1276 start: 1277 switch (act->type) { 1278 case RTE_FLOW_ACTION_TYPE_QUEUE: 1279 /* Allow this flow. Redirect to a VNIC. */ 1280 act_q = (const struct rte_flow_action_queue *)act->conf; 1281 if (!act_q->index || act_q->index >= bp->rx_nr_rings) { 1282 rte_flow_error_set(error, 1283 EINVAL, 1284 RTE_FLOW_ERROR_TYPE_ACTION, 1285 act, 1286 "Invalid queue ID."); 1287 rc = -rte_errno; 1288 goto ret; 1289 } 1290 PMD_DRV_LOG(DEBUG, "Queue index %d\n", act_q->index); 1291 1292 if (use_ntuple && !BNXT_RFS_NEEDS_VNIC(bp)) { 1293 filter->flags = 1294 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DEST_RFS_RING_IDX; 1295 filter->dst_id = act_q->index; 1296 goto skip_vnic_alloc; 1297 } 1298 1299 vnic_id = attr->group; 1300 if (!vnic_id) { 1301 PMD_DRV_LOG(DEBUG, "Group id is 0\n"); 1302 vnic_id = act_q->index; 1303 } 1304 1305 BNXT_VALID_VNIC_OR_RET(bp, vnic_id); 1306 1307 vnic = &bp->vnic_info[vnic_id]; 1308 if (vnic->rx_queue_cnt) { 1309 if (vnic->start_grp_id != act_q->index) { 1310 PMD_DRV_LOG(ERR, 1311 "VNIC already in use\n"); 1312 rte_flow_error_set(error, 1313 EINVAL, 1314 RTE_FLOW_ERROR_TYPE_ACTION, 1315 act, 1316 "VNIC already in use"); 1317 rc = -rte_errno; 1318 goto ret; 1319 } 1320 goto use_vnic; 1321 } 1322 1323 rxq = bp->rx_queues[act_q->index]; 1324 1325 if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) && rxq && 1326 vnic->fw_vnic_id != INVALID_HW_RING_ID) 1327 goto use_vnic; 1328 1329 if (!rxq) { 1330 PMD_DRV_LOG(ERR, 1331 "Queue invalid or used with other VNIC\n"); 1332 rte_flow_error_set(error, 1333 EINVAL, 1334 RTE_FLOW_ERROR_TYPE_ACTION, 1335 act, 1336 "Queue invalid queue or in use"); 1337 rc = -rte_errno; 1338 goto ret; 1339 } 1340 1341 rxq->vnic = vnic; 1342 rxq->rx_started = 1; 1343 vnic->rx_queue_cnt++; 1344 vnic->start_grp_id = act_q->index; 1345 vnic->end_grp_id = act_q->index; 1346 vnic->func_default = 0; //This is not a default VNIC. 1347 1348 PMD_DRV_LOG(DEBUG, "VNIC found\n"); 1349 1350 rc = bnxt_vnic_prep(bp, vnic, act, error); 1351 if (rc) 1352 goto ret; 1353 1354 PMD_DRV_LOG(DEBUG, 1355 "vnic[%d] = %p vnic->fw_grp_ids = %p\n", 1356 act_q->index, vnic, vnic->fw_grp_ids); 1357 1358 use_vnic: 1359 vnic->ff_pool_idx = vnic_id; 1360 PMD_DRV_LOG(DEBUG, 1361 "Setting vnic ff_idx %d\n", vnic->ff_pool_idx); 1362 filter->dst_id = vnic->fw_vnic_id; 1363 skip_vnic_alloc: 1364 /* For ntuple filter, create the L2 filter with default VNIC. 
1365 * The user specified redirect queue will be set while creating 1366 * the ntuple filter in hardware. 1367 */ 1368 vnic0 = BNXT_GET_DEFAULT_VNIC(bp); 1369 if (use_ntuple) 1370 filter1 = bnxt_get_l2_filter(bp, filter, vnic0); 1371 else 1372 filter1 = bnxt_get_l2_filter(bp, filter, vnic); 1373 if (filter1 == NULL) { 1374 rte_flow_error_set(error, 1375 ENOSPC, 1376 RTE_FLOW_ERROR_TYPE_ACTION, 1377 act, 1378 "Filter not available"); 1379 rc = -rte_errno; 1380 goto ret; 1381 } 1382 1383 PMD_DRV_LOG(DEBUG, "new fltr: %p l2fltr: %p l2_ref_cnt: %d\n", 1384 filter, filter1, filter1->l2_ref_cnt); 1385 bnxt_update_filter_flags_en(filter, filter1, use_ntuple); 1386 break; 1387 case RTE_FLOW_ACTION_TYPE_DROP: 1388 vnic0 = &bp->vnic_info[0]; 1389 filter->dst_id = vnic0->fw_vnic_id; 1390 filter->valid_flags |= BNXT_FLOW_L2_DROP_FLAG; 1391 filter1 = bnxt_get_l2_filter(bp, filter, vnic0); 1392 if (filter1 == NULL) { 1393 rte_flow_error_set(error, 1394 ENOSPC, 1395 RTE_FLOW_ERROR_TYPE_ACTION, 1396 act, 1397 "Filter not available"); 1398 rc = -rte_errno; 1399 goto ret; 1400 } 1401 1402 if (filter->filter_type == HWRM_CFA_EM_FILTER) 1403 filter->flags = 1404 HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP; 1405 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) 1406 filter->flags = 1407 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP; 1408 1409 bnxt_update_filter_flags_en(filter, filter1, use_ntuple); 1410 break; 1411 case RTE_FLOW_ACTION_TYPE_COUNT: 1412 vnic0 = &bp->vnic_info[0]; 1413 filter1 = bnxt_get_l2_filter(bp, filter, vnic0); 1414 if (filter1 == NULL) { 1415 rte_flow_error_set(error, 1416 ENOSPC, 1417 RTE_FLOW_ERROR_TYPE_ACTION, 1418 act, 1419 "New filter not available"); 1420 rc = -rte_errno; 1421 goto ret; 1422 } 1423 1424 filter->fw_l2_filter_id = filter1->fw_l2_filter_id; 1425 filter->flow_id = filter1->flow_id; 1426 filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER; 1427 break; 1428 case RTE_FLOW_ACTION_TYPE_VF: 1429 act_vf = (const struct rte_flow_action_vf *)act->conf; 1430 vf = act_vf->id; 1431 1432 if (filter->tunnel_type == 1433 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN || 1434 filter->tunnel_type == 1435 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE) { 1436 /* If issued on a VF, ensure id is 0 and is trusted */ 1437 if (BNXT_VF(bp)) { 1438 if (!BNXT_VF_IS_TRUSTED(bp) || vf) { 1439 rte_flow_error_set(error, EINVAL, 1440 RTE_FLOW_ERROR_TYPE_ACTION, 1441 act, 1442 "Incorrect VF"); 1443 rc = -rte_errno; 1444 goto ret; 1445 } 1446 } 1447 1448 filter->enables |= filter->tunnel_type; 1449 filter->filter_type = HWRM_CFA_TUNNEL_REDIRECT_FILTER; 1450 goto done; 1451 } 1452 1453 if (vf >= bp->pdev->max_vfs) { 1454 rte_flow_error_set(error, 1455 EINVAL, 1456 RTE_FLOW_ERROR_TYPE_ACTION, 1457 act, 1458 "Incorrect VF id!"); 1459 rc = -rte_errno; 1460 goto ret; 1461 } 1462 1463 filter->mirror_vnic_id = 1464 dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf); 1465 if (dflt_vnic < 0) { 1466 /* This simply indicates there's no driver loaded. 1467 * This is not an error. 
1468 */ 1469 rte_flow_error_set(error, 1470 EINVAL, 1471 RTE_FLOW_ERROR_TYPE_ACTION, 1472 act, 1473 "Unable to get default VNIC for VF"); 1474 rc = -rte_errno; 1475 goto ret; 1476 } 1477 1478 filter->mirror_vnic_id = dflt_vnic; 1479 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID; 1480 1481 vnic0 = &bp->vnic_info[0]; 1482 filter1 = bnxt_get_l2_filter(bp, filter, vnic0); 1483 if (filter1 == NULL) { 1484 rte_flow_error_set(error, 1485 ENOSPC, 1486 RTE_FLOW_ERROR_TYPE_ACTION, 1487 act, 1488 "New filter not available"); 1489 rc = -rte_errno; 1490 goto ret; 1491 } 1492 1493 filter->fw_l2_filter_id = filter1->fw_l2_filter_id; 1494 filter->flow_id = filter1->flow_id; 1495 break; 1496 case RTE_FLOW_ACTION_TYPE_RSS: 1497 rc = bnxt_validate_rss_action(actions); 1498 if (rc != 0) { 1499 rte_flow_error_set(error, 1500 EINVAL, 1501 RTE_FLOW_ERROR_TYPE_ACTION, 1502 act, 1503 "Invalid actions specified with RSS"); 1504 rc = -rte_errno; 1505 goto ret; 1506 } 1507 1508 rss = (const struct rte_flow_action_rss *)act->conf; 1509 1510 vnic_id = bnxt_get_vnic(bp, attr->group); 1511 1512 BNXT_VALID_VNIC_OR_RET(bp, vnic_id); 1513 vnic = &bp->vnic_info[vnic_id]; 1514 1515 /* 1516 * For non NS3 cases, rte_flow_items will not be considered 1517 * for RSS updates. 1518 */ 1519 if (filter->filter_type == HWRM_CFA_CONFIG) { 1520 /* RSS config update requested */ 1521 rc = bnxt_vnic_rss_cfg_update(bp, vnic, act, error); 1522 if (rc != 0) 1523 return -rte_errno; 1524 1525 filter->dst_id = vnic->fw_vnic_id; 1526 break; 1527 } 1528 1529 /* Check if requested RSS config matches RSS config of VNIC 1530 * only if it is not a fresh VNIC configuration. 1531 * Otherwise the existing VNIC configuration can be used. 1532 */ 1533 if (vnic->rx_queue_cnt) { 1534 rc = match_vnic_rss_cfg(bp, vnic, rss); 1535 if (rc) { 1536 PMD_DRV_LOG(ERR, 1537 "VNIC and RSS config mismatch\n"); 1538 rte_flow_error_set(error, 1539 EINVAL, 1540 RTE_FLOW_ERROR_TYPE_ACTION, 1541 act, 1542 "VNIC and RSS cfg mismatch"); 1543 rc = -rte_errno; 1544 goto ret; 1545 } 1546 goto vnic_found; 1547 } 1548 1549 for (i = 0; i < rss->queue_num; i++) { 1550 PMD_DRV_LOG(DEBUG, "RSS action Queue %d\n", 1551 rss->queue[i]); 1552 1553 if (!rss->queue[i] || 1554 rss->queue[i] >= bp->rx_nr_rings || 1555 !bp->rx_queues[rss->queue[i]]) { 1556 rte_flow_error_set(error, 1557 EINVAL, 1558 RTE_FLOW_ERROR_TYPE_ACTION, 1559 act, 1560 "Invalid queue ID for RSS"); 1561 rc = -rte_errno; 1562 goto ret; 1563 } 1564 rxq = bp->rx_queues[rss->queue[i]]; 1565 1566 if (bp->vnic_info[0].fw_grp_ids[rss->queue[i]] != 1567 INVALID_HW_RING_ID) { 1568 PMD_DRV_LOG(ERR, 1569 "queue active with other VNIC\n"); 1570 rte_flow_error_set(error, 1571 EINVAL, 1572 RTE_FLOW_ERROR_TYPE_ACTION, 1573 act, 1574 "Invalid queue ID for RSS"); 1575 rc = -rte_errno; 1576 goto ret; 1577 } 1578 1579 rxq->vnic = vnic; 1580 rxq->rx_started = 1; 1581 vnic->rx_queue_cnt++; 1582 } 1583 1584 vnic->start_grp_id = rss->queue[0]; 1585 vnic->end_grp_id = rss->queue[rss->queue_num - 1]; 1586 vnic->func_default = 0; //This is not a default VNIC. 1587 1588 rc = bnxt_vnic_prep(bp, vnic, act, error); 1589 if (rc) 1590 goto ret; 1591 1592 PMD_DRV_LOG(DEBUG, 1593 "vnic[%d] = %p vnic->fw_grp_ids = %p\n", 1594 vnic_id, vnic, vnic->fw_grp_ids); 1595 1596 vnic->ff_pool_idx = vnic_id; 1597 PMD_DRV_LOG(DEBUG, 1598 "Setting vnic ff_pool_idx %d\n", vnic->ff_pool_idx); 1599 1600 /* This can be done only after vnic_grp_alloc is done. 
*/ 1601 for (i = 0; i < vnic->rx_queue_cnt; i++) { 1602 vnic->fw_grp_ids[i] = 1603 bp->grp_info[rss->queue[i]].fw_grp_id; 1604 /* Make sure vnic0 does not use these rings. */ 1605 bp->vnic_info[0].fw_grp_ids[rss->queue[i]] = 1606 INVALID_HW_RING_ID; 1607 } 1608 1609 for (rss_idx = 0; rss_idx < HW_HASH_INDEX_SIZE; ) { 1610 for (i = 0; i < vnic->rx_queue_cnt; i++) 1611 vnic->rss_table[rss_idx++] = 1612 vnic->fw_grp_ids[i]; 1613 } 1614 1615 /* Configure RSS only if the queue count is > 1 */ 1616 if (vnic->rx_queue_cnt > 1) { 1617 vnic->hash_type = 1618 bnxt_rte_to_hwrm_hash_types(rss->types); 1619 vnic->hash_mode = 1620 bnxt_rte_to_hwrm_hash_level(bp, rss->types, rss->level); 1621 1622 if (!rss->key_len) { 1623 /* If hash key has not been specified, 1624 * use random hash key. 1625 */ 1626 bnxt_prandom_bytes(vnic->rss_hash_key, 1627 HW_HASH_KEY_SIZE); 1628 } else { 1629 if (rss->key_len > HW_HASH_KEY_SIZE) 1630 memcpy(vnic->rss_hash_key, 1631 rss->key, 1632 HW_HASH_KEY_SIZE); 1633 else 1634 memcpy(vnic->rss_hash_key, 1635 rss->key, 1636 rss->key_len); 1637 } 1638 bnxt_hwrm_vnic_rss_cfg(bp, vnic); 1639 } else { 1640 PMD_DRV_LOG(DEBUG, "No RSS config required\n"); 1641 } 1642 1643 vnic_found: 1644 filter->dst_id = vnic->fw_vnic_id; 1645 filter1 = bnxt_get_l2_filter(bp, filter, vnic); 1646 if (filter1 == NULL) { 1647 rte_flow_error_set(error, 1648 ENOSPC, 1649 RTE_FLOW_ERROR_TYPE_ACTION, 1650 act, 1651 "New filter not available"); 1652 rc = -rte_errno; 1653 goto ret; 1654 } 1655 1656 PMD_DRV_LOG(DEBUG, "L2 filter created\n"); 1657 bnxt_update_filter_flags_en(filter, filter1, use_ntuple); 1658 break; 1659 case RTE_FLOW_ACTION_TYPE_MARK: 1660 if (bp->mark_table == NULL) { 1661 rte_flow_error_set(error, 1662 ENOMEM, 1663 RTE_FLOW_ERROR_TYPE_ACTION, 1664 act, 1665 "Mark table not allocated."); 1666 rc = -rte_errno; 1667 goto ret; 1668 } 1669 1670 if (bp->flags & BNXT_FLAG_RX_VECTOR_PKT_MODE) { 1671 PMD_DRV_LOG(DEBUG, 1672 "Disabling vector processing for mark\n"); 1673 bp->eth_dev->rx_pkt_burst = bnxt_recv_pkts; 1674 bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE; 1675 } 1676 1677 filter->valid_flags |= BNXT_FLOW_MARK_FLAG; 1678 filter->mark = ((const struct rte_flow_action_mark *) 1679 act->conf)->id; 1680 PMD_DRV_LOG(DEBUG, "Mark the flow %d\n", filter->mark); 1681 break; 1682 default: 1683 rte_flow_error_set(error, 1684 EINVAL, 1685 RTE_FLOW_ERROR_TYPE_ACTION, 1686 act, 1687 "Invalid action."); 1688 rc = -rte_errno; 1689 goto ret; 1690 } 1691 1692 done: 1693 act = bnxt_flow_non_void_action(++act); 1694 while (act->type != RTE_FLOW_ACTION_TYPE_END) 1695 goto start; 1696 1697 return rc; 1698 ret: 1699 1700 if (filter1) { 1701 bnxt_hwrm_clear_l2_filter(bp, filter1); 1702 bnxt_free_filter(bp, filter1); 1703 } 1704 1705 if (rte_errno) { 1706 if (vnic && STAILQ_EMPTY(&vnic->filter)) 1707 vnic->rx_queue_cnt = 0; 1708 1709 if (rxq && !vnic->rx_queue_cnt) 1710 rxq->vnic = &bp->vnic_info[0]; 1711 } 1712 return -rte_errno; 1713 } 1714 1715 static 1716 struct bnxt_vnic_info *find_matching_vnic(struct bnxt *bp, 1717 struct bnxt_filter_info *filter) 1718 { 1719 struct bnxt_vnic_info *vnic = NULL; 1720 unsigned int i; 1721 1722 for (i = 0; i < bp->max_vnics; i++) { 1723 vnic = &bp->vnic_info[i]; 1724 if (vnic->fw_vnic_id != INVALID_VNIC_ID && 1725 filter->dst_id == vnic->fw_vnic_id) { 1726 PMD_DRV_LOG(DEBUG, "Found matching VNIC Id %d\n", 1727 vnic->ff_pool_idx); 1728 return vnic; 1729 } 1730 } 1731 return NULL; 1732 } 1733 1734 static int 1735 bnxt_flow_validate(struct rte_eth_dev *dev, 1736 const struct 
rte_flow_attr *attr, 1737 const struct rte_flow_item pattern[], 1738 const struct rte_flow_action actions[], 1739 struct rte_flow_error *error) 1740 { 1741 struct bnxt *bp = dev->data->dev_private; 1742 struct bnxt_vnic_info *vnic = NULL; 1743 struct bnxt_filter_info *filter; 1744 int ret = 0; 1745 1746 bnxt_acquire_flow_lock(bp); 1747 ret = bnxt_flow_args_validate(attr, pattern, actions, error); 1748 if (ret != 0) { 1749 bnxt_release_flow_lock(bp); 1750 return ret; 1751 } 1752 1753 filter = bnxt_get_unused_filter(bp); 1754 if (filter == NULL) { 1755 rte_flow_error_set(error, ENOSPC, 1756 RTE_FLOW_ERROR_TYPE_HANDLE, NULL, 1757 "Not enough resources for a new flow"); 1758 bnxt_release_flow_lock(bp); 1759 return -ENOSPC; 1760 } 1761 1762 ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr, 1763 error, filter); 1764 if (ret) 1765 goto exit; 1766 1767 vnic = find_matching_vnic(bp, filter); 1768 if (vnic) { 1769 if (STAILQ_EMPTY(&vnic->filter)) { 1770 bnxt_vnic_cleanup(bp, vnic); 1771 bp->nr_vnics--; 1772 PMD_DRV_LOG(DEBUG, "Free VNIC\n"); 1773 } 1774 } 1775 1776 if (filter->filter_type == HWRM_CFA_EM_FILTER) 1777 bnxt_hwrm_clear_em_filter(bp, filter); 1778 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) 1779 bnxt_hwrm_clear_ntuple_filter(bp, filter); 1780 else 1781 bnxt_hwrm_clear_l2_filter(bp, filter); 1782 1783 exit: 1784 /* No need to hold on to this filter if we are just validating flow */ 1785 bnxt_free_filter(bp, filter); 1786 bnxt_release_flow_lock(bp); 1787 1788 return ret; 1789 } 1790 1791 static void 1792 bnxt_update_filter(struct bnxt *bp, struct bnxt_filter_info *old_filter, 1793 struct bnxt_filter_info *new_filter) 1794 { 1795 /* Clear the new L2 filter that was created in the previous step in 1796 * bnxt_validate_and_parse_flow. For L2 filters, we will use the new 1797 * filter which points to the new destination queue and so we clear 1798 * the previous L2 filter. For ntuple filters, we are going to reuse 1799 * the old L2 filter and create new NTUPLE filter with this new 1800 * destination queue subsequently during bnxt_flow_create. So we 1801 * decrement the ref cnt of the L2 filter that would've been bumped 1802 * up previously in bnxt_validate_and_parse_flow as the old n-tuple 1803 * filter that was referencing it will be deleted now. 
1804 */ 1805 bnxt_hwrm_clear_l2_filter(bp, old_filter); 1806 if (new_filter->filter_type == HWRM_CFA_L2_FILTER) { 1807 bnxt_hwrm_set_l2_filter(bp, new_filter->dst_id, new_filter); 1808 } else { 1809 if (new_filter->filter_type == HWRM_CFA_EM_FILTER) 1810 bnxt_hwrm_clear_em_filter(bp, old_filter); 1811 if (new_filter->filter_type == HWRM_CFA_NTUPLE_FILTER) 1812 bnxt_hwrm_clear_ntuple_filter(bp, old_filter); 1813 } 1814 } 1815 1816 static int 1817 bnxt_match_filter(struct bnxt *bp, struct bnxt_filter_info *nf) 1818 { 1819 struct bnxt_filter_info *mf; 1820 struct rte_flow *flow; 1821 int i; 1822 1823 for (i = bp->max_vnics - 1; i >= 0; i--) { 1824 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 1825 1826 if (vnic->fw_vnic_id == INVALID_VNIC_ID) 1827 continue; 1828 1829 STAILQ_FOREACH(flow, &vnic->flow_list, next) { 1830 mf = flow->filter; 1831 1832 if (mf->filter_type == nf->filter_type && 1833 mf->flags == nf->flags && 1834 mf->src_port == nf->src_port && 1835 mf->src_port_mask == nf->src_port_mask && 1836 mf->dst_port == nf->dst_port && 1837 mf->dst_port_mask == nf->dst_port_mask && 1838 mf->ip_protocol == nf->ip_protocol && 1839 mf->ip_addr_type == nf->ip_addr_type && 1840 mf->ethertype == nf->ethertype && 1841 mf->vni == nf->vni && 1842 mf->tunnel_type == nf->tunnel_type && 1843 mf->l2_ovlan == nf->l2_ovlan && 1844 mf->l2_ovlan_mask == nf->l2_ovlan_mask && 1845 mf->l2_ivlan == nf->l2_ivlan && 1846 mf->l2_ivlan_mask == nf->l2_ivlan_mask && 1847 !memcmp(mf->l2_addr, nf->l2_addr, 1848 RTE_ETHER_ADDR_LEN) && 1849 !memcmp(mf->l2_addr_mask, nf->l2_addr_mask, 1850 RTE_ETHER_ADDR_LEN) && 1851 !memcmp(mf->src_macaddr, nf->src_macaddr, 1852 RTE_ETHER_ADDR_LEN) && 1853 !memcmp(mf->dst_macaddr, nf->dst_macaddr, 1854 RTE_ETHER_ADDR_LEN) && 1855 !memcmp(mf->src_ipaddr, nf->src_ipaddr, 1856 sizeof(nf->src_ipaddr)) && 1857 !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask, 1858 sizeof(nf->src_ipaddr_mask)) && 1859 !memcmp(mf->dst_ipaddr, nf->dst_ipaddr, 1860 sizeof(nf->dst_ipaddr)) && 1861 !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask, 1862 sizeof(nf->dst_ipaddr_mask))) { 1863 if (mf->dst_id == nf->dst_id) 1864 return -EEXIST; 1865 /* Free the old filter, update flow 1866 * with new filter 1867 */ 1868 bnxt_update_filter(bp, mf, nf); 1869 STAILQ_REMOVE(&vnic->filter, mf, 1870 bnxt_filter_info, next); 1871 STAILQ_INSERT_TAIL(&vnic->filter, nf, next); 1872 bnxt_free_filter(bp, mf); 1873 flow->filter = nf; 1874 return -EXDEV; 1875 } 1876 } 1877 } 1878 return 0; 1879 } 1880 1881 static void 1882 bnxt_setup_flow_counter(struct bnxt *bp) 1883 { 1884 if (bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_COUNTERS && 1885 !(bp->flags & BNXT_FLAG_FC_THREAD) && BNXT_FLOW_XSTATS_EN(bp)) { 1886 rte_eal_alarm_set(US_PER_S * BNXT_FC_TIMER, 1887 bnxt_flow_cnt_alarm_cb, 1888 (void *)bp); 1889 bp->flags |= BNXT_FLAG_FC_THREAD; 1890 } 1891 } 1892 1893 void bnxt_flow_cnt_alarm_cb(void *arg) 1894 { 1895 int rc = 0; 1896 struct bnxt *bp = arg; 1897 1898 if (!bp->flow_stat->rx_fc_out_tbl.va) { 1899 PMD_DRV_LOG(ERR, "bp->flow_stat->rx_fc_out_tbl.va is NULL?\n"); 1900 bnxt_cancel_fc_thread(bp); 1901 return; 1902 } 1903 1904 if (!bp->flow_stat->flow_count) { 1905 bnxt_cancel_fc_thread(bp); 1906 return; 1907 } 1908 1909 if (!bp->eth_dev->data->dev_started) { 1910 bnxt_cancel_fc_thread(bp); 1911 return; 1912 } 1913 1914 rc = bnxt_flow_stats_req(bp); 1915 if (rc) { 1916 PMD_DRV_LOG(ERR, "Flow stat alarm not rescheduled.\n"); 1917 return; 1918 } 1919 1920 rte_eal_alarm_set(US_PER_S * BNXT_FC_TIMER, 1921 bnxt_flow_cnt_alarm_cb, 1922 (void *)bp); 
1923 } 1924 1925 1926 static struct rte_flow * 1927 bnxt_flow_create(struct rte_eth_dev *dev, 1928 const struct rte_flow_attr *attr, 1929 const struct rte_flow_item pattern[], 1930 const struct rte_flow_action actions[], 1931 struct rte_flow_error *error) 1932 { 1933 struct bnxt *bp = dev->data->dev_private; 1934 struct bnxt_vnic_info *vnic = NULL; 1935 struct bnxt_filter_info *filter; 1936 bool update_flow = false; 1937 struct rte_flow *flow; 1938 int ret = 0; 1939 uint32_t tun_type, flow_id; 1940 1941 if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) { 1942 rte_flow_error_set(error, EINVAL, 1943 RTE_FLOW_ERROR_TYPE_HANDLE, NULL, 1944 "Failed to create flow, not a trusted VF!"); 1945 return NULL; 1946 } 1947 1948 if (!dev->data->dev_started) { 1949 rte_flow_error_set(error, 1950 EINVAL, 1951 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 1952 NULL, 1953 "Device must be started"); 1954 return NULL; 1955 } 1956 1957 flow = rte_zmalloc("bnxt_flow", sizeof(struct rte_flow), 0); 1958 if (!flow) { 1959 rte_flow_error_set(error, ENOMEM, 1960 RTE_FLOW_ERROR_TYPE_HANDLE, NULL, 1961 "Failed to allocate memory"); 1962 return flow; 1963 } 1964 1965 bnxt_acquire_flow_lock(bp); 1966 ret = bnxt_flow_args_validate(attr, pattern, actions, error); 1967 if (ret != 0) { 1968 PMD_DRV_LOG(ERR, "Not a valid flow.\n"); 1969 goto free_flow; 1970 } 1971 1972 filter = bnxt_get_unused_filter(bp); 1973 if (filter == NULL) { 1974 rte_flow_error_set(error, ENOSPC, 1975 RTE_FLOW_ERROR_TYPE_HANDLE, NULL, 1976 "Not enough resources for a new flow"); 1977 goto free_flow; 1978 } 1979 1980 ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr, 1981 error, filter); 1982 if (ret != 0) 1983 goto free_filter; 1984 1985 ret = bnxt_match_filter(bp, filter); 1986 if (ret == -EEXIST) { 1987 PMD_DRV_LOG(DEBUG, "Flow already exists.\n"); 1988 /* Clear the filter that was created as part of 1989 * validate_and_parse_flow() above 1990 */ 1991 bnxt_hwrm_clear_l2_filter(bp, filter); 1992 goto free_filter; 1993 } else if (ret == -EXDEV) { 1994 PMD_DRV_LOG(DEBUG, "Flow with same pattern exists\n"); 1995 PMD_DRV_LOG(DEBUG, "Updating with different destination\n"); 1996 update_flow = true; 1997 } 1998 1999 /* If tunnel redirection to a VF/PF is specified then only tunnel_type 2000 * is set and enable is set to the tunnel type. Issue hwrm cmd directly 2001 * in such a case.
2002 */ 2003 if (filter->filter_type == HWRM_CFA_TUNNEL_REDIRECT_FILTER && 2004 filter->enables == filter->tunnel_type) { 2005 ret = bnxt_hwrm_tunnel_redirect_query(bp, &tun_type); 2006 if (ret) { 2007 rte_flow_error_set(error, -ret, 2008 RTE_FLOW_ERROR_TYPE_HANDLE, NULL, 2009 "Unable to query tunnel to VF"); 2010 goto free_filter; 2011 } 2012 if (tun_type == (1U << filter->tunnel_type)) { 2013 ret = 2014 bnxt_hwrm_tunnel_redirect_free(bp, 2015 filter->tunnel_type); 2016 if (ret) { 2017 PMD_DRV_LOG(ERR, 2018 "Unable to free existing tunnel\n"); 2019 rte_flow_error_set(error, -ret, 2020 RTE_FLOW_ERROR_TYPE_HANDLE, 2021 NULL, 2022 "Unable to free preexisting " 2023 "tunnel on VF"); 2024 goto free_filter; 2025 } 2026 } 2027 ret = bnxt_hwrm_tunnel_redirect(bp, filter->tunnel_type); 2028 if (ret) { 2029 rte_flow_error_set(error, -ret, 2030 RTE_FLOW_ERROR_TYPE_HANDLE, NULL, 2031 "Unable to redirect tunnel to VF"); 2032 goto free_filter; 2033 } 2034 vnic = &bp->vnic_info[0]; 2035 goto done; 2036 } 2037 2038 if (filter->filter_type == HWRM_CFA_EM_FILTER) { 2039 filter->enables |= 2040 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID; 2041 ret = bnxt_hwrm_set_em_filter(bp, filter->dst_id, filter); 2042 if (ret != 0) { 2043 rte_flow_error_set(error, -ret, 2044 RTE_FLOW_ERROR_TYPE_HANDLE, NULL, 2045 "Failed to create EM filter"); 2046 goto free_filter; 2047 } 2048 } 2049 2050 if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) { 2051 filter->enables |= 2052 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID; 2053 ret = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, filter); 2054 if (ret != 0) { 2055 rte_flow_error_set(error, -ret, 2056 RTE_FLOW_ERROR_TYPE_HANDLE, NULL, 2057 "Failed to create ntuple filter"); 2058 goto free_filter; 2059 } 2060 } 2061 2062 if (BNXT_RFS_NEEDS_VNIC(bp)) 2063 vnic = find_matching_vnic(bp, filter); 2064 else 2065 vnic = BNXT_GET_DEFAULT_VNIC(bp); 2066 done: 2067 if (!ret || update_flow) { 2068 flow->filter = filter; 2069 flow->vnic = vnic; 2070 if (update_flow) { 2071 ret = -EXDEV; 2072 goto free_flow; 2073 } 2074 2075 if (filter->valid_flags & BNXT_FLOW_MARK_FLAG) { 2076 PMD_DRV_LOG(DEBUG, 2077 "Mark action: mark id 0x%x, flow id 0x%x\n", 2078 filter->mark, filter->flow_id); 2079 2080 /* TCAM and EM should be 16-bit only. 2081 * Other modes not supported. 
2082 */ 2083 flow_id = filter->flow_id & BNXT_FLOW_ID_MASK; 2084 if (bp->mark_table[flow_id].valid) { 2085 rte_flow_error_set(error, EEXIST, 2086 RTE_FLOW_ERROR_TYPE_HANDLE, 2087 NULL, 2088 "Flow with mark id exists"); 2089 bnxt_clear_one_vnic_filter(bp, filter); 2090 goto free_filter; 2091 } 2092 bp->mark_table[flow_id].valid = true; 2093 bp->mark_table[flow_id].mark_id = filter->mark; 2094 } 2095 2096 STAILQ_INSERT_TAIL(&vnic->filter, filter, next); 2097 STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next); 2098 2099 if (BNXT_FLOW_XSTATS_EN(bp)) 2100 bp->flow_stat->flow_count++; 2101 bnxt_release_flow_lock(bp); 2102 bnxt_setup_flow_counter(bp); 2103 PMD_DRV_LOG(DEBUG, "Successfully created flow.\n"); 2104 return flow; 2105 } 2106 2107 free_filter: 2108 bnxt_free_filter(bp, filter); 2109 free_flow: 2110 if (ret == -EEXIST) 2111 rte_flow_error_set(error, ret, 2112 RTE_FLOW_ERROR_TYPE_HANDLE, NULL, 2113 "Matching Flow exists."); 2114 else if (ret == -EXDEV) 2115 rte_flow_error_set(error, 0, 2116 RTE_FLOW_ERROR_TYPE_NONE, NULL, 2117 "Flow with pattern exists, updating destination queue"); 2118 else if (!rte_errno) 2119 rte_flow_error_set(error, -ret, 2120 RTE_FLOW_ERROR_TYPE_HANDLE, NULL, 2121 "Failed to create flow."); 2122 rte_free(flow); 2123 flow = NULL; 2124 bnxt_release_flow_lock(bp); 2125 return flow; 2126 } 2127 2128 static int bnxt_handle_tunnel_redirect_destroy(struct bnxt *bp, 2129 struct bnxt_filter_info *filter, 2130 struct rte_flow_error *error) 2131 { 2132 uint16_t tun_dst_fid; 2133 uint32_t tun_type; 2134 int ret = 0; 2135 2136 ret = bnxt_hwrm_tunnel_redirect_query(bp, &tun_type); 2137 if (ret) { 2138 rte_flow_error_set(error, -ret, 2139 RTE_FLOW_ERROR_TYPE_HANDLE, NULL, 2140 "Unable to query tunnel to VF"); 2141 return ret; 2142 } 2143 if (tun_type == (1U << filter->tunnel_type)) { 2144 ret = bnxt_hwrm_tunnel_redirect_info(bp, filter->tunnel_type, 2145 &tun_dst_fid); 2146 if (ret) { 2147 rte_flow_error_set(error, -ret, 2148 RTE_FLOW_ERROR_TYPE_HANDLE, 2149 NULL, 2150 "tunnel_redirect info cmd fail"); 2151 return ret; 2152 } 2153 PMD_DRV_LOG(INFO, "Pre-existing tunnel fid = %x vf->fid = %x\n", 2154 tun_dst_fid + bp->first_vf_id, bp->fw_fid); 2155 2156 /* Tunnel doesn't belong to this VF, so don't send HWRM 2157 * cmd, just delete the flow from driver 2158 */ 2159 if (bp->fw_fid != (tun_dst_fid + bp->first_vf_id)) { 2160 PMD_DRV_LOG(ERR, 2161 "Tunnel does not belong to this VF, skip hwrm_tunnel_redirect_free\n"); 2162 } else { 2163 ret = bnxt_hwrm_tunnel_redirect_free(bp, 2164 filter->tunnel_type); 2165 if (ret) { 2166 rte_flow_error_set(error, -ret, 2167 RTE_FLOW_ERROR_TYPE_HANDLE, 2168 NULL, 2169 "Unable to free tunnel redirection"); 2170 return ret; 2171 } 2172 } 2173 } 2174 return ret; 2175 } 2176 2177 static int 2178 _bnxt_flow_destroy(struct bnxt *bp, 2179 struct rte_flow *flow, 2180 struct rte_flow_error *error) 2181 { 2182 struct bnxt_filter_info *filter; 2183 struct bnxt_vnic_info *vnic; 2184 int ret = 0; 2185 uint32_t flow_id; 2186 2187 filter = flow->filter; 2188 vnic = flow->vnic; 2189 2190 if (filter->filter_type == HWRM_CFA_TUNNEL_REDIRECT_FILTER && 2191 filter->enables == filter->tunnel_type) { 2192 ret = bnxt_handle_tunnel_redirect_destroy(bp, filter, error); 2193 if (!ret) 2194 goto done; 2195 else 2196 return ret; 2197 } 2198 2199 /* For config type, there is no filter in HW. 
Finish cleanup here */ 2200 if (filter->filter_type == HWRM_CFA_CONFIG) 2201 goto done; 2202 2203 ret = bnxt_match_filter(bp, filter); 2204 if (ret == 0) 2205 PMD_DRV_LOG(ERR, "Could not find matching flow\n"); 2206 2207 if (filter->valid_flags & BNXT_FLOW_MARK_FLAG) { 2208 flow_id = filter->flow_id & BNXT_FLOW_ID_MASK; 2209 memset(&bp->mark_table[flow_id], 0, 2210 sizeof(bp->mark_table[flow_id])); 2211 filter->flow_id = 0; 2212 } 2213 2214 ret = bnxt_clear_one_vnic_filter(bp, filter); 2215 2216 done: 2217 if (!ret) { 2218 /* If it is a L2 drop filter, when the filter is created, 2219 * the FW updates the BC/MC records. 2220 * Once this filter is removed, issue the set_rx_mask command 2221 * to reset the BC/MC records in the HW to the settings 2222 * before the drop counter is created. 2223 */ 2224 if (filter->valid_flags & BNXT_FLOW_L2_DROP_FLAG) 2225 bnxt_set_rx_mask_no_vlan(bp, &bp->vnic_info[0]); 2226 2227 STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next); 2228 bnxt_free_filter(bp, filter); 2229 STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next); 2230 rte_free(flow); 2231 if (BNXT_FLOW_XSTATS_EN(bp)) 2232 bp->flow_stat->flow_count--; 2233 2234 /* If this was the last flow associated with this vnic, 2235 * switch the queue back to RSS pool. 2236 */ 2237 if (vnic && !vnic->func_default && 2238 STAILQ_EMPTY(&vnic->flow_list)) { 2239 bnxt_vnic_cleanup(bp, vnic); 2240 bp->nr_vnics--; 2241 } 2242 } else { 2243 rte_flow_error_set(error, -ret, 2244 RTE_FLOW_ERROR_TYPE_HANDLE, NULL, 2245 "Failed to destroy flow."); 2246 } 2247 2248 return ret; 2249 } 2250 2251 static int 2252 bnxt_flow_destroy(struct rte_eth_dev *dev, 2253 struct rte_flow *flow, 2254 struct rte_flow_error *error) 2255 { 2256 struct bnxt *bp = dev->data->dev_private; 2257 int ret = 0; 2258 2259 bnxt_acquire_flow_lock(bp); 2260 if (!flow) { 2261 rte_flow_error_set(error, EINVAL, 2262 RTE_FLOW_ERROR_TYPE_HANDLE, NULL, 2263 "Invalid flow: failed to destroy flow."); 2264 bnxt_release_flow_lock(bp); 2265 return -EINVAL; 2266 } 2267 2268 if (!flow->filter) { 2269 rte_flow_error_set(error, EINVAL, 2270 RTE_FLOW_ERROR_TYPE_HANDLE, NULL, 2271 "Invalid flow: failed to destroy flow."); 2272 bnxt_release_flow_lock(bp); 2273 return -EINVAL; 2274 } 2275 ret = _bnxt_flow_destroy(bp, flow, error); 2276 bnxt_release_flow_lock(bp); 2277 2278 return ret; 2279 } 2280 2281 void bnxt_cancel_fc_thread(struct bnxt *bp) 2282 { 2283 bp->flags &= ~BNXT_FLAG_FC_THREAD; 2284 rte_eal_alarm_cancel(bnxt_flow_cnt_alarm_cb, (void *)bp); 2285 } 2286 2287 static int 2288 bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error) 2289 { 2290 struct bnxt *bp = dev->data->dev_private; 2291 struct bnxt_vnic_info *vnic; 2292 struct rte_flow *flow; 2293 unsigned int i; 2294 int ret = 0; 2295 2296 bnxt_acquire_flow_lock(bp); 2297 for (i = 0; i < bp->max_vnics; i++) { 2298 vnic = &bp->vnic_info[i]; 2299 if (vnic && vnic->fw_vnic_id == INVALID_VNIC_ID) 2300 continue; 2301 2302 while (!STAILQ_EMPTY(&vnic->flow_list)) { 2303 flow = STAILQ_FIRST(&vnic->flow_list); 2304 2305 if (!flow->filter) 2306 continue; 2307 2308 ret = _bnxt_flow_destroy(bp, flow, error); 2309 if (ret) 2310 break; 2311 } 2312 } 2313 2314 bnxt_cancel_fc_thread(bp); 2315 bnxt_release_flow_lock(bp); 2316 2317 return ret; 2318 } 2319 2320 const struct rte_flow_ops bnxt_flow_ops = { 2321 .validate = bnxt_flow_validate, 2322 .create = bnxt_flow_create, 2323 .destroy = bnxt_flow_destroy, 2324 .flush = bnxt_flow_flush, 2325 }; 2326
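/*
 * Illustrative usage sketch (not part of the driver): an application reaches
 * bnxt_flow_ops above only indirectly, through the generic rte_flow API.
 * The snippet below is an assumed example, not a code path in this file;
 * port_id is assumed to be the ethdev port id of a started bnxt port.
 * It steers IPv4/UDP traffic with destination port 4789 to Rx queue 1
 * (queue index 0 is rejected by the QUEUE action handling in
 * bnxt_validate_and_parse_flow()); error handling is abbreviated.
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_udp udp_spec = {
 *		.hdr = { .dst_port = RTE_BE16(4789) },
 *	};
 *	struct rte_flow_item_udp udp_mask = {
 *		.hdr = { .dst_port = RTE_BE16(0xffff) },
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *flow = NULL;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *
 * These calls land in bnxt_flow_validate() and bnxt_flow_create()
 * respectively via the .validate and .create members of bnxt_flow_ops.
 */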