/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 */

#include <rte_memzone.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_ether.h>

#include "vnic_dev.h"
#include "vnic_resource.h"
#include "vnic_devcmd.h"
#include "vnic_nic.h"
#include "vnic_stats.h"
#include "vnic_flowman.h"


enum vnic_proxy_type {
	PROXY_NONE,
	PROXY_BY_BDF,
	PROXY_BY_INDEX,
};

struct vnic_res {
	void __iomem *vaddr;
	dma_addr_t bus_addr;
	unsigned int count;
};

struct vnic_intr_coal_timer_info {
	uint32_t mul;
	uint32_t div;
	uint32_t max_usec;
};

struct vnic_dev {
	void *priv;
	struct rte_pci_device *pdev;
	struct vnic_res res[RES_TYPE_MAX];
	enum vnic_dev_intr_mode intr_mode;
	struct vnic_devcmd __iomem *devcmd;
	struct vnic_devcmd_notify *notify;
	struct vnic_devcmd_notify notify_copy;
	dma_addr_t notify_pa;
	uint32_t notify_sz;
	dma_addr_t linkstatus_pa;
	struct vnic_stats *stats;
	dma_addr_t stats_pa;
	struct vnic_devcmd_fw_info *fw_info;
	dma_addr_t fw_info_pa;
	struct fm_info *flowman_info;
	dma_addr_t flowman_info_pa;
	enum vnic_proxy_type proxy;
	uint32_t proxy_index;
	uint64_t args[VNIC_DEVCMD_NARGS];
	int in_reset;
	struct vnic_intr_coal_timer_info intr_coal_timer_info;
	void *(*alloc_consistent)(void *priv, size_t size,
		dma_addr_t *dma_handle, uint8_t *name);
	void (*free_consistent)(void *priv,
		size_t size, void *vaddr,
		dma_addr_t dma_handle);
	/*
	 * Used to serialize devcmd access, currently from PF and its
	 * VF representors. When there are no representors, lock is
	 * not used.
	 */
	int locked;
	void (*lock)(void *priv);
	void (*unlock)(void *priv);
	struct vnic_dev *pf_vdev;
	int vf_id;
};

#define VNIC_MAX_RES_HDR_SIZE \
	(sizeof(struct vnic_resource_header) + \
	sizeof(struct vnic_resource) * RES_TYPE_MAX)
#define VNIC_RES_STRIDE	128

void *vnic_dev_priv(struct vnic_dev *vdev)
{
	return vdev->priv;
}

void vnic_register_cbacks(struct vnic_dev *vdev,
	void *(*alloc_consistent)(void *priv, size_t size,
		dma_addr_t *dma_handle, uint8_t *name),
	void (*free_consistent)(void *priv,
		size_t size, void *vaddr,
		dma_addr_t dma_handle))
{
	vdev->alloc_consistent = alloc_consistent;
	vdev->free_consistent = free_consistent;
}

void vnic_register_lock(struct vnic_dev *vdev, void (*lock)(void *priv),
	void (*unlock)(void *priv))
{
	vdev->lock = lock;
	vdev->unlock = unlock;
	vdev->locked = 0;
}
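
/*
 * Walk the resource table in BAR0. The BAR starts with a
 * vnic_resource_header (or mgmt_barmap_hdr for the management vNIC),
 * followed by an array of vnic_resource entries terminated by
 * RES_TYPE_EOL. Each entry names the resource type, the BAR it lives
 * in, its offset within that BAR, and a count; WQ/RQ/CQ/interrupt
 * control resources are arrays of VNIC_RES_STRIDE-byte register blocks.
 */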

static int vnic_dev_discover_res(struct vnic_dev *vdev,
	struct vnic_dev_bar *bar, unsigned int num_bars)
{
	struct vnic_resource_header __iomem *rh;
	struct mgmt_barmap_hdr __iomem *mrh;
	struct vnic_resource __iomem *r;
	uint8_t type;

	if (num_bars == 0)
		return -EINVAL;

	if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
		pr_err("vNIC BAR0 res hdr length error\n");
		return -EINVAL;
	}

	rh = bar->vaddr;
	mrh = bar->vaddr;
	if (!rh) {
		pr_err("vNIC BAR0 res hdr not mem-mapped\n");
		return -EINVAL;
	}

	/* Check for mgmt vnic in addition to normal vnic */
	if ((ioread32(&rh->magic) != VNIC_RES_MAGIC) ||
	    (ioread32(&rh->version) != VNIC_RES_VERSION)) {
		if ((ioread32(&mrh->magic) != MGMTVNIC_MAGIC) ||
		    (ioread32(&mrh->version) != MGMTVNIC_VERSION)) {
			pr_err("vNIC BAR0 res magic/version error " \
				"exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n",
				VNIC_RES_MAGIC, VNIC_RES_VERSION,
				MGMTVNIC_MAGIC, MGMTVNIC_VERSION,
				ioread32(&rh->magic), ioread32(&rh->version));
			return -EINVAL;
		}
	}

	if (ioread32(&mrh->magic) == MGMTVNIC_MAGIC)
		r = (struct vnic_resource __iomem *)(mrh + 1);
	else
		r = (struct vnic_resource __iomem *)(rh + 1);


	while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {
		uint8_t bar_num = ioread8(&r->bar);
		uint32_t bar_offset = ioread32(&r->bar_offset);
		uint32_t count = ioread32(&r->count);
		uint32_t len;

		r++;

		if (bar_num >= num_bars)
			continue;

		if (!bar[bar_num].len || !bar[bar_num].vaddr)
			continue;

		switch (type) {
		case RES_TYPE_WQ:
		case RES_TYPE_RQ:
		case RES_TYPE_CQ:
		case RES_TYPE_INTR_CTRL:
			/* each count is stride bytes long */
			len = count * VNIC_RES_STRIDE;
			if (len + bar_offset > bar[bar_num].len) {
				pr_err("vNIC BAR0 resource %d " \
					"out-of-bounds, offset 0x%x + " \
					"size 0x%x > bar len 0x%lx\n",
					type, bar_offset,
					len,
					bar[bar_num].len);
				return -EINVAL;
			}
			break;
		case RES_TYPE_INTR_PBA_LEGACY:
		case RES_TYPE_DEVCMD:
			len = count;
			break;
		default:
			continue;
		}

		vdev->res[type].count = count;
		vdev->res[type].vaddr = (char __iomem *)bar[bar_num].vaddr +
			bar_offset;
		vdev->res[type].bus_addr = bar[bar_num].bus_addr + bar_offset;
	}

	return 0;
}

unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
	enum vnic_res_type type)
{
	return vdev->res[type].count;
}

void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
	unsigned int index)
{
	if (!vdev->res[type].vaddr)
		return NULL;

	switch (type) {
	case RES_TYPE_WQ:
	case RES_TYPE_RQ:
	case RES_TYPE_CQ:
	case RES_TYPE_INTR_CTRL:
		return (char __iomem *)vdev->res[type].vaddr +
			index * VNIC_RES_STRIDE;
	default:
		return (char __iomem *)vdev->res[type].vaddr;
	}
}
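
/*
 * Example of the sizing arithmetic below (values are illustrative only):
 * desc_count = 100 and desc_size = 16 give a descriptor count rounded
 * up to 128 and a descriptor size of 16, so size = 128 * 16 = 2048
 * bytes and size_unaligned = 2048 + 512 = 2560 bytes, leaving room to
 * realign the ring base to a 512-byte boundary after allocation.
 */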

unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
	unsigned int desc_count, unsigned int desc_size)
{
	/* The base address of the desc rings must be 512 byte aligned.
	 * Descriptor count is aligned to groups of 32 descriptors. A
	 * count of 0 means the maximum 4096 descriptors. Descriptor
	 * size is aligned to 16 bytes.
	 */

	unsigned int count_align = 32;
	unsigned int desc_align = 16;

	ring->base_align = 512;

	if (desc_count == 0)
		desc_count = 4096;

	ring->desc_count = VNIC_ALIGN(desc_count, count_align);

	ring->desc_size = VNIC_ALIGN(desc_size, desc_align);

	ring->size = ring->desc_count * ring->desc_size;
	ring->size_unaligned = ring->size + ring->base_align;

	return ring->size_unaligned;
}

void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
{
	memset(ring->descs, 0, ring->size);
}

int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev,
	struct vnic_dev_ring *ring,
	unsigned int desc_count, unsigned int desc_size,
	__rte_unused unsigned int socket_id,
	char *z_name)
{
	void *alloc_addr;
	dma_addr_t alloc_pa = 0;

	vnic_dev_desc_ring_size(ring, desc_count, desc_size);
	alloc_addr = vdev->alloc_consistent(vdev->priv,
					    ring->size_unaligned,
					    &alloc_pa, (uint8_t *)z_name);
	if (!alloc_addr) {
		pr_err("Failed to allocate ring (size=%d), aborting\n",
			(int)ring->size);
		return -ENOMEM;
	}
	ring->descs_unaligned = alloc_addr;
	if (!alloc_pa) {
		pr_err("Failed to map allocated ring (size=%d), aborting\n",
			(int)ring->size);
		vdev->free_consistent(vdev->priv,
				      ring->size_unaligned,
				      alloc_addr,
				      alloc_pa);
		return -ENOMEM;
	}
	ring->base_addr_unaligned = alloc_pa;

	ring->base_addr = VNIC_ALIGN(ring->base_addr_unaligned,
		ring->base_align);
	ring->descs = (uint8_t *)ring->descs_unaligned +
		(ring->base_addr - ring->base_addr_unaligned);

	vnic_dev_clear_desc_ring(ring);

	ring->desc_avail = ring->desc_count - 1;

	return 0;
}

void vnic_dev_free_desc_ring(__rte_unused struct vnic_dev *vdev,
	struct vnic_dev_ring *ring)
{
	if (ring->descs) {
		vdev->free_consistent(vdev->priv,
				      ring->size_unaligned,
				      ring->descs_unaligned,
				      ring->base_addr_unaligned);
		ring->descs = NULL;
	}
}
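
/*
 * Issue a single devcmd through the memory-mapped devcmd registers:
 * write the input arguments, post the command code, then poll the
 * status register in 100 usec steps for up to 'wait' iterations.
 * On success the output arguments are read back into vdev->args;
 * a status of all-ones means the PCIe device has gone away.
 */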

static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	int wait)
{
	struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
	unsigned int i;
	int delay;
	uint32_t status;
	int err;

	status = ioread32(&devcmd->status);
	if (status == 0xFFFFFFFF) {
		/* PCI-e target device is gone */
		return -ENODEV;
	}
	if (status & STAT_BUSY) {
		pr_err("Busy devcmd %d\n", _CMD_N(cmd));
		return -EBUSY;
	}

	if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
		for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
			writeq(vdev->args[i], &devcmd->args[i]);
		rte_wmb(); /* complete all writes initiated till now */
	}

	iowrite32(cmd, &devcmd->cmd);

	if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
		return 0;

	for (delay = 0; delay < wait; delay++) {
		usleep(100);

		status = ioread32(&devcmd->status);
		if (status == 0xFFFFFFFF) {
			/* PCI-e target device is gone */
			return -ENODEV;
		}

		if (!(status & STAT_BUSY)) {
			if (status & STAT_ERROR) {
				err = -(int)readq(&devcmd->args[0]);
				if (cmd != CMD_CAPABILITY &&
				    cmd != CMD_OVERLAY_OFFLOAD_CTRL &&
				    cmd != CMD_GET_SUPP_FEATURE_VER)
					pr_err("Devcmd %d failed " \
						"with error code %d\n",
						_CMD_N(cmd), err);
				return err;
			}

			if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
				rte_rmb(); /* finish all reads */
				for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
					vdev->args[i] = readq(&devcmd->args[i]);
			}

			return 0;
		}
	}

	pr_err("Timedout devcmd %d\n", _CMD_N(cmd));
	return -ETIMEDOUT;
}
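
/*
 * For proxied commands (used by VF representors), the argument array
 * is repacked before issuing the proxy devcmd: args[0] carries the
 * proxy target (VF index or BDF), args[1] the command being proxied,
 * and args[2..] the proxied command's own arguments. On return,
 * args[0] holds the proxy status and args[1..] the proxied results.
 */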

static int vnic_dev_cmd_proxy(struct vnic_dev *vdev,
	enum vnic_devcmd_cmd proxy_cmd, enum vnic_devcmd_cmd cmd,
	uint64_t *args, int nargs, int wait)
{
	uint32_t status;
	int err;

	/*
	 * Proxy command consumes 2 arguments. One for proxy index,
	 * the other is for command to be proxied
	 */
	if (nargs > VNIC_DEVCMD_NARGS - 2) {
		pr_err("number of args %d exceeds the maximum\n", nargs);
		return -EINVAL;
	}
	memset(vdev->args, 0, sizeof(vdev->args));

	vdev->args[0] = vdev->proxy_index;
	vdev->args[1] = cmd;
	memcpy(&vdev->args[2], args, nargs * sizeof(args[0]));

	err = _vnic_dev_cmd(vdev, proxy_cmd, wait);
	if (err)
		return err;

	status = (uint32_t)vdev->args[0];
	if (status & STAT_ERROR) {
		err = (int)vdev->args[1];
		if (err != ERR_ECMDUNKNOWN ||
		    cmd != CMD_CAPABILITY)
			pr_err("Error %d proxy devcmd %d\n", err, _CMD_N(cmd));
		return err;
	}

	memcpy(args, &vdev->args[1], nargs * sizeof(args[0]));

	return 0;
}

static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev,
	enum vnic_devcmd_cmd cmd, uint64_t *args, int nargs, int wait)
{
	int err;

	if (nargs > VNIC_DEVCMD_NARGS) {
		pr_err("number of args %d exceeds the maximum\n", nargs);
		return -EINVAL;
	}
	memset(vdev->args, 0, sizeof(vdev->args));
	memcpy(vdev->args, args, nargs * sizeof(args[0]));

	err = _vnic_dev_cmd(vdev, cmd, wait);

	memcpy(args, vdev->args, nargs * sizeof(args[0]));

	return err;
}

void vnic_dev_cmd_proxy_by_index_start(struct vnic_dev *vdev, uint16_t index)
{
	vdev->proxy = PROXY_BY_INDEX;
	vdev->proxy_index = index;
}

void vnic_dev_cmd_proxy_end(struct vnic_dev *vdev)
{
	vdev->proxy = PROXY_NONE;
	vdev->proxy_index = 0;
}

int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	uint64_t *a0, uint64_t *a1, int wait)
{
	uint64_t args[2];
	bool vf_rep;
	int vf_idx;
	int err;

	vf_rep = false;
	if (vdev->pf_vdev) {
		vf_rep = true;
		vf_idx = vdev->vf_id;
		/* Everything below assumes PF vdev */
		vdev = vdev->pf_vdev;
	}
	if (vdev->lock)
		vdev->lock(vdev->priv);
	/* For VF representor, proxy devcmd to VF index */
	if (vf_rep)
		vnic_dev_cmd_proxy_by_index_start(vdev, vf_idx);

	args[0] = *a0;
	args[1] = *a1;
	memset(vdev->args, 0, sizeof(vdev->args));

	switch (vdev->proxy) {
	case PROXY_BY_INDEX:
		err = vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_INDEX, cmd,
				args, ARRAY_SIZE(args), wait);
		break;
	case PROXY_BY_BDF:
		err = vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_BDF, cmd,
				args, ARRAY_SIZE(args), wait);
		break;
	case PROXY_NONE:
	default:
		err = vnic_dev_cmd_no_proxy(vdev, cmd, args, 2, wait);
		break;
	}

	if (vf_rep)
		vnic_dev_cmd_proxy_end(vdev);
	if (vdev->unlock)
		vdev->unlock(vdev->priv);
	if (err == 0) {
		*a0 = args[0];
		*a1 = args[1];
	}

	return err;
}

int vnic_dev_cmd_args(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	uint64_t *args, int nargs, int wait)
{
	bool vf_rep;
	int vf_idx;
	int err;

	vf_rep = false;
	if (vdev->pf_vdev) {
		vf_rep = true;
		vf_idx = vdev->vf_id;
		vdev = vdev->pf_vdev;
	}
	if (vdev->lock)
		vdev->lock(vdev->priv);
	if (vf_rep)
		vnic_dev_cmd_proxy_by_index_start(vdev, vf_idx);

	switch (vdev->proxy) {
	case PROXY_BY_INDEX:
		err = vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_INDEX, cmd,
				args, nargs, wait);
		break;
	case PROXY_BY_BDF:
		err = vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_BDF, cmd,
				args, nargs, wait);
		break;
	case PROXY_NONE:
	default:
		err = vnic_dev_cmd_no_proxy(vdev, cmd, args, nargs, wait);
		break;
	}

	if (vf_rep)
		vnic_dev_cmd_proxy_end(vdev);
	if (vdev->unlock)
		vdev->unlock(vdev->priv);
	return err;
}

int vnic_dev_fw_info(struct vnic_dev *vdev,
	struct vnic_devcmd_fw_info **fw_info)
{
	char name[RTE_MEMZONE_NAMESIZE];
	uint64_t a0, a1 = 0;
	int wait = 1000;
	int err = 0;
	static uint32_t instance;

	if (!vdev->fw_info) {
		snprintf((char *)name, sizeof(name), "vnic_fw_info-%u",
			 instance++);
		vdev->fw_info = vdev->alloc_consistent(vdev->priv,
			sizeof(struct vnic_devcmd_fw_info),
			&vdev->fw_info_pa, (uint8_t *)name);
		if (!vdev->fw_info)
			return -ENOMEM;
		a0 = vdev->fw_info_pa;
		a1 = sizeof(struct vnic_devcmd_fw_info);
		err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO,
				   &a0, &a1, wait);
	}
	*fw_info = vdev->fw_info;
	return err;
}

static int vnic_dev_advanced_filters_cap(struct vnic_dev *vdev, uint64_t *args,
		int nargs)
{
	memset(args, 0, nargs * sizeof(*args));
	args[0] = CMD_ADD_ADV_FILTER;
	args[1] = FILTER_CAP_MODE_V1_FLAG;
	return vnic_dev_cmd_args(vdev, CMD_CAPABILITY, args, nargs, 1000);
}

int vnic_dev_capable_adv_filters(struct vnic_dev *vdev)
{
	uint64_t a0 = CMD_ADD_ADV_FILTER, a1 = 0;
	int wait = 1000;
	int err;

	err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);
	if (err)
		return 0;
	return (a1 >= (uint32_t)FILTER_DPDK_1);
}

int vnic_dev_flowman_cmd(struct vnic_dev *vdev, uint64_t *args, int nargs)
{
	int wait = 1000;

	return vnic_dev_cmd_args(vdev, CMD_FLOW_MANAGER_OP, args, nargs, wait);
}
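
/*
 * Flowman is enabled only after a capability handshake: advanced
 * filters and the CMD_FLOW_MANAGER_OP devcmd must be supported, the
 * firmware must advertise the API version this driver uses
 * (FM_API_VERSION_QUERY/SELECT), the fm_info structure must be
 * retrievable, and the operation mask must include every flowman op
 * the driver relies on. Any failure falls back to the older filter
 * modes selected in vnic_dev_capable_filter_mode().
 */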

static int vnic_dev_flowman_enable(struct vnic_dev *vdev, uint32_t *mode,
				   uint8_t *filter_actions)
{
	char name[RTE_MEMZONE_NAMESIZE];
	uint64_t args[3];
	uint64_t ops;
	static uint32_t instance;

	/* Advanced filtering is a prerequisite */
	if (!vnic_dev_capable_adv_filters(vdev))
		return 0;
	/* flowman devcmd available? */
	if (!vnic_dev_capable(vdev, CMD_FLOW_MANAGER_OP))
		return 0;
	/* Have the version we are using? */
	args[0] = FM_API_VERSION_QUERY;
	if (vnic_dev_flowman_cmd(vdev, args, 1))
		return 0;
	if ((args[0] & (1ULL << FM_VERSION)) == 0)
		return 0;
	/* Select the version */
	args[0] = FM_API_VERSION_SELECT;
	args[1] = FM_VERSION;
	if (vnic_dev_flowman_cmd(vdev, args, 2))
		return 0;
	/* Can we get fm_info? */
	if (!vdev->flowman_info) {
		snprintf((char *)name, sizeof(name), "vnic_fm_info-%u",
			 instance++);
		vdev->flowman_info = vdev->alloc_consistent(vdev->priv,
			sizeof(struct fm_info),
			&vdev->flowman_info_pa, (uint8_t *)name);
		if (!vdev->flowman_info)
			return 0;
	}
	args[0] = FM_INFO_QUERY;
	args[1] = vdev->flowman_info_pa;
	args[2] = sizeof(struct fm_info);
	if (vnic_dev_flowman_cmd(vdev, args, 3))
		return 0;
	/* Have required operations? */
	ops = (1ULL << FMOP_END) |
		(1ULL << FMOP_DROP) |
		(1ULL << FMOP_RQ_STEER) |
		(1ULL << FMOP_EXACT_MATCH) |
		(1ULL << FMOP_MARK) |
		(1ULL << FMOP_TAG) |
		(1ULL << FMOP_EG_HAIRPIN) |
		(1ULL << FMOP_ENCAP) |
		(1ULL << FMOP_DECAP_NOSTRIP);
	if ((vdev->flowman_info->fm_op_mask & ops) != ops)
		return 0;
	/* Good to use flowman now */
	*mode = FILTER_FLOWMAN;
	*filter_actions = FILTER_ACTION_RQ_STEERING_FLAG |
		FILTER_ACTION_FILTER_ID_FLAG |
		FILTER_ACTION_COUNTER_FLAG |
		FILTER_ACTION_DROP_FLAG;
	return 1;
}

/* Determine the "best" filtering mode VIC is capable of. Returns one of 4
 * values, or 0 if filtering is unavailable:
 *	FILTER_FLOWMAN - flowman api capable
 *	FILTER_DPDK_1 - advanced filters available
 *	FILTER_USNIC_IP_FLAG - advanced filters but with the restriction that
 *		the IP layer must be explicitly specified. I.e. cannot have a
 *		UDP filter that matches both IPv4 and IPv6.
 *	FILTER_IPV4_5TUPLE - fallback if neither of the 2 above is available.
 *		all other filter types are not available.
 * Return true in filter_tags if supported
 */
int vnic_dev_capable_filter_mode(struct vnic_dev *vdev, uint32_t *mode,
				 uint8_t *filter_actions)
{
	uint64_t args[4];
	int err;
	uint32_t max_level = 0;

	/* If flowman is available, use it as it is the most capable API */
	if (vnic_dev_flowman_enable(vdev, mode, filter_actions))
		return 0;

	err = vnic_dev_advanced_filters_cap(vdev, args, 4);

	/* determine supported filter actions */
	*filter_actions = FILTER_ACTION_RQ_STEERING_FLAG; /* always available */
	if (args[2] == FILTER_CAP_MODE_V1)
		*filter_actions = args[3];

	if (err || ((args[0] == 1) && (args[1] == 0))) {
		/* Adv filter Command not supported or adv filters available
		 * but not enabled. Try the normal filter capability command.
		 */
		args[0] = CMD_ADD_FILTER;
		args[1] = 0;
		err = vnic_dev_cmd_args(vdev, CMD_CAPABILITY, args, 2, 1000);
		/*
		 * ERR_EPERM may be returned if, for example, vNIC is
		 * on a VF. It simply means no filtering is available
		 */
		if (err == -ERR_EPERM) {
			*mode = 0;
			return 0;
		}
		if (err)
			return err;
		max_level = args[1];
		goto parse_max_level;
	} else if (args[2] == FILTER_CAP_MODE_V1) {
		/* parse filter capability mask in args[1] */
		if (args[1] & FILTER_DPDK_1_FLAG)
			*mode = FILTER_DPDK_1;
		else if (args[1] & FILTER_USNIC_IP_FLAG)
			*mode = FILTER_USNIC_IP;
		else if (args[1] & FILTER_IPV4_5TUPLE_FLAG)
			*mode = FILTER_IPV4_5TUPLE;
		return 0;
	}
	max_level = args[1];
parse_max_level:
	if (max_level >= (uint32_t)FILTER_USNIC_IP)
		*mode = FILTER_USNIC_IP;
	else
		*mode = FILTER_IPV4_5TUPLE;
	return 0;
}

void vnic_dev_capable_udp_rss_weak(struct vnic_dev *vdev, bool *cfg_chk,
				   bool *weak)
{
	uint64_t a0 = CMD_NIC_CFG, a1 = 0;
	int wait = 1000;
	int err;

	*cfg_chk = false;
	*weak = false;
	err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);
	if (err == 0 && a0 != 0 && a1 != 0) {
		*cfg_chk = true;
		*weak = !!((a1 >> 32) & CMD_NIC_CFG_CAPF_UDP_WEAK);
	}
}

int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd)
{
	uint64_t a0 = (uint32_t)cmd, a1 = 0;
	int wait = 1000;
	int err;

	err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);

	return !(err || a0);
}

int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, size_t size,
	void *value)
{
	uint64_t a0, a1;
	int wait = 1000;
	int err;

	a0 = offset;
	a1 = size;

	err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);

	switch (size) {
	case 1:
		*(uint8_t *)value = (uint8_t)a0;
		break;
	case 2:
		*(uint16_t *)value = (uint16_t)a0;
		break;
	case 4:
		*(uint32_t *)value = (uint32_t)a0;
		break;
	case 8:
		*(uint64_t *)value = a0;
		break;
	default:
		BUG();
		break;
	}

	return err;
}

int vnic_dev_stats_clear(struct vnic_dev *vdev)
{
	uint64_t a0 = 0, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait);
}

int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
{
	uint64_t a0, a1;
	int wait = 1000;

	if (!vdev->stats)
		return -ENOMEM;

	*stats = vdev->stats;
	a0 = vdev->stats_pa;
	a1 = sizeof(struct vnic_stats);

	return vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
}

int vnic_dev_close(struct vnic_dev *vdev)
{
	uint64_t a0 = 0, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
}

int vnic_dev_enable_wait(struct vnic_dev *vdev)
{
	uint64_t a0 = 0, a1 = 0;
	int wait = 1000;

	if (vnic_dev_capable(vdev, CMD_ENABLE_WAIT))
		return vnic_dev_cmd(vdev, CMD_ENABLE_WAIT, &a0, &a1, wait);
	else
		return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
}

int vnic_dev_disable(struct vnic_dev *vdev)
{
	uint64_t a0 = 0, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);
}

int vnic_dev_open(struct vnic_dev *vdev, int arg)
{
	uint64_t a0 = (uint32_t)arg, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
}
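
/*
 * CMD_OPEN completes asynchronously; callers typically poll
 * vnic_dev_open_done() until *done is set (CMD_OPEN_STATUS returns
 * a0 == 0 once the open has finished).
 */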

int vnic_dev_open_done(struct vnic_dev *vdev, int *done)
{
	uint64_t a0 = 0, a1 = 0;
	int wait = 1000;
	int err;

	*done = 0;

	err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
	if (err)
		return err;

	*done = (a0 == 0);

	return 0;
}

int vnic_dev_get_mac_addr(struct vnic_dev *vdev, uint8_t *mac_addr)
{
	uint64_t a0 = 0, a1 = 0;
	int wait = 1000;
	int err, i;

	for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
		mac_addr[i] = 0;

	err = vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
	if (err)
		return err;

	for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
		mac_addr[i] = ((uint8_t *)&a0)[i];

	return 0;
}

int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
	int broadcast, int promisc, int allmulti)
{
	uint64_t a0, a1 = 0;
	int wait = 1000;
	int err;

	a0 = (directed ? CMD_PFILTER_DIRECTED : 0) |
		(multicast ? CMD_PFILTER_MULTICAST : 0) |
		(broadcast ? CMD_PFILTER_BROADCAST : 0) |
		(promisc ? CMD_PFILTER_PROMISCUOUS : 0) |
		(allmulti ? CMD_PFILTER_ALL_MULTICAST : 0);

	err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
	if (err)
		pr_err("Can't set packet filter\n");

	return err;
}

int vnic_dev_add_addr(struct vnic_dev *vdev, uint8_t *addr)
{
	uint64_t a0 = 0, a1 = 0;
	int wait = 1000;
	int err;
	int i;

	for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
		((uint8_t *)&a0)[i] = addr[i];

	err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
	if (err)
		pr_err("Can't add addr [" RTE_ETHER_ADDR_PRT_FMT "], %d\n",
			addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
			err);

	return err;
}

int vnic_dev_del_addr(struct vnic_dev *vdev, uint8_t *addr)
{
	uint64_t a0 = 0, a1 = 0;
	int wait = 1000;
	int err;
	int i;

	for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
		((uint8_t *)&a0)[i] = addr[i];

	err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
	if (err)
		pr_err("Can't del addr [" RTE_ETHER_ADDR_PRT_FMT "], %d\n",
			addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
			err);

	return err;
}

int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev,
	uint8_t ig_vlan_rewrite_mode)
{
	uint64_t a0 = ig_vlan_rewrite_mode, a1 = 0;
	int wait = 1000;

	if (vnic_dev_capable(vdev, CMD_IG_VLAN_REWRITE_MODE))
		return vnic_dev_cmd(vdev, CMD_IG_VLAN_REWRITE_MODE,
				&a0, &a1, wait);
	else
		return 0;
}

void vnic_dev_set_reset_flag(struct vnic_dev *vdev, int state)
{
	vdev->in_reset = state;
}

static inline int vnic_dev_in_reset(struct vnic_dev *vdev)
{
	return vdev->in_reset;
}

int vnic_dev_notify_setcmd(struct vnic_dev *vdev,
	void *notify_addr, dma_addr_t notify_pa, uint16_t intr)
{
	uint64_t a0, a1;
	int wait = 1000;
	int r;

	memset(notify_addr, 0, sizeof(struct vnic_devcmd_notify));
	if (!vnic_dev_in_reset(vdev)) {
		vdev->notify = notify_addr;
		vdev->notify_pa = notify_pa;
	}

	a0 = (uint64_t)notify_pa;
	a1 = ((uint64_t)intr << 32) & 0x0000ffff00000000ULL;
	a1 += sizeof(struct vnic_devcmd_notify);

	r = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
	if (!vnic_dev_in_reset(vdev))
		vdev->notify_sz = (r == 0) ? (uint32_t)a1 : 0;

	return r;
}
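
/*
 * The notify area is allocated once and then reused: if a buffer is
 * already registered (or the device is mid-reset), the existing
 * address is re-posted to firmware rather than allocating a new one.
 */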

int vnic_dev_notify_set(struct vnic_dev *vdev, uint16_t intr)
{
	void *notify_addr = NULL;
	dma_addr_t notify_pa = 0;
	char name[RTE_MEMZONE_NAMESIZE];
	static uint32_t instance;

	if (vdev->notify || vdev->notify_pa) {
		return vnic_dev_notify_setcmd(vdev, vdev->notify,
					      vdev->notify_pa, intr);
	}
	if (!vnic_dev_in_reset(vdev)) {
		snprintf((char *)name, sizeof(name),
			"vnic_notify-%u", instance++);
		notify_addr = vdev->alloc_consistent(vdev->priv,
			sizeof(struct vnic_devcmd_notify),
			&notify_pa, (uint8_t *)name);
		if (!notify_addr)
			return -ENOMEM;
	}

	return vnic_dev_notify_setcmd(vdev, notify_addr, notify_pa, intr);
}

int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev)
{
	uint64_t a0, a1;
	int wait = 1000;
	int err;

	a0 = 0; /* paddr = 0 to unset notify buffer */
	a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */
	a1 += sizeof(struct vnic_devcmd_notify);

	err = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
	if (!vnic_dev_in_reset(vdev)) {
		vdev->notify = NULL;
		vdev->notify_pa = 0;
		vdev->notify_sz = 0;
	}

	return err;
}

int vnic_dev_notify_unset(struct vnic_dev *vdev)
{
	if (vdev->notify && !vnic_dev_in_reset(vdev)) {
		vdev->free_consistent(vdev->priv,
			sizeof(struct vnic_devcmd_notify),
			vdev->notify,
			vdev->notify_pa);
	}

	return vnic_dev_notify_unsetcmd(vdev);
}
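
/*
 * Firmware updates the notify area asynchronously. Word 0 of the area
 * holds a checksum of the remaining words, so the loop below re-copies
 * the area until it reads a self-consistent snapshot before the cached
 * notify_copy is trusted.
 */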

static int vnic_dev_notify_ready(struct vnic_dev *vdev)
{
	uint32_t *words;
	unsigned int nwords = vdev->notify_sz / 4;
	unsigned int i;
	uint32_t csum;

	if (!vdev->notify || !vdev->notify_sz)
		return 0;

	do {
		csum = 0;
		rte_memcpy(&vdev->notify_copy, vdev->notify, vdev->notify_sz);
		words = (uint32_t *)&vdev->notify_copy;
		for (i = 1; i < nwords; i++)
			csum += words[i];
	} while (csum != words[0]);

	return 1;
}

int vnic_dev_init(struct vnic_dev *vdev, int arg)
{
	uint64_t a0 = (uint32_t)arg, a1 = 0;
	int wait = 1000;
	int r = 0;

	if (vnic_dev_capable(vdev, CMD_INIT))
		r = vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
	else {
		vnic_dev_cmd(vdev, CMD_INIT_v1, &a0, &a1, wait);
		if (a0 & CMD_INITF_DEFAULT_MAC) {
			/* Emulate these for old CMD_INIT_v1 which
			 * didn't pass a0 so no CMD_INITF_*.
			 */
			vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
			vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
		}
	}
	return r;
}

void vnic_dev_intr_coal_timer_info_default(struct vnic_dev *vdev)
{
	/* Default: hardware intr coal timer is in units of 1.5 usecs */
	vdev->intr_coal_timer_info.mul = 2;
	vdev->intr_coal_timer_info.div = 3;
	vdev->intr_coal_timer_info.max_usec =
		vnic_dev_intr_coal_timer_hw_to_usec(vdev, 0xffff);
}

int vnic_dev_link_status(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.link_state;
}

uint32_t vnic_dev_port_speed(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.port_speed;
}

uint32_t vnic_dev_mtu(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.mtu;
}

uint32_t vnic_dev_uif(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.uif;
}

uint32_t vnic_dev_intr_coal_timer_usec_to_hw(struct vnic_dev *vdev,
	uint32_t usec)
{
	return (usec * vdev->intr_coal_timer_info.mul) /
		vdev->intr_coal_timer_info.div;
}

uint32_t vnic_dev_intr_coal_timer_hw_to_usec(struct vnic_dev *vdev,
	uint32_t hw_cycles)
{
	return (hw_cycles * vdev->intr_coal_timer_info.div) /
		vdev->intr_coal_timer_info.mul;
}

uint32_t vnic_dev_get_intr_coal_timer_max(struct vnic_dev *vdev)
{
	return vdev->intr_coal_timer_info.max_usec;
}

int vnic_dev_alloc_stats_mem(struct vnic_dev *vdev)
{
	char name[RTE_MEMZONE_NAMESIZE];
	static uint32_t instance;

	snprintf((char *)name, sizeof(name), "vnic_stats-%u", instance++);
	vdev->stats = vdev->alloc_consistent(vdev->priv,
		sizeof(struct vnic_stats),
		&vdev->stats_pa, (uint8_t *)name);
	return vdev->stats == NULL ? -ENOMEM : 0;
}

void vnic_dev_unregister(struct vnic_dev *vdev)
{
	if (vdev) {
		if (vdev->notify)
			vdev->free_consistent(vdev->priv,
				sizeof(struct vnic_devcmd_notify),
				vdev->notify,
				vdev->notify_pa);
		if (vdev->stats)
			vdev->free_consistent(vdev->priv,
				sizeof(struct vnic_stats),
				vdev->stats, vdev->stats_pa);
		if (vdev->flowman_info)
			vdev->free_consistent(vdev->priv,
				sizeof(struct fm_info),
				vdev->flowman_info, vdev->flowman_info_pa);
		if (vdev->fw_info)
			vdev->free_consistent(vdev->priv,
				sizeof(struct vnic_devcmd_fw_info),
				vdev->fw_info, vdev->fw_info_pa);
		rte_free(vdev);
	}
}

struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
	void *priv, struct rte_pci_device *pdev, struct vnic_dev_bar *bar,
	unsigned int num_bars)
{
	if (!vdev) {
		char name[RTE_MEMZONE_NAMESIZE];
		snprintf((char *)name, sizeof(name), "%s-vnic",
			pdev->device.name);
		vdev = (struct vnic_dev *)rte_zmalloc_socket(name,
					sizeof(struct vnic_dev),
					RTE_CACHE_LINE_SIZE,
					pdev->device.numa_node);
		if (!vdev)
			return NULL;
	}

	vdev->priv = priv;
	vdev->pdev = pdev;

	if (vnic_dev_discover_res(vdev, bar, num_bars))
		goto err_out;

	vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
	if (!vdev->devcmd)
		goto err_out;

	return vdev;

err_out:
	vnic_dev_unregister(vdev);
	return NULL;
}

struct vnic_dev *vnic_vf_rep_register(void *priv, struct vnic_dev *pf_vdev,
	int vf_id)
{
	struct vnic_dev *vdev;

	vdev = (struct vnic_dev *)rte_zmalloc("enic-vf-rep-vdev",
				sizeof(struct vnic_dev), RTE_CACHE_LINE_SIZE);
	if (!vdev)
		return NULL;
	vdev->priv = priv;
	vdev->pf_vdev = pf_vdev;
	vdev->vf_id = vf_id;
	vdev->alloc_consistent = pf_vdev->alloc_consistent;
	vdev->free_consistent = pf_vdev->free_consistent;
	return vdev;
}
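
/*
 * Filter add requests are passed to firmware as a single DMA buffer
 * laid out as two TLVs: a CLSF_TLV_FILTER TLV carrying the match data
 * followed by a CLSF_TLV_ACTION TLV carrying the action, i.e.
 *   [filter_tlv hdr][filter_v2 data][filter_tlv hdr][filter_action_v2 data]
 * The buffer's bus address and total length are handed to the ADD
 * devcmd in a0/a1.
 */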

/*
 * vnic_dev_classifier: Add/Delete classifier entries
 * @vdev: vdev of the device
 * @cmd: CLSF_ADD for Add filter
 *	 CLSF_DEL for Delete filter
 * @entry: In case of ADD filter, the caller passes the RQ number in this
 *	 variable. This function stores the filter_id returned by the
 *	 firmware in the same variable before return.
 *
 *	 In case of DEL filter, the caller passes the filter ID (returned
 *	 by a previous ADD) in this variable. Return value is irrelevant.
 * @data: filter data
 * @action: action data
 */
int vnic_dev_classifier(struct vnic_dev *vdev, uint8_t cmd, uint16_t *entry,
	struct filter_v2 *data, struct filter_action_v2 *action_v2)
{
	uint64_t a0 = 0, a1 = 0;
	int wait = 1000;
	dma_addr_t tlv_pa;
	int ret = -EINVAL;
	struct filter_tlv *tlv, *tlv_va;
	uint64_t tlv_size;
	uint32_t filter_size, action_size;
	static unsigned int unique_id;
	char z_name[RTE_MEMZONE_NAMESIZE];
	enum vnic_devcmd_cmd dev_cmd;

	if (cmd == CLSF_ADD) {
		dev_cmd = (data->type >= FILTER_DPDK_1) ?
			  CMD_ADD_ADV_FILTER : CMD_ADD_FILTER;

		filter_size = vnic_filter_size(data);
		action_size = vnic_action_size(action_v2);

		tlv_size = filter_size + action_size +
			2 * sizeof(struct filter_tlv);
		snprintf((char *)z_name, sizeof(z_name),
			"vnic_clsf_%u", unique_id++);
		tlv_va = vdev->alloc_consistent(vdev->priv,
			tlv_size, &tlv_pa, (uint8_t *)z_name);
		if (!tlv_va)
			return -ENOMEM;
		tlv = tlv_va;
		a0 = tlv_pa;
		a1 = tlv_size;
		memset(tlv, 0, tlv_size);
		tlv->type = CLSF_TLV_FILTER;
		tlv->length = filter_size;
		memcpy(&tlv->val, (void *)data, filter_size);

		tlv = (struct filter_tlv *)((char *)tlv +
					 sizeof(struct filter_tlv) +
					 filter_size);

		tlv->type = CLSF_TLV_ACTION;
		tlv->length = action_size;
		memcpy(&tlv->val, (void *)action_v2, action_size);
		ret = vnic_dev_cmd(vdev, dev_cmd, &a0, &a1, wait);
		*entry = (uint16_t)a0;
		vdev->free_consistent(vdev->priv, tlv_size, tlv_va, tlv_pa);
	} else if (cmd == CLSF_DEL) {
		a0 = *entry;
		ret = vnic_dev_cmd(vdev, CMD_DEL_FILTER, &a0, &a1, wait);
	}

	return ret;
}

int vnic_dev_overlay_offload_ctrl(struct vnic_dev *vdev, uint8_t overlay,
	uint8_t config)
{
	uint64_t a0 = overlay;
	uint64_t a1 = config;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CTRL, &a0, &a1, wait);
}

int vnic_dev_overlay_offload_cfg(struct vnic_dev *vdev, uint8_t overlay,
	uint16_t vxlan_udp_port_number)
{
	uint64_t a1 = vxlan_udp_port_number;
	uint64_t a0 = overlay;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CFG, &a0, &a1, wait);
}

int vnic_dev_capable_vxlan(struct vnic_dev *vdev)
{
	uint64_t a0 = VIC_FEATURE_VXLAN;
	uint64_t a1 = 0;
	int wait = 1000;
	int ret;

	ret = vnic_dev_cmd(vdev, CMD_GET_SUPP_FEATURE_VER, &a0, &a1, wait);
	/* 1 if the NIC can do VXLAN for both IPv4 and IPv6 with multiple WQs */
	return ret == 0 &&
		(a1 & (FEATURE_VXLAN_IPV6 | FEATURE_VXLAN_MULTI_WQ)) ==
		(FEATURE_VXLAN_IPV6 | FEATURE_VXLAN_MULTI_WQ);
}

int vnic_dev_capable_geneve(struct vnic_dev *vdev)
{
	uint64_t a0 = VIC_FEATURE_GENEVE;
	uint64_t a1 = 0;
	int wait = 1000;
	int ret;

	ret = vnic_dev_cmd(vdev, CMD_GET_SUPP_FEATURE_VER, &a0, &a1, wait);
	return ret == 0 && !!(a1 & FEATURE_GENEVE_OPTIONS);
}

uint64_t vnic_dev_capable_cq_entry_size(struct vnic_dev *vdev)
{
	uint64_t a0 = CMD_CQ_ENTRY_SIZE_SET;
	uint64_t a1 = 0;
	int wait = 1000;
	int ret;

	ret = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);
	/* All models support 16B CQ entry by default */
	if (!(ret == 0 && a0 == 0))
		a1 = VNIC_RQ_CQ_ENTRY_SIZE_16_CAPABLE;
	return a1;
}

int vnic_dev_set_cq_entry_size(struct vnic_dev *vdev, uint32_t rq_idx,
			       uint32_t size_flag)
{
	uint64_t a0 = rq_idx;
	uint64_t a1 = size_flag;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_CQ_ENTRY_SIZE_SET, &a0, &a1, wait);
}