/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 */

#include <rte_memzone.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_ether.h>

#include "vnic_dev.h"
#include "vnic_resource.h"
#include "vnic_devcmd.h"
#include "vnic_nic.h"
#include "vnic_stats.h"
#include "vnic_flowman.h"


enum vnic_proxy_type {
	PROXY_NONE,
	PROXY_BY_BDF,
	PROXY_BY_INDEX,
};

struct vnic_res {
	void __iomem *vaddr;
	dma_addr_t bus_addr;
	unsigned int count;
};

struct vnic_intr_coal_timer_info {
	uint32_t mul;
	uint32_t div;
	uint32_t max_usec;
};

struct vnic_dev {
	void *priv;
	struct rte_pci_device *pdev;
	struct vnic_res res[RES_TYPE_MAX];
	enum vnic_dev_intr_mode intr_mode;
	struct vnic_devcmd __iomem *devcmd;
	struct vnic_devcmd_notify *notify;
	struct vnic_devcmd_notify notify_copy;
	dma_addr_t notify_pa;
	uint32_t notify_sz;
	dma_addr_t linkstatus_pa;
	struct vnic_stats *stats;
	dma_addr_t stats_pa;
	struct vnic_sriov_stats *sriov_stats;
	dma_addr_t sriov_stats_pa;
	struct vnic_devcmd_fw_info *fw_info;
	dma_addr_t fw_info_pa;
	struct fm_info *flowman_info;
	dma_addr_t flowman_info_pa;
	enum vnic_proxy_type proxy;
	uint32_t proxy_index;
	uint64_t args[VNIC_DEVCMD_NARGS];
	int in_reset;
	struct vnic_intr_coal_timer_info intr_coal_timer_info;
	void *(*alloc_consistent)(void *priv, size_t size,
		dma_addr_t *dma_handle, uint8_t *name);
	void (*free_consistent)(void *priv,
		size_t size, void *vaddr,
		dma_addr_t dma_handle);
	/*
	 * Used to serialize devcmd access, currently from PF and its
	 * VF representors. When there are no representors, lock is
	 * not used.
	 */
	int locked;
	void (*lock)(void *priv);
	void (*unlock)(void *priv);
	struct vnic_dev *pf_vdev;
	int vf_id;
};

#define VNIC_MAX_RES_HDR_SIZE \
	(sizeof(struct vnic_resource_header) + \
	sizeof(struct vnic_resource) * RES_TYPE_MAX)
#define VNIC_RES_STRIDE	128

void *vnic_dev_priv(struct vnic_dev *vdev)
{
	return vdev->priv;
}

void vnic_register_cbacks(struct vnic_dev *vdev,
	void *(*alloc_consistent)(void *priv, size_t size,
		dma_addr_t *dma_handle, uint8_t *name),
	void (*free_consistent)(void *priv,
		size_t size, void *vaddr,
		dma_addr_t dma_handle))
{
	vdev->alloc_consistent = alloc_consistent;
	vdev->free_consistent = free_consistent;
}

void vnic_register_lock(struct vnic_dev *vdev, void (*lock)(void *priv),
	void (*unlock)(void *priv))
{
	vdev->lock = lock;
	vdev->unlock = unlock;
	vdev->locked = 0;
}
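
/*
 * Usage sketch (illustrative only, not used by this file): a PF driver
 * that creates VF representors can serialize devcmd access with an
 * rte_spinlock (from <rte_spinlock.h>). The "example_priv" struct and
 * callback names below are hypothetical.
 *
 *	struct example_priv {
 *		rte_spinlock_t devcmd_lock;
 *	};
 *
 *	static void example_lock(void *priv)
 *	{
 *		rte_spinlock_lock(&((struct example_priv *)priv)->devcmd_lock);
 *	}
 *
 *	static void example_unlock(void *priv)
 *	{
 *		rte_spinlock_unlock(&((struct example_priv *)priv)->devcmd_lock);
 *	}
 *
 *	rte_spinlock_init(&priv->devcmd_lock);
 *	vnic_register_lock(vdev, example_lock, example_unlock);
 */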

static int vnic_dev_discover_res(struct vnic_dev *vdev,
	struct vnic_dev_bar *bar, unsigned int num_bars)
{
	struct vnic_resource_header __iomem *rh;
	struct mgmt_barmap_hdr __iomem *mrh;
	struct vnic_resource __iomem *r;
	uint8_t type;

	if (num_bars == 0)
		return -EINVAL;

	if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
		pr_err("vNIC BAR0 res hdr length error\n");
		return -EINVAL;
	}

	rh = bar->vaddr;
	mrh = bar->vaddr;
	if (!rh) {
		pr_err("vNIC BAR0 res hdr not mem-mapped\n");
		return -EINVAL;
	}

	/* Check for mgmt vnic in addition to normal vnic */
	if ((ioread32(&rh->magic) != VNIC_RES_MAGIC) ||
	    (ioread32(&rh->version) != VNIC_RES_VERSION)) {
		if ((ioread32(&mrh->magic) != MGMTVNIC_MAGIC) ||
		    (ioread32(&mrh->version) != MGMTVNIC_VERSION)) {
			pr_err("vNIC BAR0 res magic/version error " \
				"exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n",
				VNIC_RES_MAGIC, VNIC_RES_VERSION,
				MGMTVNIC_MAGIC, MGMTVNIC_VERSION,
				ioread32(&rh->magic), ioread32(&rh->version));
			return -EINVAL;
		}
	}

	if (ioread32(&mrh->magic) == MGMTVNIC_MAGIC)
		r = (struct vnic_resource __iomem *)(mrh + 1);
	else
		r = (struct vnic_resource __iomem *)(rh + 1);


	while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {
		uint8_t bar_num = ioread8(&r->bar);
		uint32_t bar_offset = ioread32(&r->bar_offset);
		uint32_t count = ioread32(&r->count);
		uint32_t len;

		r++;

		if (bar_num >= num_bars)
			continue;

		if (!bar[bar_num].len || !bar[bar_num].vaddr)
			continue;

		switch (type) {
		case RES_TYPE_WQ:
		case RES_TYPE_RQ:
		case RES_TYPE_CQ:
		case RES_TYPE_INTR_CTRL:
		case RES_TYPE_ADMIN_WQ:
		case RES_TYPE_ADMIN_RQ:
		case RES_TYPE_ADMIN_CQ:
			/* each count is stride bytes long */
			len = count * VNIC_RES_STRIDE;
			if (len + bar_offset > bar[bar_num].len) {
				pr_err("vNIC BAR0 resource %d " \
					"out-of-bounds, offset 0x%x + " \
					"size 0x%x > bar len 0x%lx\n",
					type, bar_offset,
					len,
					bar[bar_num].len);
				return -EINVAL;
			}
			break;
		case RES_TYPE_INTR_PBA_LEGACY:
		case RES_TYPE_DEVCMD:
			len = count;
			break;
		default:
			continue;
		}

		vdev->res[type].count = count;
		vdev->res[type].vaddr = (char __iomem *)bar[bar_num].vaddr +
			bar_offset;
		vdev->res[type].bus_addr = bar[bar_num].bus_addr + bar_offset;
	}

	return 0;
}

unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
	enum vnic_res_type type)
{
	return vdev->res[type].count;
}
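
/*
 * Usage sketch (illustrative only): after vnic_dev_register() below has
 * run vnic_dev_discover_res(), a caller can size its queue arrays from
 * the discovered counts. The variable names and the sanity check are
 * hypothetical.
 *
 *	unsigned int n_wq = vnic_dev_get_res_count(vdev, RES_TYPE_WQ);
 *	unsigned int n_rq = vnic_dev_get_res_count(vdev, RES_TYPE_RQ);
 *	unsigned int n_cq = vnic_dev_get_res_count(vdev, RES_TYPE_CQ);
 *
 *	if (n_wq == 0 || n_rq == 0 || n_cq == 0)
 *		return -ENODEV;
 */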

void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
	unsigned int index)
{
	if (!vdev->res[type].vaddr)
		return NULL;

	switch (type) {
	case RES_TYPE_WQ:
	case RES_TYPE_RQ:
	case RES_TYPE_CQ:
	case RES_TYPE_INTR_CTRL:
	case RES_TYPE_ADMIN_WQ:
	case RES_TYPE_ADMIN_RQ:
	case RES_TYPE_ADMIN_CQ:
		return (char __iomem *)vdev->res[type].vaddr +
			index * VNIC_RES_STRIDE;
	default:
		return (char __iomem *)vdev->res[type].vaddr;
	}
}

unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
	unsigned int desc_count, unsigned int desc_size)
{
	/* The base address of the desc rings must be 512 byte aligned.
	 * Descriptor count is aligned to groups of 32 descriptors. A
	 * count of 0 means the maximum 4096 descriptors. Descriptor
	 * size is aligned to 16 bytes.
	 */

	unsigned int count_align = 32;
	unsigned int desc_align = 16;

	ring->base_align = 512;

	if (desc_count == 0)
		desc_count = 4096;

	ring->desc_count = VNIC_ALIGN(desc_count, count_align);

	ring->desc_size = VNIC_ALIGN(desc_size, desc_align);

	ring->size = ring->desc_count * ring->desc_size;
	ring->size_unaligned = ring->size + ring->base_align;

	return ring->size_unaligned;
}

void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
{
	memset(ring->descs, 0, ring->size);
}

int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev,
	struct vnic_dev_ring *ring,
	unsigned int desc_count, unsigned int desc_size,
	__rte_unused unsigned int socket_id,
	char *z_name)
{
	void *alloc_addr;
	dma_addr_t alloc_pa = 0;

	vnic_dev_desc_ring_size(ring, desc_count, desc_size);
	alloc_addr = vdev->alloc_consistent(vdev->priv,
					    ring->size_unaligned,
					    &alloc_pa, (uint8_t *)z_name);
	if (!alloc_addr) {
		pr_err("Failed to allocate ring (size=%d), aborting\n",
			(int)ring->size);
		return -ENOMEM;
	}
	ring->descs_unaligned = alloc_addr;
	if (!alloc_pa) {
		pr_err("Failed to map allocated ring (size=%d), aborting\n",
			(int)ring->size);
		vdev->free_consistent(vdev->priv,
				      ring->size_unaligned,
				      alloc_addr,
				      alloc_pa);
		return -ENOMEM;
	}
	ring->base_addr_unaligned = alloc_pa;

	ring->base_addr = VNIC_ALIGN(ring->base_addr_unaligned,
		ring->base_align);
	ring->descs = (uint8_t *)ring->descs_unaligned +
		(ring->base_addr - ring->base_addr_unaligned);

	vnic_dev_clear_desc_ring(ring);

	ring->desc_avail = ring->desc_count - 1;

	return 0;
}

void vnic_dev_free_desc_ring(struct vnic_dev *vdev,
	struct vnic_dev_ring *ring)
{
	if (ring->descs) {
		vdev->free_consistent(vdev->priv,
				      ring->size_unaligned,
				      ring->descs_unaligned,
				      ring->base_addr_unaligned);
		ring->descs = NULL;
	}
}
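
/*
 * Usage sketch (illustrative only): allocate a 512-entry ring of 16-byte
 * descriptors, then release it. The memzone name must be unique per
 * allocation; the name format and "ring_index" are hypothetical, and
 * socket_id is unused by this implementation, so 0 is passed.
 *
 *	struct vnic_dev_ring ring;
 *	char z_name[RTE_MEMZONE_NAMESIZE];
 *
 *	snprintf(z_name, sizeof(z_name), "example-ring-%u", ring_index);
 *	if (vnic_dev_alloc_desc_ring(vdev, &ring, 512, 16, 0, z_name))
 *		return -ENOMEM;
 *	vnic_dev_free_desc_ring(vdev, &ring);
 */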

static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	int wait)
{
	struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
	unsigned int i;
	int delay;
	uint32_t status;
	int err;

	status = ioread32(&devcmd->status);
	if (status == 0xFFFFFFFF) {
		/* PCI-e target device is gone */
		return -ENODEV;
	}
	if (status & STAT_BUSY) {

		pr_err("Busy devcmd %d\n", _CMD_N(cmd));
		return -EBUSY;
	}

	if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
		for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
			writeq(vdev->args[i], &devcmd->args[i]);
		rte_wmb(); /* complete all writes initiated so far */
	}

	iowrite32(cmd, &devcmd->cmd);

	if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
		return 0;

	for (delay = 0; delay < wait; delay++) {

		usleep(100);

		status = ioread32(&devcmd->status);
		if (status == 0xFFFFFFFF) {
			/* PCI-e target device is gone */
			return -ENODEV;
		}

		if (!(status & STAT_BUSY)) {
			if (status & STAT_ERROR) {
				err = -(int)readq(&devcmd->args[0]);
				if (cmd != CMD_CAPABILITY &&
				    cmd != CMD_OVERLAY_OFFLOAD_CTRL &&
				    cmd != CMD_GET_SUPP_FEATURE_VER)
					pr_err("Devcmd %d failed " \
						"with error code %d\n",
						_CMD_N(cmd), err);
				return err;
			}

			if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
				rte_rmb(); /* finish all reads */
				for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
					vdev->args[i] = readq(&devcmd->args[i]);
			}

			return 0;
		}
	}

	pr_err("Timed out devcmd %d\n", _CMD_N(cmd));
	return -ETIMEDOUT;
}

static int vnic_dev_cmd_proxy(struct vnic_dev *vdev,
	enum vnic_devcmd_cmd proxy_cmd, enum vnic_devcmd_cmd cmd,
	uint64_t *args, int nargs, int wait)
{
	uint32_t status;
	int err;

	/*
	 * A proxy command consumes 2 arguments: one for the proxy index,
	 * the other for the command to be proxied.
	 */
	if (nargs > VNIC_DEVCMD_NARGS - 2) {
		pr_err("number of args %d exceeds the maximum\n", nargs);
		return -EINVAL;
	}
	memset(vdev->args, 0, sizeof(vdev->args));

	vdev->args[0] = vdev->proxy_index;
	vdev->args[1] = cmd;
	memcpy(&vdev->args[2], args, nargs * sizeof(args[0]));

	err = _vnic_dev_cmd(vdev, proxy_cmd, wait);
	if (err)
		return err;

	status = (uint32_t)vdev->args[0];
	if (status & STAT_ERROR) {
		err = (int)vdev->args[1];
		if (err != ERR_ECMDUNKNOWN ||
		    cmd != CMD_CAPABILITY)
			pr_err("Error %d proxy devcmd %d\n", err, _CMD_N(cmd));
		return err;
	}

	memcpy(args, &vdev->args[1], nargs * sizeof(args[0]));

	return 0;
}

static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev,
	enum vnic_devcmd_cmd cmd, uint64_t *args, int nargs, int wait)
{
	int err;

	if (nargs > VNIC_DEVCMD_NARGS) {
		pr_err("number of args %d exceeds the maximum\n", nargs);
		return -EINVAL;
	}
	memset(vdev->args, 0, sizeof(vdev->args));
	memcpy(vdev->args, args, nargs * sizeof(args[0]));

	err = _vnic_dev_cmd(vdev, cmd, wait);

	memcpy(args, vdev->args, nargs * sizeof(args[0]));

	return err;
}

void vnic_dev_cmd_proxy_by_index_start(struct vnic_dev *vdev, uint16_t index)
{
	vdev->proxy = PROXY_BY_INDEX;
	vdev->proxy_index = index;
}

void vnic_dev_cmd_proxy_end(struct vnic_dev *vdev)
{
	vdev->proxy = PROXY_NONE;
	vdev->proxy_index = 0;
}
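
/*
 * Usage sketch (illustrative only): a PF can issue a devcmd on behalf of
 * VF 3 by bracketing the command with the proxy start/end helpers; the
 * command shown is arbitrary. VF representor vdevs (created by
 * vnic_vf_rep_register() below) do this automatically inside
 * vnic_dev_cmd() and vnic_dev_cmd_args().
 *
 *	uint64_t a0 = 0, a1 = 0;
 *	int err;
 *
 *	vnic_dev_cmd_proxy_by_index_start(pf_vdev, 3);
 *	err = vnic_dev_cmd(pf_vdev, CMD_GET_MAC_ADDR, &a0, &a1, 1000);
 *	vnic_dev_cmd_proxy_end(pf_vdev);
 */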

int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	uint64_t *a0, uint64_t *a1, int wait)
{
	uint64_t args[2];
	bool vf_rep;
	int vf_idx;
	int err;

	vf_rep = false;
	if (vdev->pf_vdev) {
		vf_rep = true;
		vf_idx = vdev->vf_id;
		/* Everything below assumes PF vdev */
		vdev = vdev->pf_vdev;
	}
	if (vdev->lock)
		vdev->lock(vdev->priv);
	/* For VF representor, proxy devcmd to VF index */
	if (vf_rep)
		vnic_dev_cmd_proxy_by_index_start(vdev, vf_idx);

	args[0] = *a0;
	args[1] = *a1;
	memset(vdev->args, 0, sizeof(vdev->args));

	switch (vdev->proxy) {
	case PROXY_BY_INDEX:
		err = vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_INDEX, cmd,
				args, ARRAY_SIZE(args), wait);
		break;
	case PROXY_BY_BDF:
		err = vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_BDF, cmd,
				args, ARRAY_SIZE(args), wait);
		break;
	case PROXY_NONE:
	default:
		err = vnic_dev_cmd_no_proxy(vdev, cmd, args, 2, wait);
		break;
	}

	if (vf_rep)
		vnic_dev_cmd_proxy_end(vdev);
	if (vdev->unlock)
		vdev->unlock(vdev->priv);
	if (err == 0) {
		*a0 = args[0];
		*a1 = args[1];
	}

	return err;
}

int vnic_dev_cmd_args(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	uint64_t *args, int nargs, int wait)
{
	bool vf_rep;
	int vf_idx;
	int err;

	vf_rep = false;
	if (vdev->pf_vdev) {
		vf_rep = true;
		vf_idx = vdev->vf_id;
		vdev = vdev->pf_vdev;
	}
	if (vdev->lock)
		vdev->lock(vdev->priv);
	if (vf_rep)
		vnic_dev_cmd_proxy_by_index_start(vdev, vf_idx);

	switch (vdev->proxy) {
	case PROXY_BY_INDEX:
		err = vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_INDEX, cmd,
				args, nargs, wait);
		break;
	case PROXY_BY_BDF:
		err = vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_BDF, cmd,
				args, nargs, wait);
		break;
	case PROXY_NONE:
	default:
		err = vnic_dev_cmd_no_proxy(vdev, cmd, args, nargs, wait);
		break;
	}

	if (vf_rep)
		vnic_dev_cmd_proxy_end(vdev);
	if (vdev->unlock)
		vdev->unlock(vdev->priv);
	return err;
}

int vnic_dev_fw_info(struct vnic_dev *vdev,
	struct vnic_devcmd_fw_info **fw_info)
{
	char name[RTE_MEMZONE_NAMESIZE];
	uint64_t a0, a1 = 0;
	int wait = 1000;
	int err = 0;
	static uint32_t instance;

	if (!vdev->fw_info) {
		snprintf((char *)name, sizeof(name), "vnic_fw_info-%u",
			 instance++);
		vdev->fw_info = vdev->alloc_consistent(vdev->priv,
			sizeof(struct vnic_devcmd_fw_info),
			&vdev->fw_info_pa, (uint8_t *)name);
		if (!vdev->fw_info)
			return -ENOMEM;
		a0 = vdev->fw_info_pa;
		a1 = sizeof(struct vnic_devcmd_fw_info);
		err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO,
				   &a0, &a1, wait);
	}
	*fw_info = vdev->fw_info;
	return err;
}

static int vnic_dev_advanced_filters_cap(struct vnic_dev *vdev, uint64_t *args,
	int nargs)
{
	memset(args, 0, nargs * sizeof(*args));
	args[0] = CMD_ADD_ADV_FILTER;
	args[1] = FILTER_CAP_MODE_V1_FLAG;
	return vnic_dev_cmd_args(vdev, CMD_CAPABILITY, args, nargs, 1000);
}

int vnic_dev_capable_adv_filters(struct vnic_dev *vdev)
{
	uint64_t a0 = CMD_ADD_ADV_FILTER, a1 = 0;
	int wait = 1000;
	int err;

	err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);
	if (err)
		return 0;
	return (a1 >= (uint32_t)FILTER_DPDK_1);
}

int vnic_dev_flowman_cmd(struct vnic_dev *vdev, uint64_t *args, int nargs)
{
	int wait = 1000;

	return vnic_dev_cmd_args(vdev, CMD_FLOW_MANAGER_OP, args, nargs, wait);
}
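
/*
 * Usage sketch (illustrative only, mirroring vnic_dev_flowman_enable()
 * below): query the flowman API versions the firmware supports. On
 * success, args[0] holds a bitmask of supported versions; "fm_supported"
 * is hypothetical.
 *
 *	uint64_t args[1];
 *	int fm_supported;
 *
 *	args[0] = FM_API_VERSION_QUERY;
 *	if (vnic_dev_flowman_cmd(vdev, args, 1) != 0)
 *		return 0;
 *	fm_supported = (args[0] & (1ULL << FM_VERSION)) != 0;
 */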

static int vnic_dev_flowman_enable(struct vnic_dev *vdev, uint32_t *mode,
	uint8_t *filter_actions)
{
	char name[RTE_MEMZONE_NAMESIZE];
	uint64_t args[3];
	uint64_t ops;
	static uint32_t instance;

	/* Advanced filtering is a prerequisite */
	if (!vnic_dev_capable_adv_filters(vdev))
		return 0;
	/* flowman devcmd available? */
	if (!vnic_dev_capable(vdev, CMD_FLOW_MANAGER_OP))
		return 0;
	/* Have the version we are using? */
	args[0] = FM_API_VERSION_QUERY;
	if (vnic_dev_flowman_cmd(vdev, args, 1))
		return 0;
	if ((args[0] & (1ULL << FM_VERSION)) == 0)
		return 0;
	/* Select the version */
	args[0] = FM_API_VERSION_SELECT;
	args[1] = FM_VERSION;
	if (vnic_dev_flowman_cmd(vdev, args, 2))
		return 0;
	/* Can we get fm_info? */
	if (!vdev->flowman_info) {
		snprintf((char *)name, sizeof(name), "vnic_fm_info-%u",
			 instance++);
		vdev->flowman_info = vdev->alloc_consistent(vdev->priv,
			sizeof(struct fm_info),
			&vdev->flowman_info_pa, (uint8_t *)name);
		if (!vdev->flowman_info)
			return 0;
	}
	args[0] = FM_INFO_QUERY;
	args[1] = vdev->flowman_info_pa;
	args[2] = sizeof(struct fm_info);
	if (vnic_dev_flowman_cmd(vdev, args, 3))
		return 0;
	/* Have required operations? */
	ops = (1ULL << FMOP_END) |
		(1ULL << FMOP_DROP) |
		(1ULL << FMOP_RQ_STEER) |
		(1ULL << FMOP_EXACT_MATCH) |
		(1ULL << FMOP_MARK) |
		(1ULL << FMOP_TAG) |
		(1ULL << FMOP_EG_HAIRPIN) |
		(1ULL << FMOP_ENCAP) |
		(1ULL << FMOP_DECAP_NOSTRIP);
	if ((vdev->flowman_info->fm_op_mask & ops) != ops)
		return 0;
	/* Good to use flowman now */
	*mode = FILTER_FLOWMAN;
	*filter_actions = FILTER_ACTION_RQ_STEERING_FLAG |
		FILTER_ACTION_FILTER_ID_FLAG |
		FILTER_ACTION_COUNTER_FLAG |
		FILTER_ACTION_DROP_FLAG;
	return 1;
}

/* Determine the "best" filtering mode the VIC is capable of. Returns one of
 * four values, or 0 if filtering is unavailable:
 *	FILTER_FLOWMAN - flowman API capable
 *	FILTER_DPDK_1 - advanced filters available
 *	FILTER_USNIC_IP_FLAG - advanced filters, but with the restriction that
 *		the IP layer must be explicitly specified, i.e. cannot have a
 *		UDP filter that matches both IPv4 and IPv6.
 *	FILTER_IPV4_5TUPLE - fallback if neither of the two above is
 *		available; all other filter types are not available.
 * The supported filter actions are returned in filter_actions.
 */
int vnic_dev_capable_filter_mode(struct vnic_dev *vdev, uint32_t *mode,
	uint8_t *filter_actions)
{
	uint64_t args[4];
	int err;
	uint32_t max_level = 0;

	/* If flowman is available, use it as it is the most capable API */
	if (vnic_dev_flowman_enable(vdev, mode, filter_actions))
		return 0;

	err = vnic_dev_advanced_filters_cap(vdev, args, 4);

	/* determine supported filter actions */
	*filter_actions = FILTER_ACTION_RQ_STEERING_FLAG; /* always available */
	if (args[2] == FILTER_CAP_MODE_V1)
		*filter_actions = args[3];

	if (err || ((args[0] == 1) && (args[1] == 0))) {
		/* Adv filter command not supported, or adv filters available
		 * but not enabled. Try the normal filter capability command.
		 */
		args[0] = CMD_ADD_FILTER;
		args[1] = 0;
		err = vnic_dev_cmd_args(vdev, CMD_CAPABILITY, args, 2, 1000);
		/*
		 * ERR_EPERM may be returned if, for example, the vNIC is
		 * on a VF. It simply means no filtering is available.
		 */
		if (err == -ERR_EPERM) {
			*mode = 0;
			return 0;
		}
		if (err)
			return err;
		max_level = args[1];
		goto parse_max_level;
	} else if (args[2] == FILTER_CAP_MODE_V1) {
		/* parse filter capability mask in args[1] */
		if (args[1] & FILTER_DPDK_1_FLAG)
			*mode = FILTER_DPDK_1;
		else if (args[1] & FILTER_USNIC_IP_FLAG)
			*mode = FILTER_USNIC_IP;
		else if (args[1] & FILTER_IPV4_5TUPLE_FLAG)
			*mode = FILTER_IPV4_5TUPLE;
		return 0;
	}
	max_level = args[1];
parse_max_level:
	if (max_level >= (uint32_t)FILTER_USNIC_IP)
		*mode = FILTER_USNIC_IP;
	else
		*mode = FILTER_IPV4_5TUPLE;
	return 0;
}
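
/*
 * Usage sketch (illustrative only): probe the best filter mode once at
 * init time and dispatch on the result. The "fm", "actions",
 * "use_flowman", and "can_drop" variables are hypothetical.
 *
 *	uint32_t fm = 0;
 *	uint8_t actions = 0;
 *	int err;
 *
 *	err = vnic_dev_capable_filter_mode(vdev, &fm, &actions);
 *	if (err)
 *		return err;
 *	use_flowman = (fm == FILTER_FLOWMAN);
 *	can_drop = (actions & FILTER_ACTION_DROP_FLAG) != 0;
 */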

void vnic_dev_capable_udp_rss_weak(struct vnic_dev *vdev, bool *cfg_chk,
	bool *weak)
{
	uint64_t a0 = CMD_NIC_CFG, a1 = 0;
	int wait = 1000;
	int err;

	*cfg_chk = false;
	*weak = false;
	err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);
	if (err == 0 && a0 != 0 && a1 != 0) {
		*cfg_chk = true;
		*weak = !!((a1 >> 32) & CMD_NIC_CFG_CAPF_UDP_WEAK);
	}
}

int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd)
{
	uint64_t a0 = (uint32_t)cmd, a1 = 0;
	int wait = 1000;
	int err;

	err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);

	return !(err || a0);
}

int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, size_t size,
	void *value)
{
	uint64_t a0, a1;
	int wait = 1000;
	int err;

	a0 = offset;
	a1 = size;

	err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);

	switch (size) {
	case 1:
		*(uint8_t *)value = (uint8_t)a0;
		break;
	case 2:
		*(uint16_t *)value = (uint16_t)a0;
		break;
	case 4:
		*(uint32_t *)value = (uint32_t)a0;
		break;
	case 8:
		*(uint64_t *)value = a0;
		break;
	default:
		BUG();
		break;
	}

	return err;
}

int vnic_dev_stats_clear(struct vnic_dev *vdev)
{
	uint64_t a0 = 0, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait);
}

int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
{
	uint64_t a0, a1;
	int wait = 1000;

	if (!vdev->stats)
		return -ENOMEM;

	*stats = vdev->stats;
	a0 = vdev->stats_pa;
	a1 = sizeof(struct vnic_stats);

	return vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
}

int vnic_dev_close(struct vnic_dev *vdev)
{
	uint64_t a0 = 0, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
}

int vnic_dev_enable_wait(struct vnic_dev *vdev)
{
	uint64_t a0 = 0, a1 = 0;
	int wait = 1000;

	if (vnic_dev_capable(vdev, CMD_ENABLE_WAIT))
		return vnic_dev_cmd(vdev, CMD_ENABLE_WAIT, &a0, &a1, wait);
	else
		return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
}

int vnic_dev_disable(struct vnic_dev *vdev)
{
	uint64_t a0 = 0, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);
}

int vnic_dev_open(struct vnic_dev *vdev, int arg)
{
	uint64_t a0 = (uint32_t)arg, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
}

int vnic_dev_open_done(struct vnic_dev *vdev, int *done)
{
	uint64_t a0 = 0, a1 = 0;
	int wait = 1000;
	int err;

	*done = 0;

	err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
	if (err)
		return err;

	*done = (a0 == 0);

	return 0;
}
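
/*
 * Usage sketch (illustrative only): CMD_OPEN is asynchronous, so callers
 * typically poll vnic_dev_open_done() until the device reports
 * completion. The retry bound and delay below are hypothetical.
 *
 *	int done = 0, i, err;
 *
 *	err = vnic_dev_open(vdev, 0);
 *	if (err)
 *		return err;
 *	for (i = 0; i < 1000 && !done; i++) {
 *		err = vnic_dev_open_done(vdev, &done);
 *		if (err)
 *			return err;
 *		usleep(1000);
 *	}
 *	if (!done)
 *		return -ETIMEDOUT;
 */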

int vnic_dev_get_mac_addr(struct vnic_dev *vdev, uint8_t *mac_addr)
{
	uint64_t a0 = 0, a1 = 0;
	int wait = 1000;
	int err, i;

	for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
		mac_addr[i] = 0;

	err = vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
	if (err)
		return err;

	for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
		mac_addr[i] = ((uint8_t *)&a0)[i];

	return 0;
}

int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
	int broadcast, int promisc, int allmulti)
{
	uint64_t a0, a1 = 0;
	int wait = 1000;
	int err;

	a0 = (directed ? CMD_PFILTER_DIRECTED : 0) |
	     (multicast ? CMD_PFILTER_MULTICAST : 0) |
	     (broadcast ? CMD_PFILTER_BROADCAST : 0) |
	     (promisc ? CMD_PFILTER_PROMISCUOUS : 0) |
	     (allmulti ? CMD_PFILTER_ALL_MULTICAST : 0);

	err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
	if (err)
		pr_err("Can't set packet filter\n");

	return err;
}

int vnic_dev_add_addr(struct vnic_dev *vdev, uint8_t *addr)
{
	uint64_t a0 = 0, a1 = 0;
	int wait = 1000;
	int err;
	int i;

	for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
		((uint8_t *)&a0)[i] = addr[i];

	err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
	if (err)
		pr_err("Can't add addr [" RTE_ETHER_ADDR_PRT_FMT "], %d\n",
			addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
			err);

	return err;
}

int vnic_dev_del_addr(struct vnic_dev *vdev, uint8_t *addr)
{
	uint64_t a0 = 0, a1 = 0;
	int wait = 1000;
	int err;
	int i;

	for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
		((uint8_t *)&a0)[i] = addr[i];

	err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
	if (err)
		pr_err("Can't del addr [" RTE_ETHER_ADDR_PRT_FMT "], %d\n",
			addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
			err);

	return err;
}

int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev,
	uint8_t ig_vlan_rewrite_mode)
{
	uint64_t a0 = ig_vlan_rewrite_mode, a1 = 0;
	int wait = 1000;

	if (vnic_dev_capable(vdev, CMD_IG_VLAN_REWRITE_MODE))
		return vnic_dev_cmd(vdev, CMD_IG_VLAN_REWRITE_MODE,
				&a0, &a1, wait);
	else
		return 0;
}

void vnic_dev_set_reset_flag(struct vnic_dev *vdev, int state)
{
	vdev->in_reset = state;
}

static inline int vnic_dev_in_reset(struct vnic_dev *vdev)
{
	return vdev->in_reset;
}

int vnic_dev_notify_setcmd(struct vnic_dev *vdev,
	void *notify_addr, dma_addr_t notify_pa, uint16_t intr)
{
	uint64_t a0, a1;
	int wait = 1000;
	int r;

	memset(notify_addr, 0, sizeof(struct vnic_devcmd_notify));
	if (!vnic_dev_in_reset(vdev)) {
		vdev->notify = notify_addr;
		vdev->notify_pa = notify_pa;
	}

	a0 = (uint64_t)notify_pa;
	a1 = ((uint64_t)intr << 32) & 0x0000ffff00000000ULL;
	a1 += sizeof(struct vnic_devcmd_notify);

	r = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
	if (!vnic_dev_in_reset(vdev))
		vdev->notify_sz = (r == 0) ? (uint32_t)a1 : 0;

	return r;
}
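
/*
 * Layout note (derived from the code above): CMD_NOTIFY packs the
 * interrupt index into bits 47:32 of a1 and the notify area size into
 * the low 32 bits, e.g. for intr = 5:
 *
 *	a1 = ((uint64_t)5 << 32) | sizeof(struct vnic_devcmd_notify);
 *
 * An intr field of 0xffff encodes -1, i.e. do not register an interrupt;
 * vnic_dev_notify_unsetcmd() below relies on this.
 */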

int vnic_dev_notify_set(struct vnic_dev *vdev, uint16_t intr)
{
	void *notify_addr = NULL;
	dma_addr_t notify_pa = 0;
	char name[RTE_MEMZONE_NAMESIZE];
	static uint32_t instance;

	if (vdev->notify || vdev->notify_pa) {
		return vnic_dev_notify_setcmd(vdev, vdev->notify,
					      vdev->notify_pa, intr);
	}
	if (!vnic_dev_in_reset(vdev)) {
		snprintf((char *)name, sizeof(name),
			 "vnic_notify-%u", instance++);
		notify_addr = vdev->alloc_consistent(vdev->priv,
			sizeof(struct vnic_devcmd_notify),
			&notify_pa, (uint8_t *)name);
		if (!notify_addr)
			return -ENOMEM;
	}

	return vnic_dev_notify_setcmd(vdev, notify_addr, notify_pa, intr);
}

int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev)
{
	uint64_t a0, a1;
	int wait = 1000;
	int err;

	a0 = 0; /* paddr = 0 to unset notify buffer */
	a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */
	a1 += sizeof(struct vnic_devcmd_notify);

	err = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
	if (!vnic_dev_in_reset(vdev)) {
		vdev->notify = NULL;
		vdev->notify_pa = 0;
		vdev->notify_sz = 0;
	}

	return err;
}

int vnic_dev_notify_unset(struct vnic_dev *vdev)
{
	if (vdev->notify && !vnic_dev_in_reset(vdev)) {
		vdev->free_consistent(vdev->priv,
			sizeof(struct vnic_devcmd_notify),
			vdev->notify,
			vdev->notify_pa);
	}

	return vnic_dev_notify_unsetcmd(vdev);
}

static int vnic_dev_notify_ready(struct vnic_dev *vdev)
{
	uint32_t *words;
	unsigned int nwords = vdev->notify_sz / 4;
	unsigned int i;
	uint32_t csum;

	if (!vdev->notify || !vdev->notify_sz)
		return 0;

	do {
		csum = 0;
		rte_memcpy(&vdev->notify_copy, vdev->notify, vdev->notify_sz);
		words = (uint32_t *)&vdev->notify_copy;
		for (i = 1; i < nwords; i++)
			csum += words[i];
	} while (csum != words[0]);

	return 1;
}
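
/*
 * Consistency note (mirrors vnic_dev_notify_ready() above): firmware
 * updates the notify area asynchronously and stores, in its first 32-bit
 * word, the sum of the remaining words. A snapshot is therefore retried
 * until it is self-consistent. A standalone checker for one snapshot
 * might look like this ("buf" and "sz" are hypothetical):
 *
 *	static int notify_snapshot_ok(const uint32_t *buf, uint32_t sz)
 *	{
 *		uint32_t sum = 0, i;
 *
 *		for (i = 1; i < sz / 4; i++)
 *			sum += buf[i];
 *		return sum == buf[0];
 *	}
 */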

int vnic_dev_init(struct vnic_dev *vdev, int arg)
{
	uint64_t a0 = (uint32_t)arg, a1 = 0;
	int wait = 1000;
	int r = 0;

	if (vnic_dev_capable(vdev, CMD_INIT))
		r = vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
	else {
		vnic_dev_cmd(vdev, CMD_INIT_v1, &a0, &a1, wait);
		if (a0 & CMD_INITF_DEFAULT_MAC) {
			/* Emulate these for old CMD_INIT_v1 which
			 * didn't pass a0 so no CMD_INITF_*.
			 */
			vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
			vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
		}
	}
	return r;
}

void vnic_dev_intr_coal_timer_info_default(struct vnic_dev *vdev)
{
	/* Default: hardware intr coal timer is in units of 1.5 usecs */
	vdev->intr_coal_timer_info.mul = 2;
	vdev->intr_coal_timer_info.div = 3;
	vdev->intr_coal_timer_info.max_usec =
		vnic_dev_intr_coal_timer_hw_to_usec(vdev, 0xffff);
}

int vnic_dev_link_status(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.link_state;
}

uint32_t vnic_dev_port_speed(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.port_speed;
}

uint32_t vnic_dev_mtu(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.mtu;
}

uint32_t vnic_dev_uif(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.uif;
}

uint32_t vnic_dev_intr_coal_timer_usec_to_hw(struct vnic_dev *vdev,
	uint32_t usec)
{
	return (usec * vdev->intr_coal_timer_info.mul) /
		vdev->intr_coal_timer_info.div;
}

uint32_t vnic_dev_intr_coal_timer_hw_to_usec(struct vnic_dev *vdev,
	uint32_t hw_cycles)
{
	return (hw_cycles * vdev->intr_coal_timer_info.div) /
		vdev->intr_coal_timer_info.mul;
}

uint32_t vnic_dev_get_intr_coal_timer_max(struct vnic_dev *vdev)
{
	return vdev->intr_coal_timer_info.max_usec;
}

int vnic_dev_alloc_stats_mem(struct vnic_dev *vdev)
{
	char name[RTE_MEMZONE_NAMESIZE];
	static uint32_t instance;

	snprintf((char *)name, sizeof(name), "vnic_stats-%u", instance++);
	vdev->stats = vdev->alloc_consistent(vdev->priv,
		sizeof(struct vnic_stats),
		&vdev->stats_pa, (uint8_t *)name);
	return vdev->stats == NULL ? -ENOMEM : 0;
}

int vnic_dev_alloc_sriov_stats_mem(struct vnic_dev *vdev)
{
	char name[RTE_MEMZONE_NAMESIZE];
	static uint32_t instance;

	snprintf((char *)name, sizeof(name), "vnic_sriov_stats-%u",
		 instance++);
	vdev->sriov_stats = vdev->alloc_consistent(vdev->priv,
		sizeof(struct vnic_sriov_stats),
		&vdev->sriov_stats_pa, (uint8_t *)name);
	return vdev->sriov_stats == NULL ? -ENOMEM : 0;
}
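
/*
 * Usage sketch (illustrative only): the stats DMA buffer is allocated
 * once and then refreshed in place by each CMD_STATS_DUMP. The
 * rx_frames_ok field is assumed to come from vnic_stats.h.
 *
 *	struct vnic_stats *vs;
 *
 *	if (vnic_dev_alloc_stats_mem(vdev))
 *		return -ENOMEM;
 *	if (vnic_dev_stats_dump(vdev, &vs) == 0)
 *		printf("rx ok: %" PRIu64 "\n", vs->rx.rx_frames_ok);
 */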

void vnic_dev_unregister(struct vnic_dev *vdev)
{
	if (vdev) {
		if (vdev->notify)
			vdev->free_consistent(vdev->priv,
				sizeof(struct vnic_devcmd_notify),
				vdev->notify,
				vdev->notify_pa);
		if (vdev->stats)
			vdev->free_consistent(vdev->priv,
				sizeof(struct vnic_stats),
				vdev->stats, vdev->stats_pa);
		if (vdev->sriov_stats)
			vdev->free_consistent(vdev->priv,
				sizeof(struct vnic_sriov_stats),
				vdev->sriov_stats, vdev->sriov_stats_pa);
		if (vdev->flowman_info)
			vdev->free_consistent(vdev->priv,
				sizeof(struct fm_info),
				vdev->flowman_info, vdev->flowman_info_pa);
		if (vdev->fw_info)
			vdev->free_consistent(vdev->priv,
				sizeof(struct vnic_devcmd_fw_info),
				vdev->fw_info, vdev->fw_info_pa);
		rte_free(vdev);
	}
}

struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
	void *priv, struct rte_pci_device *pdev, struct vnic_dev_bar *bar,
	unsigned int num_bars)
{
	if (!vdev) {
		char name[RTE_MEMZONE_NAMESIZE];
		snprintf((char *)name, sizeof(name), "%s-vnic",
			 pdev->device.name);
		vdev = (struct vnic_dev *)rte_zmalloc_socket(name,
					sizeof(struct vnic_dev),
					RTE_CACHE_LINE_SIZE,
					pdev->device.numa_node);
		if (!vdev)
			return NULL;
	}

	vdev->priv = priv;
	vdev->pdev = pdev;

	if (vnic_dev_discover_res(vdev, bar, num_bars))
		goto err_out;

	vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
	if (!vdev->devcmd)
		goto err_out;

	return vdev;

err_out:
	vnic_dev_unregister(vdev);
	return NULL;
}

struct vnic_dev *vnic_vf_rep_register(void *priv, struct vnic_dev *pf_vdev,
	int vf_id)
{
	struct vnic_dev *vdev;

	vdev = (struct vnic_dev *)rte_zmalloc("enic-vf-rep-vdev",
				sizeof(struct vnic_dev), RTE_CACHE_LINE_SIZE);
	if (!vdev)
		return NULL;
	vdev->priv = priv;
	vdev->pf_vdev = pf_vdev;
	vdev->vf_id = vf_id;
	vdev->alloc_consistent = pf_vdev->alloc_consistent;
	vdev->free_consistent = pf_vdev->free_consistent;
	return vdev;
}

/*
 * vnic_dev_classifier: Add/Delete classifier entries
 * @vdev: vdev of the device
 * @cmd: CLSF_ADD for Add filter
 *	 CLSF_DEL for Delete filter
 * @entry: In case of ADD filter, the caller passes the RQ number in this
 *	 variable. This function stores the filter_id returned by the
 *	 firmware in the same variable before returning.
 *
 *	 In case of DEL filter, the caller passes the filter_id (returned
 *	 by ADD) in this variable. The return value is irrelevant.
 * @data: filter data
 * @action_v2: action data
 */
int vnic_dev_classifier(struct vnic_dev *vdev, uint8_t cmd, uint16_t *entry,
	struct filter_v2 *data, struct filter_action_v2 *action_v2)
{
	uint64_t a0 = 0, a1 = 0;
	int wait = 1000;
	dma_addr_t tlv_pa;
	int ret = -EINVAL;
	struct filter_tlv *tlv, *tlv_va;
	uint64_t tlv_size;
	uint32_t filter_size, action_size;
	static unsigned int unique_id;
	char z_name[RTE_MEMZONE_NAMESIZE];
	enum vnic_devcmd_cmd dev_cmd;

	if (cmd == CLSF_ADD) {
		dev_cmd = (data->type >= FILTER_DPDK_1) ?
			  CMD_ADD_ADV_FILTER : CMD_ADD_FILTER;

		filter_size = vnic_filter_size(data);
		action_size = vnic_action_size(action_v2);

		tlv_size = filter_size + action_size +
			2 * sizeof(struct filter_tlv);
		snprintf((char *)z_name, sizeof(z_name),
			"vnic_clsf_%u", unique_id++);
		tlv_va = vdev->alloc_consistent(vdev->priv,
			tlv_size, &tlv_pa, (uint8_t *)z_name);
		if (!tlv_va)
			return -ENOMEM;
		tlv = tlv_va;
		a0 = tlv_pa;
		a1 = tlv_size;
		memset(tlv, 0, tlv_size);
		tlv->type = CLSF_TLV_FILTER;
		tlv->length = filter_size;
		memcpy(&tlv->val, (void *)data, filter_size);

		tlv = (struct filter_tlv *)((char *)tlv +
					 sizeof(struct filter_tlv) +
					 filter_size);

		tlv->type = CLSF_TLV_ACTION;
		tlv->length = action_size;
		memcpy(&tlv->val, (void *)action_v2, action_size);
		ret = vnic_dev_cmd(vdev, dev_cmd, &a0, &a1, wait);
		*entry = (uint16_t)a0;
		vdev->free_consistent(vdev->priv, tlv_size, tlv_va, tlv_pa);
	} else if (cmd == CLSF_DEL) {
		a0 = *entry;
		ret = vnic_dev_cmd(vdev, CMD_DEL_FILTER, &a0, &a1, wait);
	}

	return ret;
}
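
/*
 * Usage sketch (illustrative only): add a filter steering packets to
 * RQ 0, then delete it via the filter id written back into "entry".
 * Filling in "filter" and "action" is omitted; the DEL path does not
 * touch the data/action arguments, so NULL is passed there.
 *
 *	struct filter_v2 filter;
 *	struct filter_action_v2 action;
 *	uint16_t entry = 0;
 *
 *	if (vnic_dev_classifier(vdev, CLSF_ADD, &entry, &filter, &action))
 *		return -1;
 *	vnic_dev_classifier(vdev, CLSF_DEL, &entry, NULL, NULL);
 */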

int vnic_dev_overlay_offload_ctrl(struct vnic_dev *vdev, uint8_t overlay,
	uint8_t config)
{
	uint64_t a0 = overlay;
	uint64_t a1 = config;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CTRL, &a0, &a1, wait);
}

int vnic_dev_overlay_offload_cfg(struct vnic_dev *vdev, uint8_t overlay,
	uint16_t vxlan_udp_port_number)
{
	uint64_t a1 = vxlan_udp_port_number;
	uint64_t a0 = overlay;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CFG, &a0, &a1, wait);
}

int vnic_dev_capable_vxlan(struct vnic_dev *vdev)
{
	uint64_t a0 = VIC_FEATURE_VXLAN;
	uint64_t a1 = 0;
	int wait = 1000;
	int ret;

	ret = vnic_dev_cmd(vdev, CMD_GET_SUPP_FEATURE_VER, &a0, &a1, wait);
	/* 1 if the NIC can do VXLAN for both IPv4 and IPv6 with multiple WQs */
	return ret == 0 &&
		(a1 & (FEATURE_VXLAN_IPV6 | FEATURE_VXLAN_MULTI_WQ)) ==
		(FEATURE_VXLAN_IPV6 | FEATURE_VXLAN_MULTI_WQ);
}

int vnic_dev_capable_geneve(struct vnic_dev *vdev)
{
	uint64_t a0 = VIC_FEATURE_GENEVE;
	uint64_t a1 = 0;
	int wait = 1000;
	int ret;

	ret = vnic_dev_cmd(vdev, CMD_GET_SUPP_FEATURE_VER, &a0, &a1, wait);
	return ret == 0 && !!(a1 & FEATURE_GENEVE_OPTIONS);
}

uint64_t vnic_dev_capable_cq_entry_size(struct vnic_dev *vdev)
{
	uint64_t a0 = CMD_CQ_ENTRY_SIZE_SET;
	uint64_t a1 = 0;
	int wait = 1000;
	int ret;

	ret = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);
	/* All models support 16B CQ entry by default */
	if (!(ret == 0 && a0 == 0))
		a1 = VNIC_RQ_CQ_ENTRY_SIZE_16_CAPABLE;
	return a1;
}

int vnic_dev_set_cq_entry_size(struct vnic_dev *vdev, uint32_t rq_idx,
	uint32_t size_flag)
{
	uint64_t a0 = rq_idx;
	uint64_t a1 = size_flag;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_CQ_ENTRY_SIZE_SET, &a0, &a1, wait);
}

int vnic_dev_enable_admin_qp(struct vnic_dev *vdev, uint32_t enable)
{
	uint64_t a0, a1;
	int wait = 1000;

	a0 = QP_TYPE_ADMIN;
	a1 = enable;
	return vnic_dev_cmd(vdev, CMD_QP_TYPE_SET, &a0, &a1, wait);
}
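
/*
 * Usage sketch (illustrative only): enable the VXLAN overlay offload and
 * point it at a non-default UDP port using the two helpers defined
 * above. The OVERLAY_* names are assumed to come from vnic_devcmd.h, and
 * the port number is arbitrary.
 *
 *	if (vnic_dev_overlay_offload_ctrl(vdev, OVERLAY_FEATURE_VXLAN,
 *					  OVERLAY_OFFLOAD_ENABLE) == 0)
 *		vnic_dev_overlay_offload_cfg(vdev,
 *			OVERLAY_CFG_VXLAN_PORT_UPDATE, 4790);
 */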

int vnic_dev_sriov_stats(struct vnic_dev *vdev, struct vnic_sriov_stats **stats)
{
	uint64_t a0, a1;
	int wait = 1000;
	int err;

	a0 = vdev->sriov_stats_pa;
	a1 = sizeof(struct vnic_sriov_stats);
	err = vnic_dev_cmd(vdev, CMD_SRIOV_STATS_GET, &a0, &a1, wait);
	if (!err)
		*stats = vdev->sriov_stats;
	return err;
}
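
/*
 * Usage sketch (illustrative only): SR-IOV stats follow the same pattern
 * as vnic_dev_stats_dump() above; allocate the DMA buffer once, then ask
 * firmware to fill it.
 *
 *	struct vnic_sriov_stats *ss;
 *
 *	if (vnic_dev_alloc_sriov_stats_mem(vdev))
 *		return -ENOMEM;
 *	if (vnic_dev_sriov_stats(vdev, &ss) == 0)
 *		return 0;
 */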