/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 */

#include <rte_memzone.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_ether.h>

#include "vnic_dev.h"
#include "vnic_resource.h"
#include "vnic_devcmd.h"
#include "vnic_nic.h"
#include "vnic_stats.h"
#include "vnic_flowman.h"


enum vnic_proxy_type {
	PROXY_NONE,
	PROXY_BY_BDF,
	PROXY_BY_INDEX,
};

struct vnic_res {
	void __iomem *vaddr;
	dma_addr_t bus_addr;
	unsigned int count;
};

struct vnic_intr_coal_timer_info {
	uint32_t mul;
	uint32_t div;
	uint32_t max_usec;
};

struct vnic_dev {
	void *priv;
	struct rte_pci_device *pdev;
	struct vnic_res res[RES_TYPE_MAX];
	enum vnic_dev_intr_mode intr_mode;
	struct vnic_devcmd __iomem *devcmd;
	struct vnic_devcmd_notify *notify;
	struct vnic_devcmd_notify notify_copy;
	dma_addr_t notify_pa;
	uint32_t notify_sz;
	dma_addr_t linkstatus_pa;
	struct vnic_stats *stats;
	dma_addr_t stats_pa;
	struct vnic_devcmd_fw_info *fw_info;
	dma_addr_t fw_info_pa;
	struct fm_info *flowman_info;
	dma_addr_t flowman_info_pa;
	enum vnic_proxy_type proxy;
	uint32_t proxy_index;
	uint64_t args[VNIC_DEVCMD_NARGS];
	int in_reset;
	struct vnic_intr_coal_timer_info intr_coal_timer_info;
	void *(*alloc_consistent)(void *priv, size_t size,
		dma_addr_t *dma_handle, uint8_t *name);
	void (*free_consistent)(void *priv,
		size_t size, void *vaddr,
		dma_addr_t dma_handle);
	/*
	 * Used to serialize devcmd access, currently from the PF and its
	 * VF representors. When there are no representors, the lock is
	 * not used.
	 */
	int locked;
	void (*lock)(void *priv);
	void (*unlock)(void *priv);
	struct vnic_dev *pf_vdev;
	int vf_id;
};

#define VNIC_MAX_RES_HDR_SIZE \
	(sizeof(struct vnic_resource_header) + \
	sizeof(struct vnic_resource) * RES_TYPE_MAX)
#define VNIC_RES_STRIDE	128

void *vnic_dev_priv(struct vnic_dev *vdev)
{
	return vdev->priv;
}

void vnic_register_cbacks(struct vnic_dev *vdev,
	void *(*alloc_consistent)(void *priv, size_t size,
		dma_addr_t *dma_handle, uint8_t *name),
	void (*free_consistent)(void *priv,
		size_t size, void *vaddr,
		dma_addr_t dma_handle))
{
	vdev->alloc_consistent = alloc_consistent;
	vdev->free_consistent = free_consistent;
}

void vnic_register_lock(struct vnic_dev *vdev, void (*lock)(void *priv),
	void (*unlock)(void *priv))
{
	vdev->lock = lock;
	vdev->unlock = unlock;
	vdev->locked = 0;
}
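
/*
 * Usage sketch (illustrative only; the enic_* names below are the
 * caller's, not defined here): the PF driver registers its DMA
 * allocator and, when VF representors exist, a devcmd lock:
 *
 *	vnic_register_cbacks(vdev, enic_alloc_consistent,
 *			     enic_free_consistent);
 *	vnic_register_lock(vdev, enic_lock, enic_unlock);
 */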

static int vnic_dev_discover_res(struct vnic_dev *vdev,
	struct vnic_dev_bar *bar, unsigned int num_bars)
{
	struct vnic_resource_header __iomem *rh;
	struct mgmt_barmap_hdr __iomem *mrh;
	struct vnic_resource __iomem *r;
	uint8_t type;

	if (num_bars == 0)
		return -EINVAL;

	if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
		pr_err("vNIC BAR0 res hdr length error\n");
		return -EINVAL;
	}

	rh = bar->vaddr;
	mrh = bar->vaddr;
	if (!rh) {
		pr_err("vNIC BAR0 res hdr not mem-mapped\n");
		return -EINVAL;
	}

	/* Check for mgmt vnic in addition to normal vnic */
	if ((ioread32(&rh->magic) != VNIC_RES_MAGIC) ||
	    (ioread32(&rh->version) != VNIC_RES_VERSION)) {
		if ((ioread32(&mrh->magic) != MGMTVNIC_MAGIC) ||
		    (ioread32(&mrh->version) != MGMTVNIC_VERSION)) {
			pr_err("vNIC BAR0 res magic/version error " \
				"exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n",
				VNIC_RES_MAGIC, VNIC_RES_VERSION,
				MGMTVNIC_MAGIC, MGMTVNIC_VERSION,
				ioread32(&rh->magic), ioread32(&rh->version));
			return -EINVAL;
		}
	}

	if (ioread32(&mrh->magic) == MGMTVNIC_MAGIC)
		r = (struct vnic_resource __iomem *)(mrh + 1);
	else
		r = (struct vnic_resource __iomem *)(rh + 1);

	while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {
		uint8_t bar_num = ioread8(&r->bar);
		uint32_t bar_offset = ioread32(&r->bar_offset);
		uint32_t count = ioread32(&r->count);
		uint32_t len;

		r++;

		if (bar_num >= num_bars)
			continue;

		if (!bar[bar_num].len || !bar[bar_num].vaddr)
			continue;

		switch (type) {
		case RES_TYPE_WQ:
		case RES_TYPE_RQ:
		case RES_TYPE_CQ:
		case RES_TYPE_INTR_CTRL:
			/* each of the 'count' resources is
			 * VNIC_RES_STRIDE bytes long
			 */
			len = count * VNIC_RES_STRIDE;
			if (len + bar_offset > bar[bar_num].len) {
				pr_err("vNIC BAR0 resource %d " \
					"out-of-bounds, offset 0x%x + " \
					"size 0x%x > bar len 0x%lx\n",
					type, bar_offset,
					len,
					bar[bar_num].len);
				return -EINVAL;
			}
			break;
		case RES_TYPE_INTR_PBA_LEGACY:
		case RES_TYPE_DEVCMD:
			len = count;
			break;
		default:
			continue;
		}

		vdev->res[type].count = count;
		vdev->res[type].vaddr = (char __iomem *)bar[bar_num].vaddr +
		    bar_offset;
		vdev->res[type].bus_addr = bar[bar_num].bus_addr + bar_offset;
	}

	return 0;
}

unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
	enum vnic_res_type type)
{
	return vdev->res[type].count;
}

void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
	unsigned int index)
{
	if (!vdev->res[type].vaddr)
		return NULL;

	switch (type) {
	case RES_TYPE_WQ:
	case RES_TYPE_RQ:
	case RES_TYPE_CQ:
	case RES_TYPE_INTR_CTRL:
		return (char __iomem *)vdev->res[type].vaddr +
			index * VNIC_RES_STRIDE;
	default:
		return (char __iomem *)vdev->res[type].vaddr;
	}
}

unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
	unsigned int desc_count, unsigned int desc_size)
{
	/* The base address of the desc rings must be 512 byte aligned.
	 * Descriptor count is aligned to groups of 32 descriptors.  A
	 * count of 0 means the maximum 4096 descriptors.  Descriptor
	 * size is aligned to 16 bytes.
	 */

	unsigned int count_align = 32;
	unsigned int desc_align = 16;

	ring->base_align = 512;

	if (desc_count == 0)
		desc_count = 4096;

	ring->desc_count = VNIC_ALIGN(desc_count, count_align);

	ring->desc_size = VNIC_ALIGN(desc_size, desc_align);

	ring->size = ring->desc_count * ring->desc_size;
	ring->size_unaligned = ring->size + ring->base_align;

	return ring->size_unaligned;
}
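
/*
 * Worked example (sketch): with desc_count = 0 and desc_size = 16, the
 * function above yields desc_count = 4096, desc_size = 16,
 * size = 4096 * 16 = 65536 bytes, and size_unaligned = 65536 + 512,
 * leaving room to round the base address up to 512-byte alignment.
 */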

void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
{
	memset(ring->descs, 0, ring->size);
}

int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev,
	struct vnic_dev_ring *ring,
	unsigned int desc_count, unsigned int desc_size,
	__rte_unused unsigned int socket_id,
	char *z_name)
{
	void *alloc_addr;
	dma_addr_t alloc_pa = 0;

	vnic_dev_desc_ring_size(ring, desc_count, desc_size);
	alloc_addr = vdev->alloc_consistent(vdev->priv,
					    ring->size_unaligned,
					    &alloc_pa, (uint8_t *)z_name);
	if (!alloc_addr) {
		pr_err("Failed to allocate ring (size=%d), aborting\n",
			(int)ring->size);
		return -ENOMEM;
	}
	ring->descs_unaligned = alloc_addr;
	if (!alloc_pa) {
		pr_err("Failed to map allocated ring (size=%d), aborting\n",
			(int)ring->size);
		vdev->free_consistent(vdev->priv,
				      ring->size_unaligned,
				      alloc_addr,
				      alloc_pa);
		return -ENOMEM;
	}
	ring->base_addr_unaligned = alloc_pa;

	ring->base_addr = VNIC_ALIGN(ring->base_addr_unaligned,
		ring->base_align);
	ring->descs = (uint8_t *)ring->descs_unaligned +
	    (ring->base_addr - ring->base_addr_unaligned);

	vnic_dev_clear_desc_ring(ring);

	ring->desc_avail = ring->desc_count - 1;

	return 0;
}

void vnic_dev_free_desc_ring(struct vnic_dev *vdev,
	struct vnic_dev_ring *ring)
{
	if (ring->descs) {
		vdev->free_consistent(vdev->priv,
				      ring->size_unaligned,
				      ring->descs_unaligned,
				      ring->base_addr_unaligned);
		ring->descs = NULL;
	}
}

static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	int wait)
{
	struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
	unsigned int i;
	int delay;
	uint32_t status;
	int err;

	status = ioread32(&devcmd->status);
	if (status == 0xFFFFFFFF) {
		/* PCI-e target device is gone */
		return -ENODEV;
	}
	if (status & STAT_BUSY) {
		pr_err("Busy devcmd %d\n", _CMD_N(cmd));
		return -EBUSY;
	}

	if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
		for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
			writeq(vdev->args[i], &devcmd->args[i]);
		rte_wmb(); /* complete all writes initiated till now */
	}

	iowrite32(cmd, &devcmd->cmd);

	if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
		return 0;

	for (delay = 0; delay < wait; delay++) {

		usleep(100);

		status = ioread32(&devcmd->status);
		if (status == 0xFFFFFFFF) {
			/* PCI-e target device is gone */
			return -ENODEV;
		}

		if (!(status & STAT_BUSY)) {
			if (status & STAT_ERROR) {
				err = -(int)readq(&devcmd->args[0]);
				if (cmd != CMD_CAPABILITY &&
				    cmd != CMD_OVERLAY_OFFLOAD_CTRL &&
				    cmd != CMD_GET_SUPP_FEATURE_VER)
					pr_err("Devcmd %d failed " \
						"with error code %d\n",
						_CMD_N(cmd), err);
				return err;
			}

			if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
				rte_rmb(); /* finish all reads */
				for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
					vdev->args[i] = readq(&devcmd->args[i]);
			}

			return 0;
		}
	}

	pr_err("Timed out devcmd %d\n", _CMD_N(cmd));
	return -ETIMEDOUT;
}
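
/*
 * Note on the 'wait' argument used throughout this file: it is the
 * number of 100-usec polling iterations in _vnic_dev_cmd() above, so
 * the customary wait = 1000 bounds a devcmd at roughly 100 ms.
 */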

static int vnic_dev_cmd_proxy(struct vnic_dev *vdev,
	enum vnic_devcmd_cmd proxy_cmd, enum vnic_devcmd_cmd cmd,
	uint64_t *args, int nargs, int wait)
{
	uint32_t status;
	int err;

	/*
	 * A proxy command consumes 2 arguments: one for the proxy index,
	 * the other for the command to be proxied.
	 */
	if (nargs > VNIC_DEVCMD_NARGS - 2) {
		pr_err("number of args %d exceeds the maximum\n", nargs);
		return -EINVAL;
	}
	memset(vdev->args, 0, sizeof(vdev->args));

	vdev->args[0] = vdev->proxy_index;
	vdev->args[1] = cmd;
	memcpy(&vdev->args[2], args, nargs * sizeof(args[0]));

	err = _vnic_dev_cmd(vdev, proxy_cmd, wait);
	if (err)
		return err;

	status = (uint32_t)vdev->args[0];
	if (status & STAT_ERROR) {
		err = (int)vdev->args[1];
		if (err != ERR_ECMDUNKNOWN ||
		    cmd != CMD_CAPABILITY)
			pr_err("Error %d proxy devcmd %d\n", err, _CMD_N(cmd));
		return err;
	}

	memcpy(args, &vdev->args[1], nargs * sizeof(args[0]));

	return 0;
}

static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev,
	enum vnic_devcmd_cmd cmd, uint64_t *args, int nargs, int wait)
{
	int err;

	if (nargs > VNIC_DEVCMD_NARGS) {
		pr_err("number of args %d exceeds the maximum\n", nargs);
		return -EINVAL;
	}
	memset(vdev->args, 0, sizeof(vdev->args));
	memcpy(vdev->args, args, nargs * sizeof(args[0]));

	err = _vnic_dev_cmd(vdev, cmd, wait);

	memcpy(args, vdev->args, nargs * sizeof(args[0]));

	return err;
}

void vnic_dev_cmd_proxy_by_index_start(struct vnic_dev *vdev, uint16_t index)
{
	vdev->proxy = PROXY_BY_INDEX;
	vdev->proxy_index = index;
}

void vnic_dev_cmd_proxy_end(struct vnic_dev *vdev)
{
	vdev->proxy = PROXY_NONE;
	vdev->proxy_index = 0;
}

int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	uint64_t *a0, uint64_t *a1, int wait)
{
	uint64_t args[2];
	bool vf_rep;
	int vf_idx;
	int err;

	vf_rep = false;
	if (vdev->pf_vdev) {
		vf_rep = true;
		vf_idx = vdev->vf_id;
		/* Everything below assumes PF vdev */
		vdev = vdev->pf_vdev;
	}
	if (vdev->lock)
		vdev->lock(vdev->priv);
	/* For a VF representor, proxy the devcmd to the VF index */
	if (vf_rep)
		vnic_dev_cmd_proxy_by_index_start(vdev, vf_idx);

	args[0] = *a0;
	args[1] = *a1;
	memset(vdev->args, 0, sizeof(vdev->args));

	switch (vdev->proxy) {
	case PROXY_BY_INDEX:
		err = vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_INDEX, cmd,
				args, ARRAY_SIZE(args), wait);
		break;
	case PROXY_BY_BDF:
		err = vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_BDF, cmd,
				args, ARRAY_SIZE(args), wait);
		break;
	case PROXY_NONE:
	default:
		err = vnic_dev_cmd_no_proxy(vdev, cmd, args, 2, wait);
		break;
	}

	if (vf_rep)
		vnic_dev_cmd_proxy_end(vdev);
	if (vdev->unlock)
		vdev->unlock(vdev->priv);
	if (err == 0) {
		*a0 = args[0];
		*a1 = args[1];
	}

	return err;
}
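
/*
 * Example (sketch): most devcmds use the two-argument in/out convention
 * of vnic_dev_cmd() above. Fetching the MAC address, for instance (see
 * vnic_dev_get_mac_addr() below), boils down to:
 *
 *	uint64_t a0 = 0, a1 = 0;
 *	int err = vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, 1000);
 *	// on success, the six MAC bytes are in the low bytes of a0
 */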

int vnic_dev_cmd_args(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	uint64_t *args, int nargs, int wait)
{
	bool vf_rep;
	int vf_idx;
	int err;

	vf_rep = false;
	if (vdev->pf_vdev) {
		vf_rep = true;
		vf_idx = vdev->vf_id;
		vdev = vdev->pf_vdev;
	}
	if (vdev->lock)
		vdev->lock(vdev->priv);
	if (vf_rep)
		vnic_dev_cmd_proxy_by_index_start(vdev, vf_idx);

	switch (vdev->proxy) {
	case PROXY_BY_INDEX:
		err = vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_INDEX, cmd,
				args, nargs, wait);
		break;
	case PROXY_BY_BDF:
		err = vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_BDF, cmd,
				args, nargs, wait);
		break;
	case PROXY_NONE:
	default:
		err = vnic_dev_cmd_no_proxy(vdev, cmd, args, nargs, wait);
		break;
	}

	if (vf_rep)
		vnic_dev_cmd_proxy_end(vdev);
	if (vdev->unlock)
		vdev->unlock(vdev->priv);
	return err;
}

int vnic_dev_fw_info(struct vnic_dev *vdev,
	struct vnic_devcmd_fw_info **fw_info)
{
	char name[RTE_MEMZONE_NAMESIZE];
	uint64_t a0, a1 = 0;
	int wait = 1000;
	int err = 0;
	static uint32_t instance;

	if (!vdev->fw_info) {
		snprintf((char *)name, sizeof(name), "vnic_fw_info-%u",
			 instance++);
		vdev->fw_info = vdev->alloc_consistent(vdev->priv,
			sizeof(struct vnic_devcmd_fw_info),
			&vdev->fw_info_pa, (uint8_t *)name);
		if (!vdev->fw_info)
			return -ENOMEM;
		a0 = vdev->fw_info_pa;
		a1 = sizeof(struct vnic_devcmd_fw_info);
		err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO,
				   &a0, &a1, wait);
	}
	*fw_info = vdev->fw_info;
	return err;
}

static int vnic_dev_advanced_filters_cap(struct vnic_dev *vdev, uint64_t *args,
	int nargs)
{
	memset(args, 0, nargs * sizeof(*args));
	args[0] = CMD_ADD_ADV_FILTER;
	args[1] = FILTER_CAP_MODE_V1_FLAG;
	return vnic_dev_cmd_args(vdev, CMD_CAPABILITY, args, nargs, 1000);
}

int vnic_dev_capable_adv_filters(struct vnic_dev *vdev)
{
	uint64_t a0 = CMD_ADD_ADV_FILTER, a1 = 0;
	int wait = 1000;
	int err;

	err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);
	if (err)
		return 0;
	return (a1 >= (uint32_t)FILTER_DPDK_1);
}

int vnic_dev_flowman_cmd(struct vnic_dev *vdev, uint64_t *args, int nargs)
{
	int wait = 1000;

	return vnic_dev_cmd_args(vdev, CMD_FLOW_MANAGER_OP, args, nargs, wait);
}

static int vnic_dev_flowman_enable(struct vnic_dev *vdev, uint32_t *mode,
	uint8_t *filter_actions)
{
	char name[RTE_MEMZONE_NAMESIZE];
	uint64_t args[3];
	uint64_t ops;
	static uint32_t instance;

	/* flowman devcmd available? */
	if (!vnic_dev_capable(vdev, CMD_FLOW_MANAGER_OP))
		return 0;
	/* Have the version we are using? */
	args[0] = FM_API_VERSION_QUERY;
	if (vnic_dev_flowman_cmd(vdev, args, 1))
		return 0;
	if ((args[0] & (1ULL << FM_VERSION)) == 0)
		return 0;
	/* Select the version */
	args[0] = FM_API_VERSION_SELECT;
	args[1] = FM_VERSION;
	if (vnic_dev_flowman_cmd(vdev, args, 2))
		return 0;
	/* Can we get fm_info? */
	if (!vdev->flowman_info) {
		snprintf((char *)name, sizeof(name), "vnic_fm_info-%u",
			 instance++);
		vdev->flowman_info = vdev->alloc_consistent(vdev->priv,
			sizeof(struct fm_info),
			&vdev->flowman_info_pa, (uint8_t *)name);
		if (!vdev->flowman_info)
			return 0;
	}
	args[0] = FM_INFO_QUERY;
	args[1] = vdev->flowman_info_pa;
	args[2] = sizeof(struct fm_info);
	if (vnic_dev_flowman_cmd(vdev, args, 3))
		return 0;
	/* Have required operations? */
	ops = (1ULL << FMOP_END) |
		(1ULL << FMOP_DROP) |
		(1ULL << FMOP_RQ_STEER) |
		(1ULL << FMOP_EXACT_MATCH) |
		(1ULL << FMOP_MARK) |
		(1ULL << FMOP_TAG) |
		(1ULL << FMOP_EG_HAIRPIN) |
		(1ULL << FMOP_ENCAP) |
		(1ULL << FMOP_DECAP_NOSTRIP);
	if ((vdev->flowman_info->fm_op_mask & ops) != ops)
		return 0;
	/* Good to use flowman now */
	*mode = FILTER_FLOWMAN;
	*filter_actions = FILTER_ACTION_RQ_STEERING_FLAG |
		FILTER_ACTION_FILTER_ID_FLAG |
		FILTER_ACTION_COUNTER_FLAG |
		FILTER_ACTION_DROP_FLAG;
	return 1;
}

/* Determine the "best" filtering mode the VIC is capable of. Returns
 * one of 4 values, or 0 on error:
 *   FILTER_FLOWMAN - flowman API capable
 *   FILTER_DPDK_1 - advanced filters available
 *   FILTER_USNIC_IP_FLAG - advanced filters, but with the restriction that
 *	the IP layer must be explicitly specified, i.e. one cannot have a
 *	UDP filter that matches both IPv4 and IPv6.
 *   FILTER_IPV4_5TUPLE - fallback if neither of the two above is available;
 *	all other filter types are not available.
 * The supported filter actions are returned in filter_actions.
 */
int vnic_dev_capable_filter_mode(struct vnic_dev *vdev, uint32_t *mode,
	uint8_t *filter_actions)
{
	uint64_t args[4];
	int err;
	uint32_t max_level = 0;

	/* If flowman is available, use it as it is the most capable API */
	if (vnic_dev_flowman_enable(vdev, mode, filter_actions))
		return 0;

	err = vnic_dev_advanced_filters_cap(vdev, args, 4);

	/* determine supported filter actions */
	*filter_actions = FILTER_ACTION_RQ_STEERING_FLAG; /* always available */
	if (args[2] == FILTER_CAP_MODE_V1)
		*filter_actions = args[3];

	if (err || ((args[0] == 1) && (args[1] == 0))) {
		/* The advanced filter command is not supported, or advanced
		 * filters are available but not enabled. Try the normal
		 * filter capability command.
		 */
		args[0] = CMD_ADD_FILTER;
		args[1] = 0;
		err = vnic_dev_cmd_args(vdev, CMD_CAPABILITY, args, 2, 1000);
		if (err)
			return err;
		max_level = args[1];
		goto parse_max_level;
	} else if (args[2] == FILTER_CAP_MODE_V1) {
		/* parse filter capability mask in args[1] */
		if (args[1] & FILTER_DPDK_1_FLAG)
			*mode = FILTER_DPDK_1;
		else if (args[1] & FILTER_USNIC_IP_FLAG)
			*mode = FILTER_USNIC_IP;
		else if (args[1] & FILTER_IPV4_5TUPLE_FLAG)
			*mode = FILTER_IPV4_5TUPLE;
		return 0;
	}
	max_level = args[1];
parse_max_level:
	if (max_level >= (uint32_t)FILTER_USNIC_IP)
		*mode = FILTER_USNIC_IP;
	else
		*mode = FILTER_IPV4_5TUPLE;
	return 0;
}

void vnic_dev_capable_udp_rss_weak(struct vnic_dev *vdev, bool *cfg_chk,
	bool *weak)
{
	uint64_t a0 = CMD_NIC_CFG, a1 = 0;
	int wait = 1000;
	int err;

	*cfg_chk = false;
	*weak = false;
	err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);
	if (err == 0 && a0 != 0 && a1 != 0) {
		*cfg_chk = true;
		*weak = !!((a1 >> 32) & CMD_NIC_CFG_CAPF_UDP_WEAK);
	}
}

int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd)
{
	uint64_t a0 = (uint32_t)cmd, a1 = 0;
	int wait = 1000;
	int err;

	err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);

	return !(err || a0);
}

int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, size_t size,
	void *value)
{
	uint64_t a0, a1;
	int wait = 1000;
	int err;

	a0 = offset;
	a1 = size;

	err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);

	switch (size) {
	case 1:
		*(uint8_t *)value = (uint8_t)a0;
		break;
	case 2:
		*(uint16_t *)value = (uint16_t)a0;
		break;
	case 4:
		*(uint32_t *)value = (uint32_t)a0;
		break;
	case 8:
		*(uint64_t *)value = a0;
		break;
	default:
		BUG();
		break;
	}

	return err;
}
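
/*
 * Example (sketch): reading a 4-byte field from the device config space
 * via CMD_DEV_SPEC. The offset below is hypothetical; real offsets come
 * from the vNIC config layout definitions.
 *
 *	uint32_t val;
 *	int err = vnic_dev_spec(vdev, 0x10, 4, &val);
 */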

int vnic_dev_stats_clear(struct vnic_dev *vdev)
{
	uint64_t a0 = 0, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait);
}

int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
{
	uint64_t a0, a1;
	int wait = 1000;

	if (!vdev->stats)
		return -ENOMEM;

	*stats = vdev->stats;
	a0 = vdev->stats_pa;
	a1 = sizeof(struct vnic_stats);

	return vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
}

int vnic_dev_close(struct vnic_dev *vdev)
{
	uint64_t a0 = 0, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
}

int vnic_dev_enable_wait(struct vnic_dev *vdev)
{
	uint64_t a0 = 0, a1 = 0;
	int wait = 1000;

	if (vnic_dev_capable(vdev, CMD_ENABLE_WAIT))
		return vnic_dev_cmd(vdev, CMD_ENABLE_WAIT, &a0, &a1, wait);
	else
		return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
}

int vnic_dev_disable(struct vnic_dev *vdev)
{
	uint64_t a0 = 0, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);
}

int vnic_dev_open(struct vnic_dev *vdev, int arg)
{
	uint64_t a0 = (uint32_t)arg, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
}

int vnic_dev_open_done(struct vnic_dev *vdev, int *done)
{
	uint64_t a0 = 0, a1 = 0;
	int wait = 1000;
	int err;

	*done = 0;

	err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
	if (err)
		return err;

	*done = (a0 == 0);

	return 0;
}

int vnic_dev_get_mac_addr(struct vnic_dev *vdev, uint8_t *mac_addr)
{
	uint64_t a0 = 0, a1 = 0;
	int wait = 1000;
	int err, i;

	for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
		mac_addr[i] = 0;

	err = vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
	if (err)
		return err;

	for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
		mac_addr[i] = ((uint8_t *)&a0)[i];

	return 0;
}

int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
	int broadcast, int promisc, int allmulti)
{
	uint64_t a0, a1 = 0;
	int wait = 1000;
	int err;

	a0 = (directed ? CMD_PFILTER_DIRECTED : 0) |
	     (multicast ? CMD_PFILTER_MULTICAST : 0) |
	     (broadcast ? CMD_PFILTER_BROADCAST : 0) |
	     (promisc ? CMD_PFILTER_PROMISCUOUS : 0) |
	     (allmulti ? CMD_PFILTER_ALL_MULTICAST : 0);

	err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
	if (err)
		pr_err("Can't set packet filter\n");

	return err;
}

int vnic_dev_add_addr(struct vnic_dev *vdev, uint8_t *addr)
{
	uint64_t a0 = 0, a1 = 0;
	int wait = 1000;
	int err;
	int i;

	for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
		((uint8_t *)&a0)[i] = addr[i];

	err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
	if (err)
		pr_err("Can't add addr [" RTE_ETHER_ADDR_PRT_FMT "], %d\n",
			addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
			err);

	return err;
}

int vnic_dev_del_addr(struct vnic_dev *vdev, uint8_t *addr)
{
	uint64_t a0 = 0, a1 = 0;
	int wait = 1000;
	int err;
	int i;

	for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
		((uint8_t *)&a0)[i] = addr[i];

	err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
	if (err)
		pr_err("Can't del addr [" RTE_ETHER_ADDR_PRT_FMT "], %d\n",
			addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
			err);

	return err;
}

int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev,
	uint8_t ig_vlan_rewrite_mode)
{
	uint64_t a0 = ig_vlan_rewrite_mode, a1 = 0;
	int wait = 1000;

	if (vnic_dev_capable(vdev, CMD_IG_VLAN_REWRITE_MODE))
		return vnic_dev_cmd(vdev, CMD_IG_VLAN_REWRITE_MODE,
				&a0, &a1, wait);
	else
		return 0;
}

void vnic_dev_set_reset_flag(struct vnic_dev *vdev, int state)
{
	vdev->in_reset = state;
}

static inline int vnic_dev_in_reset(struct vnic_dev *vdev)
{
	return vdev->in_reset;
}

int vnic_dev_notify_setcmd(struct vnic_dev *vdev,
	void *notify_addr, dma_addr_t notify_pa, uint16_t intr)
{
	uint64_t a0, a1;
	int wait = 1000;
	int r;

	memset(notify_addr, 0, sizeof(struct vnic_devcmd_notify));
	if (!vnic_dev_in_reset(vdev)) {
		vdev->notify = notify_addr;
		vdev->notify_pa = notify_pa;
	}

	a0 = (uint64_t)notify_pa;
	a1 = ((uint64_t)intr << 32) & 0x0000ffff00000000ULL;
	a1 += sizeof(struct vnic_devcmd_notify);

	r = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
	if (!vnic_dev_in_reset(vdev))
		vdev->notify_sz = (r == 0) ? (uint32_t)a1 : 0;

	return r;
}
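
/*
 * CMD_NOTIFY argument layout, as used above and in the unset path below:
 * a0 carries the DMA address of the notify area (0 to unset), and a1
 * packs the interrupt index into bits 32..47 (0xffff, i.e. -1, meaning
 * no interrupt) with the size of the notify struct in the low 32 bits.
 */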

int vnic_dev_notify_set(struct vnic_dev *vdev, uint16_t intr)
{
	void *notify_addr = NULL;
	dma_addr_t notify_pa = 0;
	char name[RTE_MEMZONE_NAMESIZE];
	static uint32_t instance;

	if (vdev->notify || vdev->notify_pa) {
		return vnic_dev_notify_setcmd(vdev, vdev->notify,
					      vdev->notify_pa, intr);
	}
	if (!vnic_dev_in_reset(vdev)) {
		snprintf((char *)name, sizeof(name),
			 "vnic_notify-%u", instance++);
		notify_addr = vdev->alloc_consistent(vdev->priv,
			sizeof(struct vnic_devcmd_notify),
			&notify_pa, (uint8_t *)name);
		if (!notify_addr)
			return -ENOMEM;
	}

	return vnic_dev_notify_setcmd(vdev, notify_addr, notify_pa, intr);
}

int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev)
{
	uint64_t a0, a1;
	int wait = 1000;
	int err;

	a0 = 0;  /* paddr = 0 to unset notify buffer */
	a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */
	a1 += sizeof(struct vnic_devcmd_notify);

	err = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
	if (!vnic_dev_in_reset(vdev)) {
		vdev->notify = NULL;
		vdev->notify_pa = 0;
		vdev->notify_sz = 0;
	}

	return err;
}

int vnic_dev_notify_unset(struct vnic_dev *vdev)
{
	if (vdev->notify && !vnic_dev_in_reset(vdev)) {
		vdev->free_consistent(vdev->priv,
			sizeof(struct vnic_devcmd_notify),
			vdev->notify,
			vdev->notify_pa);
	}

	return vnic_dev_notify_unsetcmd(vdev);
}

static int vnic_dev_notify_ready(struct vnic_dev *vdev)
{
	uint32_t *words;
	unsigned int nwords = vdev->notify_sz / 4;
	unsigned int i;
	uint32_t csum;

	if (!vdev->notify || !vdev->notify_sz)
		return 0;

	do {
		csum = 0;
		rte_memcpy(&vdev->notify_copy, vdev->notify, vdev->notify_sz);
		words = (uint32_t *)&vdev->notify_copy;
		for (i = 1; i < nwords; i++)
			csum += words[i];
	} while (csum != words[0]);

	return 1;
}
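
/*
 * The loop above relies on the convention that words[0] of the notify
 * area holds the sum of the remaining words; re-copying until the
 * checksum matches yields a consistent snapshot even though the device
 * may update the area at any time.
 */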

int vnic_dev_init(struct vnic_dev *vdev, int arg)
{
	uint64_t a0 = (uint32_t)arg, a1 = 0;
	int wait = 1000;
	int r = 0;

	if (vnic_dev_capable(vdev, CMD_INIT))
		r = vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
	else {
		vnic_dev_cmd(vdev, CMD_INIT_v1, &a0, &a1, wait);
		if (a0 & CMD_INITF_DEFAULT_MAC) {
			/* Emulate these for the old CMD_INIT_v1, which
			 * didn't take a0 and so had no CMD_INITF_* flags.
			 */
			vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
			vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
		}
	}
	return r;
}

void vnic_dev_intr_coal_timer_info_default(struct vnic_dev *vdev)
{
	/* Default: hardware intr coal timer is in units of 1.5 usecs */
	vdev->intr_coal_timer_info.mul = 2;
	vdev->intr_coal_timer_info.div = 3;
	vdev->intr_coal_timer_info.max_usec =
		vnic_dev_intr_coal_timer_hw_to_usec(vdev, 0xffff);
}

int vnic_dev_link_status(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.link_state;
}

uint32_t vnic_dev_port_speed(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.port_speed;
}

uint32_t vnic_dev_mtu(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.mtu;
}

uint32_t vnic_dev_uif(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.uif;
}

uint32_t vnic_dev_intr_coal_timer_usec_to_hw(struct vnic_dev *vdev,
	uint32_t usec)
{
	return (usec * vdev->intr_coal_timer_info.mul) /
		vdev->intr_coal_timer_info.div;
}

uint32_t vnic_dev_intr_coal_timer_hw_to_usec(struct vnic_dev *vdev,
	uint32_t hw_cycles)
{
	return (hw_cycles * vdev->intr_coal_timer_info.div) /
		vdev->intr_coal_timer_info.mul;
}

uint32_t vnic_dev_get_intr_coal_timer_max(struct vnic_dev *vdev)
{
	return vdev->intr_coal_timer_info.max_usec;
}
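
/*
 * Worked example (sketch): with the defaults mul = 2, div = 3, the
 * hardware unit is 1.5 usec, so usec_to_hw(3) = 3 * 2 / 3 = 2 cycles,
 * and the maximum usable timer is hw_to_usec(0xffff) =
 * 65535 * 3 / 2 = 98302 usec (integer division).
 */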

int vnic_dev_alloc_stats_mem(struct vnic_dev *vdev)
{
	char name[RTE_MEMZONE_NAMESIZE];
	static uint32_t instance;

	snprintf((char *)name, sizeof(name), "vnic_stats-%u", instance++);
	vdev->stats = vdev->alloc_consistent(vdev->priv,
		sizeof(struct vnic_stats),
		&vdev->stats_pa, (uint8_t *)name);
	return vdev->stats == NULL ? -ENOMEM : 0;
}

void vnic_dev_unregister(struct vnic_dev *vdev)
{
	if (vdev) {
		if (vdev->notify)
			vdev->free_consistent(vdev->priv,
				sizeof(struct vnic_devcmd_notify),
				vdev->notify,
				vdev->notify_pa);
		if (vdev->stats)
			vdev->free_consistent(vdev->priv,
				sizeof(struct vnic_stats),
				vdev->stats, vdev->stats_pa);
		if (vdev->flowman_info)
			vdev->free_consistent(vdev->priv,
				sizeof(struct fm_info),
				vdev->flowman_info, vdev->flowman_info_pa);
		if (vdev->fw_info)
			vdev->free_consistent(vdev->priv,
				sizeof(struct vnic_devcmd_fw_info),
				vdev->fw_info, vdev->fw_info_pa);
		rte_free(vdev);
	}
}

struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
	void *priv, struct rte_pci_device *pdev, struct vnic_dev_bar *bar,
	unsigned int num_bars)
{
	if (!vdev) {
		char name[RTE_MEMZONE_NAMESIZE];
		snprintf((char *)name, sizeof(name), "%s-vnic",
			 pdev->device.name);
		vdev = (struct vnic_dev *)rte_zmalloc_socket(name,
					sizeof(struct vnic_dev),
					RTE_CACHE_LINE_SIZE,
					pdev->device.numa_node);
		if (!vdev)
			return NULL;
	}

	vdev->priv = priv;
	vdev->pdev = pdev;

	if (vnic_dev_discover_res(vdev, bar, num_bars))
		goto err_out;

	vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
	if (!vdev->devcmd)
		goto err_out;

	return vdev;

err_out:
	vnic_dev_unregister(vdev);
	return NULL;
}

struct vnic_dev *vnic_vf_rep_register(void *priv, struct vnic_dev *pf_vdev,
	int vf_id)
{
	struct vnic_dev *vdev;

	vdev = (struct vnic_dev *)rte_zmalloc("enic-vf-rep-vdev",
				sizeof(struct vnic_dev),
				RTE_CACHE_LINE_SIZE);
	if (!vdev)
		return NULL;
	vdev->priv = priv;
	vdev->pf_vdev = pf_vdev;
	vdev->vf_id = vf_id;
	vdev->alloc_consistent = pf_vdev->alloc_consistent;
	vdev->free_consistent = pf_vdev->free_consistent;
	return vdev;
}

/*
 * vnic_dev_classifier: Add/Delete classifier entries
 * @vdev: vdev of the device
 * @cmd: CLSF_ADD for Add filter
 *	 CLSF_DEL for Delete filter
 * @entry: In case of ADD filter, the caller passes the RQ number in this
 *	 variable. This function stores the filter_id returned by the
 *	 firmware in the same variable before return.
 *
 *	 In case of DEL filter, the caller passes the filter ID previously
 *	 returned by ADD; the value is not modified.
 * @data: filter data
 * @action_v2: action data
 */
int vnic_dev_classifier(struct vnic_dev *vdev, uint8_t cmd, uint16_t *entry,
	struct filter_v2 *data, struct filter_action_v2 *action_v2)
{
	uint64_t a0 = 0, a1 = 0;
	int wait = 1000;
	dma_addr_t tlv_pa;
	int ret = -EINVAL;
	struct filter_tlv *tlv, *tlv_va;
	uint64_t tlv_size;
	uint32_t filter_size, action_size;
	static unsigned int unique_id;
	char z_name[RTE_MEMZONE_NAMESIZE];
	enum vnic_devcmd_cmd dev_cmd;

	if (cmd == CLSF_ADD) {
		dev_cmd = (data->type >= FILTER_DPDK_1) ?
			  CMD_ADD_ADV_FILTER : CMD_ADD_FILTER;

		filter_size = vnic_filter_size(data);
		action_size = vnic_action_size(action_v2);

		tlv_size = filter_size + action_size +
		    2 * sizeof(struct filter_tlv);
		snprintf((char *)z_name, sizeof(z_name),
			 "vnic_clsf_%u", unique_id++);
		tlv_va = vdev->alloc_consistent(vdev->priv,
			tlv_size, &tlv_pa, (uint8_t *)z_name);
		if (!tlv_va)
			return -ENOMEM;
		tlv = tlv_va;
		a0 = tlv_pa;
		a1 = tlv_size;
		memset(tlv, 0, tlv_size);
		tlv->type = CLSF_TLV_FILTER;
		tlv->length = filter_size;
		memcpy(&tlv->val, (void *)data, filter_size);

		tlv = (struct filter_tlv *)((char *)tlv +
					 sizeof(struct filter_tlv) +
					 filter_size);

		tlv->type = CLSF_TLV_ACTION;
		tlv->length = action_size;
		memcpy(&tlv->val, (void *)action_v2, action_size);
		ret = vnic_dev_cmd(vdev, dev_cmd, &a0, &a1, wait);
		*entry = (uint16_t)a0;
		vdev->free_consistent(vdev->priv, tlv_size, tlv_va, tlv_pa);
	} else if (cmd == CLSF_DEL) {
		a0 = *entry;
		ret = vnic_dev_cmd(vdev, CMD_DEL_FILTER, &a0, &a1, wait);
	}

	return ret;
}

int vnic_dev_overlay_offload_ctrl(struct vnic_dev *vdev, uint8_t overlay,
	uint8_t config)
{
	uint64_t a0 = overlay;
	uint64_t a1 = config;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CTRL, &a0, &a1, wait);
}

int vnic_dev_overlay_offload_cfg(struct vnic_dev *vdev, uint8_t overlay,
	uint16_t vxlan_udp_port_number)
{
	uint64_t a1 = vxlan_udp_port_number;
	uint64_t a0 = overlay;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CFG, &a0, &a1, wait);
}

int vnic_dev_capable_vxlan(struct vnic_dev *vdev)
{
	uint64_t a0 = VIC_FEATURE_VXLAN;
	uint64_t a1 = 0;
	int wait = 1000;
	int ret;

	ret = vnic_dev_cmd(vdev, CMD_GET_SUPP_FEATURE_VER, &a0, &a1, wait);
	/* 1 if the NIC can do VXLAN for both IPv4 and IPv6 with multiple WQs */
	return ret == 0 &&
		(a1 & (FEATURE_VXLAN_IPV6 | FEATURE_VXLAN_MULTI_WQ)) ==
		(FEATURE_VXLAN_IPV6 | FEATURE_VXLAN_MULTI_WQ);
}

int vnic_dev_capable_geneve(struct vnic_dev *vdev)
{
	uint64_t a0 = VIC_FEATURE_GENEVE;
	uint64_t a1 = 0;
	int wait = 1000;
	int ret;

	ret = vnic_dev_cmd(vdev, CMD_GET_SUPP_FEATURE_VER, &a0, &a1, wait);
	return ret == 0 && !!(a1 & FEATURE_GENEVE_OPTIONS);
}

uint64_t vnic_dev_capable_cq_entry_size(struct vnic_dev *vdev)
{
	uint64_t a0 = CMD_CQ_ENTRY_SIZE_SET;
	uint64_t a1 = 0;
	int wait = 1000;
	int ret;

	ret = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);
	/* All models support 16B CQ entry by default */
	if (!(ret == 0 && a0 == 0))
		a1 = VNIC_RQ_CQ_ENTRY_SIZE_16_CAPABLE;
	return a1;
}

int vnic_dev_set_cq_entry_size(struct vnic_dev *vdev, uint32_t rq_idx,
	uint32_t size_flag)
{
	uint64_t a0 = rq_idx;
	uint64_t a1 = size_flag;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_CQ_ENTRY_SIZE_SET, &a0, &a1, wait);
}
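
/*
 * Usage sketch for the two functions above: a PMD first queries
 * vnic_dev_capable_cq_entry_size() for the supported-size flags (all
 * models support the 16B entry, per the comment above), then picks one
 * entry size per RQ with vnic_dev_set_cq_entry_size(vdev, rq_idx,
 * size_flag), where size_flag is one of the returned capability flags.
 */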