/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 */

#include <rte_memzone.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>

#include "vnic_dev.h"
#include "vnic_resource.h"
#include "vnic_devcmd.h"
#include "vnic_nic.h"
#include "vnic_stats.h"


enum vnic_proxy_type {
	PROXY_NONE,
	PROXY_BY_BDF,
	PROXY_BY_INDEX,
};

struct vnic_res {
	void __iomem *vaddr;
	dma_addr_t bus_addr;
	unsigned int count;
};

struct vnic_intr_coal_timer_info {
	u32 mul;
	u32 div;
	u32 max_usec;
};

struct vnic_dev {
	void *priv;
	struct rte_pci_device *pdev;
	struct vnic_res res[RES_TYPE_MAX];
	enum vnic_dev_intr_mode intr_mode;
	struct vnic_devcmd __iomem *devcmd;
	struct vnic_devcmd_notify *notify;
	struct vnic_devcmd_notify notify_copy;
	dma_addr_t notify_pa;
	u32 notify_sz;
	dma_addr_t linkstatus_pa;
	struct vnic_stats *stats;
	dma_addr_t stats_pa;
	struct vnic_devcmd_fw_info *fw_info;
	dma_addr_t fw_info_pa;
	enum vnic_proxy_type proxy;
	u32 proxy_index;
	u64 args[VNIC_DEVCMD_NARGS];
	int in_reset;
	struct vnic_intr_coal_timer_info intr_coal_timer_info;
	void *(*alloc_consistent)(void *priv, size_t size,
		dma_addr_t *dma_handle, u8 *name);
	void (*free_consistent)(void *priv,
		size_t size, void *vaddr,
		dma_addr_t dma_handle);
};

#define VNIC_MAX_RES_HDR_SIZE \
	(sizeof(struct vnic_resource_header) + \
	sizeof(struct vnic_resource) * RES_TYPE_MAX)
#define VNIC_RES_STRIDE	128

void *vnic_dev_priv(struct vnic_dev *vdev)
{
	return vdev->priv;
}

void vnic_register_cbacks(struct vnic_dev *vdev,
	void *(*alloc_consistent)(void *priv, size_t size,
	    dma_addr_t *dma_handle, u8 *name),
	void (*free_consistent)(void *priv,
	    size_t size, void *vaddr,
	    dma_addr_t dma_handle))
{
	vdev->alloc_consistent = alloc_consistent;
	vdev->free_consistent = free_consistent;
}

static int vnic_dev_discover_res(struct vnic_dev *vdev,
	struct vnic_dev_bar *bar, unsigned int num_bars)
{
	struct vnic_resource_header __iomem *rh;
	struct mgmt_barmap_hdr __iomem *mrh;
	struct vnic_resource __iomem *r;
	u8 type;

	if (num_bars == 0)
		return -EINVAL;

	if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
		pr_err("vNIC BAR0 res hdr length error\n");
		return -EINVAL;
	}

	rh = bar->vaddr;
	mrh = bar->vaddr;
	if (!rh) {
		pr_err("vNIC BAR0 res hdr not mem-mapped\n");
		return -EINVAL;
	}

	/* Check for mgmt vnic in addition to normal vnic */
	if ((ioread32(&rh->magic) != VNIC_RES_MAGIC) ||
	    (ioread32(&rh->version) != VNIC_RES_VERSION)) {
		if ((ioread32(&mrh->magic) != MGMTVNIC_MAGIC) ||
		    (ioread32(&mrh->version) != MGMTVNIC_VERSION)) {
			pr_err("vNIC BAR0 res magic/version error " \
				"exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n",
				VNIC_RES_MAGIC, VNIC_RES_VERSION,
				MGMTVNIC_MAGIC, MGMTVNIC_VERSION,
				ioread32(&rh->magic), ioread32(&rh->version));
			return -EINVAL;
		}
	}

	if (ioread32(&mrh->magic) == MGMTVNIC_MAGIC)
		r = (struct vnic_resource __iomem *)(mrh + 1);
	else
		r = (struct vnic_resource __iomem *)(rh + 1);


	while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {
		u8 bar_num = ioread8(&r->bar);
		u32 bar_offset = ioread32(&r->bar_offset);
		u32 count = ioread32(&r->count);
		u32 len;

		r++;

		if (bar_num >= num_bars)
			continue;

		if (!bar[bar_num].len || !bar[bar_num].vaddr)
			continue;

		switch (type) {
		case RES_TYPE_WQ:
		case RES_TYPE_RQ:
		case RES_TYPE_CQ:
		case RES_TYPE_INTR_CTRL:
			/* each count is stride bytes long */
			len = count * VNIC_RES_STRIDE;
			if (len + bar_offset > bar[bar_num].len) {
				pr_err("vNIC BAR0 resource %d " \
					"out-of-bounds, offset 0x%x + " \
					"size 0x%x > bar len 0x%lx\n",
					type, bar_offset,
					len,
					bar[bar_num].len);
				return -EINVAL;
			}
			break;
		case RES_TYPE_INTR_PBA_LEGACY:
		case RES_TYPE_DEVCMD:
			len = count;
			break;
		default:
			continue;
		}

		vdev->res[type].count = count;
		vdev->res[type].vaddr = (char __iomem *)bar[bar_num].vaddr +
		    bar_offset;
		vdev->res[type].bus_addr = bar[bar_num].bus_addr + bar_offset;
	}

	return 0;
}

unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
	enum vnic_res_type type)
{
	return vdev->res[type].count;
}

void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
	unsigned int index)
{
	if (!vdev->res[type].vaddr)
		return NULL;

	switch (type) {
	case RES_TYPE_WQ:
	case RES_TYPE_RQ:
	case RES_TYPE_CQ:
	case RES_TYPE_INTR_CTRL:
		return (char __iomem *)vdev->res[type].vaddr +
			index * VNIC_RES_STRIDE;
	default:
		return (char __iomem *)vdev->res[type].vaddr;
	}
}
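/*
 * Illustrative sketch (not part of the driver): per-queue control
 * registers are laid out at a fixed VNIC_RES_STRIDE (128 byte) pitch,
 * so the control area of, e.g., RQ 2 resolves as:
 *
 *	void __iomem *rq2_ctrl = vnic_dev_get_res(vdev, RES_TYPE_RQ, 2);
 *	// equivalent to: res[RES_TYPE_RQ].vaddr + 2 * VNIC_RES_STRIDE
 *
 * Callers are assumed to have checked the index against
 * vnic_dev_get_res_count() first; vnic_dev_get_res() does not
 * range-check the index itself.
 */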
unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
	unsigned int desc_count, unsigned int desc_size)
{
	/* The base address of the desc rings must be 512 byte aligned.
	 * Descriptor count is aligned to groups of 32 descriptors.  A
	 * count of 0 means the maximum 4096 descriptors.  Descriptor
	 * size is aligned to 16 bytes.
	 */

	unsigned int count_align = 32;
	unsigned int desc_align = 16;

	ring->base_align = 512;

	if (desc_count == 0)
		desc_count = 4096;

	ring->desc_count = VNIC_ALIGN(desc_count, count_align);

	ring->desc_size = VNIC_ALIGN(desc_size, desc_align);

	ring->size = ring->desc_count * ring->desc_size;
	ring->size_unaligned = ring->size + ring->base_align;

	return ring->size_unaligned;
}
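/*
 * Worked example (illustrative, values assumed): for desc_count = 100
 * and desc_size = 16, the count rounds up to VNIC_ALIGN(100, 32) = 128
 * descriptors and the size stays VNIC_ALIGN(16, 16) = 16 bytes, giving
 * ring->size = 128 * 16 = 2048 bytes.  The allocation is then padded by
 * base_align (512) to ring->size_unaligned = 2560 bytes, so that
 * vnic_dev_alloc_desc_ring() below can round the DMA address up to the
 * required 512-byte boundary.
 */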
void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
{
	memset(ring->descs, 0, ring->size);
}

int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev,
	struct vnic_dev_ring *ring,
	unsigned int desc_count, unsigned int desc_size,
	__attribute__((unused)) unsigned int socket_id,
	char *z_name)
{
	void *alloc_addr;
	dma_addr_t alloc_pa = 0;

	vnic_dev_desc_ring_size(ring, desc_count, desc_size);
	alloc_addr = vdev->alloc_consistent(vdev->priv,
					    ring->size_unaligned,
					    &alloc_pa, (u8 *)z_name);
	if (!alloc_addr) {
		pr_err("Failed to allocate ring (size=%d), aborting\n",
			(int)ring->size);
		return -ENOMEM;
	}
	ring->descs_unaligned = alloc_addr;
	if (!alloc_pa) {
		pr_err("Failed to map allocated ring (size=%d), aborting\n",
			(int)ring->size);
		vdev->free_consistent(vdev->priv,
				      ring->size_unaligned,
				      alloc_addr,
				      alloc_pa);
		return -ENOMEM;
	}
	ring->base_addr_unaligned = alloc_pa;

	ring->base_addr = VNIC_ALIGN(ring->base_addr_unaligned,
		ring->base_align);
	ring->descs = (u8 *)ring->descs_unaligned +
	    (ring->base_addr - ring->base_addr_unaligned);

	vnic_dev_clear_desc_ring(ring);

	ring->desc_avail = ring->desc_count - 1;

	return 0;
}

void vnic_dev_free_desc_ring(struct vnic_dev *vdev,
	struct vnic_dev_ring *ring)
{
	if (ring->descs) {
		vdev->free_consistent(vdev->priv,
				      ring->size_unaligned,
				      ring->descs_unaligned,
				      ring->base_addr_unaligned);
		ring->descs = NULL;
	}
}

static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	int wait)
{
	struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
	unsigned int i;
	int delay;
	u32 status;
	int err;

	status = ioread32(&devcmd->status);
	if (status == 0xFFFFFFFF) {
		/* PCI-e target device is gone */
		return -ENODEV;
	}
	if (status & STAT_BUSY) {
		pr_err("Busy devcmd %d\n", _CMD_N(cmd));
		return -EBUSY;
	}

	if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
		for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
			writeq(vdev->args[i], &devcmd->args[i]);
		wmb(); /* complete all writes initiated till now */
	}

	iowrite32(cmd, &devcmd->cmd);

	if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
		return 0;

	for (delay = 0; delay < wait; delay++) {

		udelay(100);

		status = ioread32(&devcmd->status);
		if (status == 0xFFFFFFFF) {
			/* PCI-e target device is gone */
			return -ENODEV;
		}

		if (!(status & STAT_BUSY)) {
			if (status & STAT_ERROR) {
				err = -(int)readq(&devcmd->args[0]);
				if (cmd != CMD_CAPABILITY)
					pr_err("Devcmd %d failed " \
						"with error code %d\n",
						_CMD_N(cmd), err);
				return err;
			}

			if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
				rmb(); /* finish all reads initiated till now */
				for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
					vdev->args[i] = readq(&devcmd->args[i]);
			}

			return 0;
		}
	}

	pr_err("Timed out devcmd %d\n", _CMD_N(cmd));
	return -ETIMEDOUT;
}
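/*
 * Note on the devcmd handshake above (descriptive, not new behavior):
 * arguments are written to the devcmd firmware area first, then the
 * command opcode is posted, and the caller polls the status register
 * every 100 us for at most `wait` iterations.  The wait = 1000 used
 * throughout this file therefore bounds a devcmd at roughly 100 ms
 * before -ETIMEDOUT is returned.
 */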
static int vnic_dev_cmd_proxy(struct vnic_dev *vdev,
	enum vnic_devcmd_cmd proxy_cmd, enum vnic_devcmd_cmd cmd,
	u64 *args, int nargs, int wait)
{
	u32 status;
	int err;

	/*
	 * The proxy command consumes 2 arguments: one for the proxy index,
	 * the other for the command to be proxied.
	 */
	if (nargs > VNIC_DEVCMD_NARGS - 2) {
		pr_err("number of args %d exceeds the maximum\n", nargs);
		return -EINVAL;
	}
	memset(vdev->args, 0, sizeof(vdev->args));

	vdev->args[0] = vdev->proxy_index;
	vdev->args[1] = cmd;
	memcpy(&vdev->args[2], args, nargs * sizeof(args[0]));

	err = _vnic_dev_cmd(vdev, proxy_cmd, wait);
	if (err)
		return err;

	status = (u32)vdev->args[0];
	if (status & STAT_ERROR) {
		err = (int)vdev->args[1];
		if (err != ERR_ECMDUNKNOWN ||
		    cmd != CMD_CAPABILITY)
			pr_err("Error %d proxy devcmd %d\n", err, _CMD_N(cmd));
		return err;
	}

	memcpy(args, &vdev->args[1], nargs * sizeof(args[0]));

	return 0;
}

static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev,
	enum vnic_devcmd_cmd cmd, u64 *args, int nargs, int wait)
{
	int err;

	if (nargs > VNIC_DEVCMD_NARGS) {
		pr_err("number of args %d exceeds the maximum\n", nargs);
		return -EINVAL;
	}
	memset(vdev->args, 0, sizeof(vdev->args));
	memcpy(vdev->args, args, nargs * sizeof(args[0]));

	err = _vnic_dev_cmd(vdev, cmd, wait);

	memcpy(args, vdev->args, nargs * sizeof(args[0]));

	return err;
}

int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	u64 *a0, u64 *a1, int wait)
{
	u64 args[2];
	int err;

	args[0] = *a0;
	args[1] = *a1;
	memset(vdev->args, 0, sizeof(vdev->args));

	switch (vdev->proxy) {
	case PROXY_BY_INDEX:
		err = vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_INDEX, cmd,
				args, ARRAY_SIZE(args), wait);
		break;
	case PROXY_BY_BDF:
		err = vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_BDF, cmd,
				args, ARRAY_SIZE(args), wait);
		break;
	case PROXY_NONE:
	default:
		err = vnic_dev_cmd_no_proxy(vdev, cmd, args, 2, wait);
		break;
	}

	if (err == 0) {
		*a0 = args[0];
		*a1 = args[1];
	}

	return err;
}

int vnic_dev_cmd_args(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	u64 *args, int nargs, int wait)
{
	switch (vdev->proxy) {
	case PROXY_BY_INDEX:
		return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_INDEX, cmd,
				args, nargs, wait);
	case PROXY_BY_BDF:
		return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_BDF, cmd,
				args, nargs, wait);
	case PROXY_NONE:
	default:
		return vnic_dev_cmd_no_proxy(vdev, cmd, args, nargs, wait);
	}
}
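/*
 * Illustrative usage sketch (assumed caller code, not part of this file):
 * most devcmds in this file follow the same two-register pattern, e.g.
 * fetching the station MAC address as vnic_dev_get_mac_addr() does below:
 *
 *	u64 a0 = 0, a1 = 0;
 *	int wait = 1000;	// ~100 ms poll budget, see _vnic_dev_cmd()
 *	int err = vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
 *	// on success the 6 MAC bytes are in the low bytes of a0
 *
 * Proxying is transparent to the caller: when vdev->proxy is set,
 * vnic_dev_cmd() repacks a0/a1 behind the proxy index and command.
 */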
int vnic_dev_fw_info(struct vnic_dev *vdev,
	struct vnic_devcmd_fw_info **fw_info)
{
	char name[NAME_MAX];
	u64 a0, a1 = 0;
	int wait = 1000;
	int err = 0;
	static u32 instance;

	if (!vdev->fw_info) {
		snprintf((char *)name, sizeof(name), "vnic_fw_info-%u",
			 instance++);
		vdev->fw_info = vdev->alloc_consistent(vdev->priv,
			sizeof(struct vnic_devcmd_fw_info),
			&vdev->fw_info_pa, (u8 *)name);
		if (!vdev->fw_info)
			return -ENOMEM;
		a0 = vdev->fw_info_pa;
		a1 = sizeof(struct vnic_devcmd_fw_info);
		err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO,
				   &a0, &a1, wait);
	}
	*fw_info = vdev->fw_info;
	return err;
}

static int vnic_dev_advanced_filters_cap(struct vnic_dev *vdev, u64 *args,
		int nargs)
{
	memset(args, 0, nargs * sizeof(*args));
	args[0] = CMD_ADD_ADV_FILTER;
	args[1] = FILTER_CAP_MODE_V1_FLAG;
	return vnic_dev_cmd_args(vdev, CMD_CAPABILITY, args, nargs, 1000);
}

int vnic_dev_capable_adv_filters(struct vnic_dev *vdev)
{
	u64 a0 = CMD_ADD_ADV_FILTER, a1 = 0;
	int wait = 1000;
	int err;

	err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);
	if (err)
		return 0;
	return (a1 >= (u32)FILTER_DPDK_1);
}

/* Determine the "best" filtering mode the VIC is capable of.  Returns
 * an error code on failure, or 0 on success with one of 3 values
 * stored in *mode:
 * FILTER_DPDK_1 - advanced filters available
 * FILTER_USNIC_IP - advanced filters, but with the restriction that
 *	the IP layer must be explicitly specified, i.e. one cannot have
 *	a UDP filter that matches both IPv4 and IPv6.
 * FILTER_IPV4_5TUPLE - fallback if neither of the above is available;
 *	all other filter types are not available.
 * The supported filter actions are returned in filter_actions.
 */
int vnic_dev_capable_filter_mode(struct vnic_dev *vdev, u32 *mode,
	u8 *filter_actions)
{
	u64 args[4];
	int err;
	u32 max_level = 0;

	err = vnic_dev_advanced_filters_cap(vdev, args, 4);

	/* determine supported filter actions */
	*filter_actions = FILTER_ACTION_RQ_STEERING_FLAG; /* always available */
	if (args[2] == FILTER_CAP_MODE_V1)
		*filter_actions = args[3];

	if (err || ((args[0] == 1) && (args[1] == 0))) {
		/* Adv filter command not supported, or advanced filters
		 * available but not enabled.  Try the normal filter
		 * capability command.
		 */
		args[0] = CMD_ADD_FILTER;
		args[1] = 0;
		err = vnic_dev_cmd_args(vdev, CMD_CAPABILITY, args, 2, 1000);
		if (err)
			return err;
		max_level = args[1];
		goto parse_max_level;
	} else if (args[2] == FILTER_CAP_MODE_V1) {
		/* parse filter capability mask in args[1] */
		if (args[1] & FILTER_DPDK_1_FLAG)
			*mode = FILTER_DPDK_1;
		else if (args[1] & FILTER_USNIC_IP_FLAG)
			*mode = FILTER_USNIC_IP;
		else if (args[1] & FILTER_IPV4_5TUPLE_FLAG)
			*mode = FILTER_IPV4_5TUPLE;
		return 0;
	}
	max_level = args[1];
parse_max_level:
	if (max_level >= (u32)FILTER_USNIC_IP)
		*mode = FILTER_USNIC_IP;
	else
		*mode = FILTER_IPV4_5TUPLE;
	return 0;
}
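/*
 * Layout note for the V1 capability exchange above (inferred from the
 * code in this file, restated here for clarity): on return from
 * CMD_CAPABILITY with FILTER_CAP_MODE_V1_FLAG set,
 *	args[0] - nonzero when CMD_ADD_ADV_FILTER is recognized (the
 *	          code treats args[0] == 1 with args[1] == 0 as
 *	          "available but not enabled")
 *	args[1] - bitmask of supported filter modes (FILTER_*_FLAG)
 *	args[2] - capability mode (FILTER_CAP_MODE_V1 when the firmware
 *	          understands the V1 exchange)
 *	args[3] - bitmask of supported filter actions
 */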
void vnic_dev_capable_udp_rss_weak(struct vnic_dev *vdev, bool *cfg_chk,
				   bool *weak)
{
	u64 a0 = CMD_NIC_CFG, a1 = 0;
	int wait = 1000;
	int err;

	*cfg_chk = false;
	*weak = false;
	err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);
	if (err == 0 && a0 != 0 && a1 != 0) {
		*cfg_chk = true;
		*weak = !!((a1 >> 32) & CMD_NIC_CFG_CAPF_UDP_WEAK);
	}
}

int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd)
{
	u64 a0 = (u32)cmd, a1 = 0;
	int wait = 1000;
	int err;

	err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);

	return !(err || a0);
}

int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, size_t size,
	void *value)
{
	u64 a0, a1;
	int wait = 1000;
	int err;

	a0 = offset;
	a1 = size;

	err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);

	switch (size) {
	case 1:
		*(u8 *)value = (u8)a0;
		break;
	case 2:
		*(u16 *)value = (u16)a0;
		break;
	case 4:
		*(u32 *)value = (u32)a0;
		break;
	case 8:
		*(u64 *)value = a0;
		break;
	default:
		BUG();
		break;
	}

	return err;
}

int vnic_dev_stats_clear(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait);
}

int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
{
	u64 a0, a1;
	int wait = 1000;

	if (!vdev->stats)
		return -ENOMEM;

	*stats = vdev->stats;
	a0 = vdev->stats_pa;
	a1 = sizeof(struct vnic_stats);

	return vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
}

int vnic_dev_close(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
}

int vnic_dev_enable_wait(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	if (vnic_dev_capable(vdev, CMD_ENABLE_WAIT))
		return vnic_dev_cmd(vdev, CMD_ENABLE_WAIT, &a0, &a1, wait);
	else
		return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
}

int vnic_dev_disable(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);
}

int vnic_dev_open(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
}

int vnic_dev_open_done(struct vnic_dev *vdev, int *done)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;

	*done = 0;

	err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
	if (err)
		return err;

	*done = (a0 == 0);

	return 0;
}

int vnic_dev_get_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err, i;

	for (i = 0; i < ETH_ALEN; i++)
		mac_addr[i] = 0;

	err = vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
	if (err)
		return err;

	for (i = 0; i < ETH_ALEN; i++)
		mac_addr[i] = ((u8 *)&a0)[i];

	return 0;
}

int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
	int broadcast, int promisc, int allmulti)
{
	u64 a0, a1 = 0;
	int wait = 1000;
	int err;

	a0 = (directed ? CMD_PFILTER_DIRECTED : 0) |
	     (multicast ? CMD_PFILTER_MULTICAST : 0) |
	     (broadcast ? CMD_PFILTER_BROADCAST : 0) |
	     (promisc ? CMD_PFILTER_PROMISCUOUS : 0) |
	     (allmulti ? CMD_PFILTER_ALL_MULTICAST : 0);

	err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
	if (err)
		pr_err("Can't set packet filter\n");

	return err;
}
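/*
 * Illustrative usage sketch (assumed caller code): the five flags are
 * OR-ed into a single CMD_PFILTER_* mask, so enabling promiscuous mode
 * while keeping unicast/multicast/broadcast reception looks like:
 *
 *	vnic_dev_packet_filter(vdev, 1, 1, 1, 1, 0);
 *
 * and passing all zeros disables reception entirely.
 */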
int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		((u8 *)&a0)[i] = addr[i];

	err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
	if (err)
		pr_err("Can't add addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
			addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
			err);

	return err;
}

int vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		((u8 *)&a0)[i] = addr[i];

	err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
	if (err)
		pr_err("Can't del addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
			addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
			err);

	return err;
}

int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev,
	u8 ig_vlan_rewrite_mode)
{
	u64 a0 = ig_vlan_rewrite_mode, a1 = 0;
	int wait = 1000;

	if (vnic_dev_capable(vdev, CMD_IG_VLAN_REWRITE_MODE))
		return vnic_dev_cmd(vdev, CMD_IG_VLAN_REWRITE_MODE,
				&a0, &a1, wait);
	else
		return 0;
}

void vnic_dev_set_reset_flag(struct vnic_dev *vdev, int state)
{
	vdev->in_reset = state;
}

static inline int vnic_dev_in_reset(struct vnic_dev *vdev)
{
	return vdev->in_reset;
}

int vnic_dev_notify_setcmd(struct vnic_dev *vdev,
	void *notify_addr, dma_addr_t notify_pa, u16 intr)
{
	u64 a0, a1;
	int wait = 1000;
	int r;

	memset(notify_addr, 0, sizeof(struct vnic_devcmd_notify));
	if (!vnic_dev_in_reset(vdev)) {
		vdev->notify = notify_addr;
		vdev->notify_pa = notify_pa;
	}

	a0 = (u64)notify_pa;
	a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL;
	a1 += sizeof(struct vnic_devcmd_notify);

	r = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
	if (!vnic_dev_in_reset(vdev))
		vdev->notify_sz = (r == 0) ? (u32)a1 : 0;

	return r;
}

int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
{
	void *notify_addr = NULL;
	dma_addr_t notify_pa = 0;
	char name[NAME_MAX];
	static u32 instance;

	if (vdev->notify || vdev->notify_pa) {
		return vnic_dev_notify_setcmd(vdev, vdev->notify,
					      vdev->notify_pa, intr);
	}
	if (!vnic_dev_in_reset(vdev)) {
		snprintf((char *)name, sizeof(name),
			 "vnic_notify-%u", instance++);
		notify_addr = vdev->alloc_consistent(vdev->priv,
			sizeof(struct vnic_devcmd_notify),
			&notify_pa, (u8 *)name);
		if (!notify_addr)
			return -ENOMEM;
	}

	return vnic_dev_notify_setcmd(vdev, notify_addr, notify_pa, intr);
}
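/*
 * Layout note for CMD_NOTIFY above (as used in this file): a0 carries
 * the DMA address of the notify buffer, while a1 packs the interrupt
 * index into bits 47:32 and the size of struct vnic_devcmd_notify into
 * the low bits.  vnic_dev_notify_unsetcmd() below reuses the same
 * encoding with paddr = 0 and interrupt index 0xffff (-1) to unset.
 */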
int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev)
{
	u64 a0, a1;
	int wait = 1000;
	int err;

	a0 = 0;  /* paddr = 0 to unset notify buffer */
	a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */
	a1 += sizeof(struct vnic_devcmd_notify);

	err = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
	if (!vnic_dev_in_reset(vdev)) {
		vdev->notify = NULL;
		vdev->notify_pa = 0;
		vdev->notify_sz = 0;
	}

	return err;
}

int vnic_dev_notify_unset(struct vnic_dev *vdev)
{
	if (vdev->notify && !vnic_dev_in_reset(vdev)) {
		vdev->free_consistent(vdev->priv,
			sizeof(struct vnic_devcmd_notify),
			vdev->notify,
			vdev->notify_pa);
	}

	return vnic_dev_notify_unsetcmd(vdev);
}

static int vnic_dev_notify_ready(struct vnic_dev *vdev)
{
	u32 *words;
	unsigned int nwords = vdev->notify_sz / 4;
	unsigned int i;
	u32 csum;

	if (!vdev->notify || !vdev->notify_sz)
		return 0;

	do {
		csum = 0;
		rte_memcpy(&vdev->notify_copy, vdev->notify, vdev->notify_sz);
		words = (u32 *)&vdev->notify_copy;
		for (i = 1; i < nwords; i++)
			csum += words[i];
	} while (csum != words[0]);

	return 1;
}
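/*
 * Consistency note for vnic_dev_notify_ready() above (behavior as
 * implemented, restated for clarity): the firmware writes the notify
 * block with word 0 holding the 32-bit sum of all following words.
 * The driver copies the block and recomputes the sum, retrying until
 * the snapshot is self-consistent, which guards against reading the
 * buffer while the firmware is mid-update.  The link state and port
 * speed queries below are served from this validated notify_copy.
 */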
int vnic_dev_init(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;
	int r = 0;

	if (vnic_dev_capable(vdev, CMD_INIT))
		r = vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
	else {
		vnic_dev_cmd(vdev, CMD_INIT_v1, &a0, &a1, wait);
		if (a0 & CMD_INITF_DEFAULT_MAC) {
			/* Emulate these for old CMD_INIT_v1 which
			 * didn't pass a0 so no CMD_INITF_*.
			 */
			vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
			vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
		}
	}
	return r;
}

void vnic_dev_intr_coal_timer_info_default(struct vnic_dev *vdev)
{
	/* Default: hardware intr coal timer is in units of 1.5 usecs */
	vdev->intr_coal_timer_info.mul = 2;
	vdev->intr_coal_timer_info.div = 3;
	vdev->intr_coal_timer_info.max_usec =
		vnic_dev_intr_coal_timer_hw_to_usec(vdev, 0xffff);
}

int vnic_dev_link_status(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.link_state;
}

u32 vnic_dev_port_speed(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.port_speed;
}

u32 vnic_dev_intr_coal_timer_usec_to_hw(struct vnic_dev *vdev, u32 usec)
{
	return (usec * vdev->intr_coal_timer_info.mul) /
		vdev->intr_coal_timer_info.div;
}

u32 vnic_dev_intr_coal_timer_hw_to_usec(struct vnic_dev *vdev, u32 hw_cycles)
{
	return (hw_cycles * vdev->intr_coal_timer_info.div) /
		vdev->intr_coal_timer_info.mul;
}

u32 vnic_dev_get_intr_coal_timer_max(struct vnic_dev *vdev)
{
	return vdev->intr_coal_timer_info.max_usec;
}

int vnic_dev_alloc_stats_mem(struct vnic_dev *vdev)
{
	char name[NAME_MAX];
	static u32 instance;

	snprintf((char *)name, sizeof(name), "vnic_stats-%u", instance++);
	vdev->stats = vdev->alloc_consistent(vdev->priv,
					     sizeof(struct vnic_stats),
					     &vdev->stats_pa, (u8 *)name);
	return vdev->stats == NULL ? -ENOMEM : 0;
}

void vnic_dev_unregister(struct vnic_dev *vdev)
{
	if (vdev) {
		if (vdev->notify)
			vdev->free_consistent(vdev->priv,
				sizeof(struct vnic_devcmd_notify),
				vdev->notify,
				vdev->notify_pa);
		if (vdev->stats)
			vdev->free_consistent(vdev->priv,
				sizeof(struct vnic_stats),
				vdev->stats, vdev->stats_pa);
		if (vdev->fw_info)
			vdev->free_consistent(vdev->priv,
				sizeof(struct vnic_devcmd_fw_info),
				vdev->fw_info, vdev->fw_info_pa);
		rte_free(vdev);
	}
}

struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
	void *priv, struct rte_pci_device *pdev, struct vnic_dev_bar *bar,
	unsigned int num_bars)
{
	if (!vdev) {
		char name[NAME_MAX];
		snprintf((char *)name, sizeof(name), "%s-vnic",
			 pdev->device.name);
		vdev = (struct vnic_dev *)rte_zmalloc_socket(name,
					sizeof(struct vnic_dev),
					RTE_CACHE_LINE_SIZE,
					pdev->device.numa_node);
		if (!vdev)
			return NULL;
	}

	vdev->priv = priv;
	vdev->pdev = pdev;

	if (vnic_dev_discover_res(vdev, bar, num_bars))
		goto err_out;

	vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
	if (!vdev->devcmd)
		goto err_out;

	return vdev;

err_out:
	vnic_dev_unregister(vdev);
	return NULL;
}
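/*
 * Illustrative bring-up sketch (assumed caller code, simplified from
 * how an ENIC PMD would typically use this file):
 *
 *	struct vnic_dev *vdev;
 *
 *	vdev = vnic_dev_register(NULL, enic, pdev, bar, num_bars);
 *	if (!vdev)
 *		return -ENODEV;
 *	// DMA alloc/free callbacks must be registered before any devcmd
 *	// that needs consistent memory (fw_info, stats, notify)
 *	vnic_register_cbacks(vdev, my_alloc_consistent, my_free_consistent);
 *	err = vnic_dev_open(vdev, 0);
 *
 * enic, my_alloc_consistent and my_free_consistent are placeholder
 * names for caller-supplied objects; vnic_dev_register() itself only
 * discovers BAR resources and resolves the devcmd register area.
 */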
/*
 * vnic_dev_classifier: Add/Delete classifier entries
 * @vdev: vdev of the device
 * @cmd: CLSF_ADD for Add filter
 *	 CLSF_DEL for Delete filter
 * @entry: In case of ADD filter, the caller passes the RQ number in this
 *	   variable.  This function stores the filter_id returned by the
 *	   firmware in the same variable before return.
 *
 *	   In case of DEL filter, the caller passes the filter_id (as
 *	   returned by a prior ADD) in this variable; it is used as
 *	   input only.
 * @data: filter data
 * @action_v2: action data
 */
int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
	struct filter_v2 *data, struct filter_action_v2 *action_v2)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	dma_addr_t tlv_pa;
	int ret = -EINVAL;
	struct filter_tlv *tlv, *tlv_va;
	u64 tlv_size;
	u32 filter_size, action_size;
	static unsigned int unique_id;
	char z_name[RTE_MEMZONE_NAMESIZE];
	enum vnic_devcmd_cmd dev_cmd;

	if (cmd == CLSF_ADD) {
		dev_cmd = (data->type >= FILTER_DPDK_1) ?
			  CMD_ADD_ADV_FILTER : CMD_ADD_FILTER;

		filter_size = vnic_filter_size(data);
		action_size = vnic_action_size(action_v2);

		tlv_size = filter_size + action_size +
		    2*sizeof(struct filter_tlv);
		snprintf((char *)z_name, sizeof(z_name),
			 "vnic_clsf_%u", unique_id++);
		tlv_va = vdev->alloc_consistent(vdev->priv,
			tlv_size, &tlv_pa, (u8 *)z_name);
		if (!tlv_va)
			return -ENOMEM;
		tlv = tlv_va;
		a0 = tlv_pa;
		a1 = tlv_size;
		memset(tlv, 0, tlv_size);
		tlv->type = CLSF_TLV_FILTER;
		tlv->length = filter_size;
		memcpy(&tlv->val, (void *)data, filter_size);

		tlv = (struct filter_tlv *)((char *)tlv +
					 sizeof(struct filter_tlv) +
					 filter_size);

		tlv->type = CLSF_TLV_ACTION;
		tlv->length = action_size;
		memcpy(&tlv->val, (void *)action_v2, action_size);
		ret = vnic_dev_cmd(vdev, dev_cmd, &a0, &a1, wait);
		*entry = (u16)a0;
		vdev->free_consistent(vdev->priv, tlv_size, tlv_va, tlv_pa);
	} else if (cmd == CLSF_DEL) {
		a0 = *entry;
		ret = vnic_dev_cmd(vdev, CMD_DEL_FILTER, &a0, &a1, wait);
	}

	return ret;
}

int vnic_dev_overlay_offload_ctrl(struct vnic_dev *vdev, u8 overlay, u8 config)
{
	u64 a0 = overlay;
	u64 a1 = config;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CTRL, &a0, &a1, wait);
}

int vnic_dev_overlay_offload_cfg(struct vnic_dev *vdev, u8 overlay,
	u16 vxlan_udp_port_number)
{
	u64 a1 = vxlan_udp_port_number;
	u64 a0 = overlay;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CFG, &a0, &a1, wait);
}

int vnic_dev_capable_vxlan(struct vnic_dev *vdev)
{
	u64 a0 = VIC_FEATURE_VXLAN;
	u64 a1 = 0;
	int wait = 1000;
	int ret;

	ret = vnic_dev_cmd(vdev, CMD_GET_SUPP_FEATURE_VER, &a0, &a1, wait);
	/* 1 if the NIC can do VXLAN for both IPv4 and IPv6 with multiple WQs */
	return ret == 0 &&
		(a1 & (FEATURE_VXLAN_IPV6 | FEATURE_VXLAN_MULTI_WQ)) ==
		(FEATURE_VXLAN_IPV6 | FEATURE_VXLAN_MULTI_WQ);
}
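/*
 * Layout note for the CLSF_ADD path in vnic_dev_classifier() above
 * (restating the buffer construction for clarity): the DMA buffer
 * handed to CMD_ADD_FILTER / CMD_ADD_ADV_FILTER holds two back-to-back
 * TLVs,
 *
 *	[ filter_tlv: CLSF_TLV_FILTER, filter_size ][ filter_v2 bytes ]
 *	[ filter_tlv: CLSF_TLV_ACTION, action_size ][ action bytes    ]
 *
 * with a0 = buffer DMA address and a1 = total buffer size on entry;
 * on success the firmware returns the new filter ID in a0.
 */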