/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 */

#include <rte_memzone.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>

#include "vnic_dev.h"
#include "vnic_resource.h"
#include "vnic_devcmd.h"
#include "vnic_stats.h"


enum vnic_proxy_type {
	PROXY_NONE,
	PROXY_BY_BDF,
	PROXY_BY_INDEX,
};

struct vnic_res {
	void __iomem *vaddr;
	dma_addr_t bus_addr;
	unsigned int count;
};

struct vnic_intr_coal_timer_info {
	u32 mul;
	u32 div;
	u32 max_usec;
};

struct vnic_dev {
	void *priv;
	struct rte_pci_device *pdev;
	struct vnic_res res[RES_TYPE_MAX];
	enum vnic_dev_intr_mode intr_mode;
	struct vnic_devcmd __iomem *devcmd;
	struct vnic_devcmd_notify *notify;
	struct vnic_devcmd_notify notify_copy;
	dma_addr_t notify_pa;
	u32 notify_sz;
	dma_addr_t linkstatus_pa;
	struct vnic_stats *stats;
	dma_addr_t stats_pa;
	struct vnic_devcmd_fw_info *fw_info;
	dma_addr_t fw_info_pa;
	enum vnic_proxy_type proxy;
	u32 proxy_index;
	u64 args[VNIC_DEVCMD_NARGS];
	int in_reset;
	struct vnic_intr_coal_timer_info intr_coal_timer_info;
	void *(*alloc_consistent)(void *priv, size_t size,
		dma_addr_t *dma_handle, u8 *name);
	void (*free_consistent)(void *priv,
		size_t size, void *vaddr,
		dma_addr_t dma_handle);
};

#define VNIC_MAX_RES_HDR_SIZE \
	(sizeof(struct vnic_resource_header) + \
	sizeof(struct vnic_resource) * RES_TYPE_MAX)
#define VNIC_RES_STRIDE	128

void *vnic_dev_priv(struct vnic_dev *vdev)
{
	return vdev->priv;
}

void vnic_register_cbacks(struct vnic_dev *vdev,
	void *(*alloc_consistent)(void *priv, size_t size,
		dma_addr_t *dma_handle, u8 *name),
	void (*free_consistent)(void *priv,
		size_t size, void *vaddr,
		dma_addr_t dma_handle))
{
	vdev->alloc_consistent = alloc_consistent;
	vdev->free_consistent = free_consistent;
}
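/*
 * Illustrative sketch (not part of the driver): the owner of the vnic_dev
 * is expected to supply DMA-coherent alloc/free callbacks before any
 * devcmds are issued. In a DPDK context the allocator might be backed by
 * a memzone; the helper names below are hypothetical.
 *
 *	static void *example_alloc_consistent(void *priv, size_t size,
 *		dma_addr_t *dma_handle, u8 *name)
 *	{
 *		const struct rte_memzone *mz = rte_memzone_reserve_aligned(
 *			(const char *)name, size, SOCKET_ID_ANY,
 *			RTE_MEMZONE_IOVA_CONTIG, 512);
 *		if (!mz)
 *			return NULL;
 *		*dma_handle = mz->iova;
 *		return mz->addr;
 *	}
 *
 *	vnic_register_cbacks(vdev, example_alloc_consistent,
 *			     example_free_consistent);
 */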
static int vnic_dev_discover_res(struct vnic_dev *vdev,
	struct vnic_dev_bar *bar, unsigned int num_bars)
{
	struct vnic_resource_header __iomem *rh;
	struct mgmt_barmap_hdr __iomem *mrh;
	struct vnic_resource __iomem *r;
	u8 type;

	if (num_bars == 0)
		return -EINVAL;

	if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
		pr_err("vNIC BAR0 res hdr length error\n");
		return -EINVAL;
	}

	rh = bar->vaddr;
	mrh = bar->vaddr;
	if (!rh) {
		pr_err("vNIC BAR0 res hdr not mem-mapped\n");
		return -EINVAL;
	}

	/* Check for mgmt vnic in addition to normal vnic */
	if ((ioread32(&rh->magic) != VNIC_RES_MAGIC) ||
	    (ioread32(&rh->version) != VNIC_RES_VERSION)) {
		if ((ioread32(&mrh->magic) != MGMTVNIC_MAGIC) ||
		    (ioread32(&mrh->version) != MGMTVNIC_VERSION)) {
			pr_err("vNIC BAR0 res magic/version error " \
				"exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n",
				VNIC_RES_MAGIC, VNIC_RES_VERSION,
				MGMTVNIC_MAGIC, MGMTVNIC_VERSION,
				ioread32(&rh->magic), ioread32(&rh->version));
			return -EINVAL;
		}
	}

	if (ioread32(&mrh->magic) == MGMTVNIC_MAGIC)
		r = (struct vnic_resource __iomem *)(mrh + 1);
	else
		r = (struct vnic_resource __iomem *)(rh + 1);

	while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {
		u8 bar_num = ioread8(&r->bar);
		u32 bar_offset = ioread32(&r->bar_offset);
		u32 count = ioread32(&r->count);
		u32 len;

		r++;

		if (bar_num >= num_bars)
			continue;

		if (!bar[bar_num].len || !bar[bar_num].vaddr)
			continue;

		switch (type) {
		case RES_TYPE_WQ:
		case RES_TYPE_RQ:
		case RES_TYPE_CQ:
		case RES_TYPE_INTR_CTRL:
			/* each count is stride bytes long */
			len = count * VNIC_RES_STRIDE;
			if (len + bar_offset > bar[bar_num].len) {
				pr_err("vNIC BAR0 resource %d " \
					"out-of-bounds, offset 0x%x + " \
					"size 0x%x > bar len 0x%lx\n",
					type, bar_offset,
					len,
					bar[bar_num].len);
				return -EINVAL;
			}
			break;
		case RES_TYPE_INTR_PBA_LEGACY:
		case RES_TYPE_DEVCMD:
			len = count;
			break;
		default:
			continue;
		}

		vdev->res[type].count = count;
		vdev->res[type].vaddr = (char __iomem *)bar[bar_num].vaddr +
			bar_offset;
		vdev->res[type].bus_addr = bar[bar_num].bus_addr + bar_offset;
	}

	return 0;
}

unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
	enum vnic_res_type type)
{
	return vdev->res[type].count;
}

void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
	unsigned int index)
{
	if (!vdev->res[type].vaddr)
		return NULL;

	switch (type) {
	case RES_TYPE_WQ:
	case RES_TYPE_RQ:
	case RES_TYPE_CQ:
	case RES_TYPE_INTR_CTRL:
		return (char __iomem *)vdev->res[type].vaddr +
			index * VNIC_RES_STRIDE;
	default:
		return (char __iomem *)vdev->res[type].vaddr;
	}
}

unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
	unsigned int desc_count, unsigned int desc_size)
{
	/* The base address of the desc rings must be 512 byte aligned.
	 * Descriptor count is aligned to groups of 32 descriptors. A
	 * count of 0 means the maximum 4096 descriptors. Descriptor
	 * size is aligned to 16 bytes.
	 */

	unsigned int count_align = 32;
	unsigned int desc_align = 16;

	ring->base_align = 512;

	if (desc_count == 0)
		desc_count = 4096;

	ring->desc_count = VNIC_ALIGN(desc_count, count_align);

	ring->desc_size = VNIC_ALIGN(desc_size, desc_align);

	ring->size = ring->desc_count * ring->desc_size;
	ring->size_unaligned = ring->size + ring->base_align;

	return ring->size_unaligned;
}
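/*
 * Worked example (illustrative): desc_count = 100, desc_size = 12 rounds
 * up to 128 descriptors of 16 bytes each, so ring->size = 2048 and
 * ring->size_unaligned = 2048 + 512 = 2560 bytes are requested, leaving
 * enough slack to 512-byte-align the ring base address afterwards.
 */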
void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
{
	memset(ring->descs, 0, ring->size);
}

int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev,
	struct vnic_dev_ring *ring,
	unsigned int desc_count, unsigned int desc_size,
	__attribute__((unused)) unsigned int socket_id,
	char *z_name)
{
	void *alloc_addr;
	dma_addr_t alloc_pa = 0;

	vnic_dev_desc_ring_size(ring, desc_count, desc_size);
	alloc_addr = vdev->alloc_consistent(vdev->priv,
		ring->size_unaligned,
		&alloc_pa, (u8 *)z_name);
	if (!alloc_addr) {
		pr_err("Failed to allocate ring (size=%d), aborting\n",
			(int)ring->size);
		return -ENOMEM;
	}
	ring->descs_unaligned = alloc_addr;
	if (!alloc_pa) {
		pr_err("Failed to map allocated ring (size=%d), aborting\n",
			(int)ring->size);
		vdev->free_consistent(vdev->priv,
			ring->size_unaligned,
			alloc_addr,
			alloc_pa);
		return -ENOMEM;
	}
	ring->base_addr_unaligned = alloc_pa;

	ring->base_addr = VNIC_ALIGN(ring->base_addr_unaligned,
		ring->base_align);
	ring->descs = (u8 *)ring->descs_unaligned +
		(ring->base_addr - ring->base_addr_unaligned);

	vnic_dev_clear_desc_ring(ring);

	ring->desc_avail = ring->desc_count - 1;

	return 0;
}

void vnic_dev_free_desc_ring(struct vnic_dev *vdev,
	struct vnic_dev_ring *ring)
{
	if (ring->descs) {
		vdev->free_consistent(vdev->priv,
			ring->size_unaligned,
			ring->descs_unaligned,
			ring->base_addr_unaligned);
		ring->descs = NULL;
	}
}

static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	int wait)
{
	struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
	unsigned int i;
	int delay;
	u32 status;
	int err;

	status = ioread32(&devcmd->status);
	if (status == 0xFFFFFFFF) {
		/* PCI-e target device is gone */
		return -ENODEV;
	}
	if (status & STAT_BUSY) {
		pr_err("Busy devcmd %d\n", _CMD_N(cmd));
		return -EBUSY;
	}

	if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
		for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
			writeq(vdev->args[i], &devcmd->args[i]);
		wmb(); /* complete all writes initiated till now */
	}

	iowrite32(cmd, &devcmd->cmd);

	if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
		return 0;

	for (delay = 0; delay < wait; delay++) {

		udelay(100);

		status = ioread32(&devcmd->status);
		if (status == 0xFFFFFFFF) {
			/* PCI-e target device is gone */
			return -ENODEV;
		}

		if (!(status & STAT_BUSY)) {
			if (status & STAT_ERROR) {
				err = -(int)readq(&devcmd->args[0]);
				if (cmd != CMD_CAPABILITY)
					pr_err("Devcmd %d failed " \
						"with error code %d\n",
						_CMD_N(cmd), err);
				return err;
			}

			if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
				rmb(); /* finish all reads initiated till now */
				for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
					vdev->args[i] = readq(&devcmd->args[i]);
			}

			return 0;
		}
	}

	pr_err("Timed out devcmd %d\n", _CMD_N(cmd));
	return -ETIMEDOUT;
}
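/*
 * Descriptive note: with the conventional wait argument of 1000 used
 * throughout this file, the polling loop in _vnic_dev_cmd() above gives
 * firmware roughly 1000 * 100 us = 100 ms to complete a command before
 * -ETIMEDOUT is returned.
 */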
static int vnic_dev_cmd_proxy(struct vnic_dev *vdev,
	enum vnic_devcmd_cmd proxy_cmd, enum vnic_devcmd_cmd cmd,
	u64 *args, int nargs, int wait)
{
	u32 status;
	int err;

	/*
	 * The proxy command consumes 2 arguments: one for the proxy index,
	 * the other for the command to be proxied.
	 */
	if (nargs > VNIC_DEVCMD_NARGS - 2) {
		pr_err("number of args %d exceeds the maximum\n", nargs);
		return -EINVAL;
	}
	memset(vdev->args, 0, sizeof(vdev->args));

	vdev->args[0] = vdev->proxy_index;
	vdev->args[1] = cmd;
	memcpy(&vdev->args[2], args, nargs * sizeof(args[0]));

	err = _vnic_dev_cmd(vdev, proxy_cmd, wait);
	if (err)
		return err;

	status = (u32)vdev->args[0];
	if (status & STAT_ERROR) {
		err = (int)vdev->args[1];
		if (err != ERR_ECMDUNKNOWN ||
		    cmd != CMD_CAPABILITY)
			pr_err("Error %d proxy devcmd %d\n", err, _CMD_N(cmd));
		return err;
	}

	memcpy(args, &vdev->args[1], nargs * sizeof(args[0]));

	return 0;
}

static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev,
	enum vnic_devcmd_cmd cmd, u64 *args, int nargs, int wait)
{
	int err;

	if (nargs > VNIC_DEVCMD_NARGS) {
		pr_err("number of args %d exceeds the maximum\n", nargs);
		return -EINVAL;
	}
	memset(vdev->args, 0, sizeof(vdev->args));
	memcpy(vdev->args, args, nargs * sizeof(args[0]));

	err = _vnic_dev_cmd(vdev, cmd, wait);

	memcpy(args, vdev->args, nargs * sizeof(args[0]));

	return err;
}

int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	u64 *a0, u64 *a1, int wait)
{
	u64 args[2];
	int err;

	args[0] = *a0;
	args[1] = *a1;
	memset(vdev->args, 0, sizeof(vdev->args));

	switch (vdev->proxy) {
	case PROXY_BY_INDEX:
		err = vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_INDEX, cmd,
			args, ARRAY_SIZE(args), wait);
		break;
	case PROXY_BY_BDF:
		err = vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_BDF, cmd,
			args, ARRAY_SIZE(args), wait);
		break;
	case PROXY_NONE:
	default:
		err = vnic_dev_cmd_no_proxy(vdev, cmd, args, 2, wait);
		break;
	}

	if (err == 0) {
		*a0 = args[0];
		*a1 = args[1];
	}

	return err;
}
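/*
 * Illustrative call (sketch): arguments go in and results come back
 * through a0/a1, as in the vnic_dev_spec() wrapper below:
 *
 *	u64 a0 = offset, a1 = size;
 *	err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, 1000);
 *	// on success, a0 holds the requested device-config value
 */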
int vnic_dev_cmd_args(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	u64 *args, int nargs, int wait)
{
	switch (vdev->proxy) {
	case PROXY_BY_INDEX:
		return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_INDEX, cmd,
			args, nargs, wait);
	case PROXY_BY_BDF:
		return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_BDF, cmd,
			args, nargs, wait);
	case PROXY_NONE:
	default:
		return vnic_dev_cmd_no_proxy(vdev, cmd, args, nargs, wait);
	}
}

static int vnic_dev_advanced_filters_cap(struct vnic_dev *vdev, u64 *args,
	int nargs)
{
	memset(args, 0, nargs * sizeof(*args));
	args[0] = CMD_ADD_ADV_FILTER;
	args[1] = FILTER_CAP_MODE_V1_FLAG;
	return vnic_dev_cmd_args(vdev, CMD_CAPABILITY, args, nargs, 1000);
}

int vnic_dev_capable_adv_filters(struct vnic_dev *vdev)
{
	u64 a0 = CMD_ADD_ADV_FILTER, a1 = 0;
	int wait = 1000;
	int err;

	err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);
	if (err)
		return 0;
	return (a1 >= (u32)FILTER_DPDK_1);
}

/* Determine the "best" filtering mode the VIC is capable of. Returns one of
 * three values, or 0 on error:
 * FILTER_DPDK_1 - advanced filters available
 * FILTER_USNIC_IP_FLAG - advanced filters, but with the restriction that
 *	the IP layer must be explicitly specified, i.e. one cannot have a
 *	UDP filter that matches both IPv4 and IPv6.
 * FILTER_IPV4_5TUPLE - fallback if neither of the two above is available;
 *	all other filter types are unavailable.
 * Returns true in filter_tags if filter tagging is supported.
 */
int vnic_dev_capable_filter_mode(struct vnic_dev *vdev, u32 *mode,
	u8 *filter_tags)
{
	u64 args[4];
	int err;
	u32 max_level = 0;

	err = vnic_dev_advanced_filters_cap(vdev, args, 4);

	/* determine if filter tags are available */
	if (err)
		*filter_tags = 0;
	if ((args[2] == FILTER_CAP_MODE_V1) &&
	    (args[3] & FILTER_ACTION_FILTER_ID_FLAG))
		*filter_tags = 1;
	else
		*filter_tags = 0;

	if (err || ((args[0] == 1) && (args[1] == 0))) {
		/* Adv filter command not supported, or adv filters available
		 * but not enabled. Try the normal filter capability command.
		 */
		args[0] = CMD_ADD_FILTER;
		args[1] = 0;
		err = vnic_dev_cmd_args(vdev, CMD_CAPABILITY, args, 2, 1000);
		if (err)
			return err;
		max_level = args[1];
		goto parse_max_level;
	} else if (args[2] == FILTER_CAP_MODE_V1) {
		/* parse filter capability mask in args[1] */
		if (args[1] & FILTER_DPDK_1_FLAG)
			*mode = FILTER_DPDK_1;
		else if (args[1] & FILTER_USNIC_IP_FLAG)
			*mode = FILTER_USNIC_IP;
		else if (args[1] & FILTER_IPV4_5TUPLE_FLAG)
			*mode = FILTER_IPV4_5TUPLE;
		return 0;
	}
	max_level = args[1];
parse_max_level:
	if (max_level >= (u32)FILTER_USNIC_IP)
		*mode = FILTER_USNIC_IP;
	else
		*mode = FILTER_IPV4_5TUPLE;
	return 0;
}

int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd)
{
	u64 a0 = (u32)cmd, a1 = 0;
	int wait = 1000;
	int err;

	err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);

	return !(err || a0);
}

int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, size_t size,
	void *value)
{
	u64 a0, a1;
	int wait = 1000;
	int err;

	a0 = offset;
	a1 = size;

	err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);

	switch (size) {
	case 1:
		*(u8 *)value = (u8)a0;
		break;
	case 2:
		*(u16 *)value = (u16)a0;
		break;
	case 4:
		*(u32 *)value = (u32)a0;
		break;
	case 8:
		*(u64 *)value = a0;
		break;
	default:
		BUG();
		break;
	}

	return err;
}

int vnic_dev_stats_clear(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait);
}

int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
{
	u64 a0, a1;
	int wait = 1000;
	static u32 instance;
	char name[NAME_MAX];

	if (!vdev->stats) {
		snprintf((char *)name, sizeof(name),
			"vnic_stats-%u", instance++);
		vdev->stats = vdev->alloc_consistent(vdev->priv,
			sizeof(struct vnic_stats), &vdev->stats_pa,
			(u8 *)name);
		if (!vdev->stats)
			return -ENOMEM;
	}

	*stats = vdev->stats;
	a0 = vdev->stats_pa;
	a1 = sizeof(struct vnic_stats);

	return vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
}
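/*
 * Usage note (illustrative sketch): the dump buffer is owned by the
 * vnic_dev and reused across calls, so callers simply read fields from
 * the returned pointer after each dump. Field names below are assumed
 * from vnic_stats.h:
 *
 *	struct vnic_stats *stats;
 *	if (vnic_dev_stats_dump(vdev, &stats) == 0)
 *		rx_ok = stats->rx.rx_frames_ok;
 */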
int vnic_dev_close(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
}

int vnic_dev_enable_wait(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	if (vnic_dev_capable(vdev, CMD_ENABLE_WAIT))
		return vnic_dev_cmd(vdev, CMD_ENABLE_WAIT, &a0, &a1, wait);
	else
		return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
}

int vnic_dev_disable(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);
}

int vnic_dev_open(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
}

int vnic_dev_open_done(struct vnic_dev *vdev, int *done)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;

	*done = 0;

	err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
	if (err)
		return err;

	*done = (a0 == 0);

	return 0;
}
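/*
 * Typical open sequence (illustrative sketch): CMD_OPEN completes
 * asynchronously, so callers poll CMD_OPEN_STATUS via
 * vnic_dev_open_done() until the device reports completion:
 *
 *	int done = 0;
 *	err = vnic_dev_open(vdev, 0);
 *	while (!err && !done)
 *		err = vnic_dev_open_done(vdev, &done);
 */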
int vnic_dev_get_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err, i;

	for (i = 0; i < ETH_ALEN; i++)
		mac_addr[i] = 0;

	err = vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
	if (err)
		return err;

	for (i = 0; i < ETH_ALEN; i++)
		mac_addr[i] = ((u8 *)&a0)[i];

	return 0;
}

int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
	int broadcast, int promisc, int allmulti)
{
	u64 a0, a1 = 0;
	int wait = 1000;
	int err;

	a0 = (directed ? CMD_PFILTER_DIRECTED : 0) |
		(multicast ? CMD_PFILTER_MULTICAST : 0) |
		(broadcast ? CMD_PFILTER_BROADCAST : 0) |
		(promisc ? CMD_PFILTER_PROMISCUOUS : 0) |
		(allmulti ? CMD_PFILTER_ALL_MULTICAST : 0);

	err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
	if (err)
		pr_err("Can't set packet filter\n");

	return err;
}

int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		((u8 *)&a0)[i] = addr[i];

	err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
	if (err)
		pr_err("Can't add addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
			addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
			err);

	return err;
}

int vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		((u8 *)&a0)[i] = addr[i];

	err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
	if (err)
		pr_err("Can't del addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
			addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
			err);

	return err;
}

int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev,
	u8 ig_vlan_rewrite_mode)
{
	u64 a0 = ig_vlan_rewrite_mode, a1 = 0;
	int wait = 1000;

	if (vnic_dev_capable(vdev, CMD_IG_VLAN_REWRITE_MODE))
		return vnic_dev_cmd(vdev, CMD_IG_VLAN_REWRITE_MODE,
			&a0, &a1, wait);
	else
		return 0;
}

void vnic_dev_set_reset_flag(struct vnic_dev *vdev, int state)
{
	vdev->in_reset = state;
}

static inline int vnic_dev_in_reset(struct vnic_dev *vdev)
{
	return vdev->in_reset;
}

int vnic_dev_notify_setcmd(struct vnic_dev *vdev,
	void *notify_addr, dma_addr_t notify_pa, u16 intr)
{
	u64 a0, a1;
	int wait = 1000;
	int r;

	memset(notify_addr, 0, sizeof(struct vnic_devcmd_notify));
	if (!vnic_dev_in_reset(vdev)) {
		vdev->notify = notify_addr;
		vdev->notify_pa = notify_pa;
	}

	a0 = (u64)notify_pa;
	a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL;
	a1 += sizeof(struct vnic_devcmd_notify);

	r = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
	if (!vnic_dev_in_reset(vdev))
		vdev->notify_sz = (r == 0) ? (u32)a1 : 0;

	return r;
}

int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
{
	void *notify_addr = NULL;
	dma_addr_t notify_pa = 0;
	char name[NAME_MAX];
	static u32 instance;

	if (vdev->notify || vdev->notify_pa) {
		return vnic_dev_notify_setcmd(vdev, vdev->notify,
			vdev->notify_pa, intr);
	}
	if (!vnic_dev_in_reset(vdev)) {
		snprintf((char *)name, sizeof(name),
			"vnic_notify-%u", instance++);
		notify_addr = vdev->alloc_consistent(vdev->priv,
			sizeof(struct vnic_devcmd_notify),
			&notify_pa, (u8 *)name);
		if (!notify_addr)
			return -ENOMEM;
	}

	return vnic_dev_notify_setcmd(vdev, notify_addr, notify_pa, intr);
}

int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev)
{
	u64 a0, a1;
	int wait = 1000;
	int err;

	a0 = 0; /* paddr = 0 to unset notify buffer */
	a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */
	a1 += sizeof(struct vnic_devcmd_notify);

	err = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
	if (!vnic_dev_in_reset(vdev)) {
		vdev->notify = NULL;
		vdev->notify_pa = 0;
		vdev->notify_sz = 0;
	}

	return err;
}

int vnic_dev_notify_unset(struct vnic_dev *vdev)
{
	if (vdev->notify && !vnic_dev_in_reset(vdev)) {
		vdev->free_consistent(vdev->priv,
			sizeof(struct vnic_devcmd_notify),
			vdev->notify,
			vdev->notify_pa);
	}

	return vnic_dev_notify_unsetcmd(vdev);
}

static int vnic_dev_notify_ready(struct vnic_dev *vdev)
{
	u32 *words;
	unsigned int nwords = vdev->notify_sz / 4;
	unsigned int i;
	u32 csum;

	if (!vdev->notify || !vdev->notify_sz)
		return 0;

	do {
		csum = 0;
		rte_memcpy(&vdev->notify_copy, vdev->notify, vdev->notify_sz);
		words = (u32 *)&vdev->notify_copy;
		for (i = 1; i < nwords; i++)
			csum += words[i];
	} while (csum != words[0]);

	return 1;
}
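/*
 * Descriptive note on the loop above: words[0] holds a checksum that
 * firmware sets to the sum of the remaining 32-bit words, so the copy is
 * retried until a self-consistent snapshot is read. This guards against
 * tearing while firmware updates the notify area in place.
 */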
int vnic_dev_init(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;
	int r = 0;

	if (vnic_dev_capable(vdev, CMD_INIT))
		r = vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
	else {
		vnic_dev_cmd(vdev, CMD_INIT_v1, &a0, &a1, wait);
		if (a0 & CMD_INITF_DEFAULT_MAC) {
			/* Emulate these for old CMD_INIT_v1 which
			 * didn't pass a0 so no CMD_INITF_*.
			 */
			vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
			vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
		}
	}
	return r;
}

void vnic_dev_intr_coal_timer_info_default(struct vnic_dev *vdev)
{
	/* Default: hardware intr coal timer is in units of 1.5 usecs */
	vdev->intr_coal_timer_info.mul = 2;
	vdev->intr_coal_timer_info.div = 3;
	vdev->intr_coal_timer_info.max_usec =
		vnic_dev_intr_coal_timer_hw_to_usec(vdev, 0xffff);
}

int vnic_dev_link_status(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.link_state;
}

u32 vnic_dev_port_speed(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.port_speed;
}

u32 vnic_dev_intr_coal_timer_usec_to_hw(struct vnic_dev *vdev, u32 usec)
{
	return (usec * vdev->intr_coal_timer_info.mul) /
		vdev->intr_coal_timer_info.div;
}

u32 vnic_dev_intr_coal_timer_hw_to_usec(struct vnic_dev *vdev, u32 hw_cycles)
{
	return (hw_cycles * vdev->intr_coal_timer_info.div) /
		vdev->intr_coal_timer_info.mul;
}
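/*
 * Worked example with the defaults above (mul = 2, div = 3, i.e. one
 * hardware unit per 1.5 usec): a request for 6 usec programs
 * 6 * 2 / 3 = 4 hardware units, and 4 units read back as 4 * 3 / 2 = 6
 * usec. The 0xffff hardware maximum corresponds to 98302 usec, which
 * becomes intr_coal_timer_info.max_usec.
 */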
u32 vnic_dev_get_intr_coal_timer_max(struct vnic_dev *vdev)
{
	return vdev->intr_coal_timer_info.max_usec;
}

void vnic_dev_unregister(struct vnic_dev *vdev)
{
	if (vdev) {
		if (vdev->notify)
			vdev->free_consistent(vdev->priv,
				sizeof(struct vnic_devcmd_notify),
				vdev->notify,
				vdev->notify_pa);
		if (vdev->stats)
			vdev->free_consistent(vdev->priv,
				sizeof(struct vnic_stats),
				vdev->stats, vdev->stats_pa);
		if (vdev->fw_info)
			vdev->free_consistent(vdev->priv,
				sizeof(struct vnic_devcmd_fw_info),
				vdev->fw_info, vdev->fw_info_pa);
		rte_free(vdev);
	}
}

struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
	void *priv, struct rte_pci_device *pdev, struct vnic_dev_bar *bar,
	unsigned int num_bars)
{
	if (!vdev) {
		char name[NAME_MAX];
		snprintf((char *)name, sizeof(name), "%s-vnic",
			pdev->device.name);
		vdev = (struct vnic_dev *)rte_zmalloc_socket(name,
			sizeof(struct vnic_dev),
			RTE_CACHE_LINE_SIZE,
			pdev->device.numa_node);
		if (!vdev)
			return NULL;
	}

	vdev->priv = priv;
	vdev->pdev = pdev;

	if (vnic_dev_discover_res(vdev, bar, num_bars))
		goto err_out;

	vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
	if (!vdev->devcmd)
		goto err_out;

	return vdev;

err_out:
	vnic_dev_unregister(vdev);
	return NULL;
}

/*
 * vnic_dev_classifier: Add/Delete classifier entries
 * @vdev: vdev of the device
 * @cmd: CLSF_ADD for Add filter
 *	 CLSF_DEL for Delete filter
 * @entry: In case of ADD filter, the caller passes the RQ number in this
 *	   variable. This function stores the filter_id returned by the
 *	   firmware in the same variable before return.
 *	   In case of DEL filter, the caller passes the filter_id in this
 *	   variable; its contents on return are irrelevant.
 * @data: filter data
 * @action_v2: action data
 */
int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
	struct filter_v2 *data, struct filter_action_v2 *action_v2)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	dma_addr_t tlv_pa;
	int ret = -EINVAL;
	struct filter_tlv *tlv, *tlv_va;
	u64 tlv_size;
	u32 filter_size, action_size;
	static unsigned int unique_id;
	char z_name[RTE_MEMZONE_NAMESIZE];
	enum vnic_devcmd_cmd dev_cmd;

	if (cmd == CLSF_ADD) {
		dev_cmd = (data->type >= FILTER_DPDK_1) ?
			CMD_ADD_ADV_FILTER : CMD_ADD_FILTER;

		filter_size = vnic_filter_size(data);
		action_size = vnic_action_size(action_v2);

		tlv_size = filter_size + action_size +
			2 * sizeof(struct filter_tlv);
		snprintf((char *)z_name, sizeof(z_name),
			"vnic_clsf_%u", unique_id++);
		tlv_va = vdev->alloc_consistent(vdev->priv,
			tlv_size, &tlv_pa, (u8 *)z_name);
		if (!tlv_va)
			return -ENOMEM;
		tlv = tlv_va;
		a0 = tlv_pa;
		a1 = tlv_size;
		memset(tlv, 0, tlv_size);
		tlv->type = CLSF_TLV_FILTER;
		tlv->length = filter_size;
		memcpy(&tlv->val, (void *)data, filter_size);

		tlv = (struct filter_tlv *)((char *)tlv +
			sizeof(struct filter_tlv) +
			filter_size);

		tlv->type = CLSF_TLV_ACTION;
		tlv->length = action_size;
		memcpy(&tlv->val, (void *)action_v2, action_size);
		ret = vnic_dev_cmd(vdev, dev_cmd, &a0, &a1, wait);
		*entry = (u16)a0;
		vdev->free_consistent(vdev->priv, tlv_size, tlv_va, tlv_pa);
	} else if (cmd == CLSF_DEL) {
		a0 = *entry;
		ret = vnic_dev_cmd(vdev, CMD_DEL_FILTER, &a0, &a1, wait);
	}

	return ret;
}
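/*
 * Illustrative use (sketch): an ADD directs matching packets to an RQ and
 * overwrites *entry with the firmware-assigned filter id, which is what a
 * later DEL passes back down. The filter/action values are the caller's:
 *
 *	u16 entry = rq_index;
 *	err = vnic_dev_classifier(vdev, CLSF_ADD, &entry, &filter, &action);
 *	...
 *	err = vnic_dev_classifier(vdev, CLSF_DEL, &entry, NULL, NULL);
 */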