1 /* 2 * Generic virtio library for MINIX 3 3 * 4 * Copyright (c) 2013, A. Welzel, <arne.welzel@gmail.com> 5 * 6 * This software is released under the BSD license. See the LICENSE file 7 * included in the main directory of this source distribution for the 8 * license terms and conditions. 9 */ 10 11 #define _SYSTEM 1 12 13 #include <assert.h> 14 #include <errno.h> /* for OK... */ 15 #include <string.h> /* memset() */ 16 #include <stdlib.h> /* malloc() */ 17 18 #include <machine/pci.h> /* PCI_ILR, PCI_BAR... */ 19 #include <machine/vmparam.h> /* PAGE_SIZE */ 20 21 #include <minix/syslib.h> /* umap, vumap, alloc_..*/ 22 #include <minix/sysutil.h> /* panic(), at least */ 23 #include <minix/virtio.h> /* virtio system include */ 24 25 #include "virtio_ring.h" /* virtio types / helper */ 26 27 /* 28 * About indirect descriptors: 29 * 30 * For each possible thread, a single indirect descriptor table is allocated. 31 * If using direct descriptors would lead to the situation that another thread 32 * might not be able to add another descriptor to the ring, indirect descriptors 33 * are used. 34 * 35 * Indirect descriptors are pre-allocated. Each alloc_contig() call involves a 36 * kernel call which is critical for performance. 37 * 38 * The size of indirect descriptor tables is chosen based on MAPVEC_NR. A driver 39 * using this library should never add more than 40 * 41 * MAPVEC_NR + MAPVEC_NR / 2 42 * 43 * descriptors to a queue as this represent the maximum size of an indirect 44 * descriptor table. 45 */ 46 47 struct indirect_desc_table { 48 int in_use; 49 struct vring_desc *descs; 50 phys_bytes paddr; 51 size_t len; 52 }; 53 54 struct virtio_queue { 55 56 void *vaddr; /* virtual addr of ring */ 57 phys_bytes paddr; /* physical addr of ring */ 58 u32_t page; /* physical guest page */ 59 60 u16_t num; /* number of descriptors */ 61 u32_t ring_size; /* size of ring in bytes */ 62 struct vring vring; 63 64 u16_t free_num; /* free descriptors */ 65 u16_t free_head; /* next free descriptor */ 66 u16_t free_tail; /* last free descriptor */ 67 u16_t last_used; /* we checked in used */ 68 69 void **data; /* points to pointers */ 70 }; 71 72 struct virtio_device { 73 74 const char *name; /* for debugging */ 75 76 u16_t port; /* io port */ 77 78 struct virtio_feature *features; /* host / guest features */ 79 u8_t num_features; /* max 32 */ 80 81 struct virtio_queue *queues; /* our queues */ 82 u16_t num_queues; 83 84 int irq; /* interrupt line */ 85 int irq_hook; /* hook id */ 86 int msi; /* is MSI enabled? */ 87 88 int threads; /* max number of threads */ 89 90 struct indirect_desc_table *indirect; /* indirect descriptor tables */ 91 int num_indirect; 92 }; 93 94 static int is_matching_device(u16_t expected_sdid, u16_t vid, u16_t sdid); 95 static int init_device(int devind, struct virtio_device *dev); 96 static int init_phys_queues(struct virtio_device *dev); 97 static int exchange_features(struct virtio_device *dev); 98 static int alloc_phys_queue(struct virtio_queue *q); 99 static void free_phys_queue(struct virtio_queue *q); 100 static void init_phys_queue(struct virtio_queue *q); 101 static int init_indirect_desc_table(struct indirect_desc_table *desc); 102 static int init_indirect_desc_tables(struct virtio_device *dev); 103 static void virtio_irq_register(struct virtio_device *dev); 104 static void virtio_irq_unregister(struct virtio_device *dev); 105 static int wants_kick(struct virtio_queue *q); 106 static void kick_queue(struct virtio_device *dev, int qidx); 107 108 struct virtio_device * 109 virtio_setup_device(u16_t subdevid, const char *name, 110 struct virtio_feature *features, int num_features, 111 int threads, int skip) 112 { 113 int r, devind; 114 u16_t vid, did, sdid; 115 struct virtio_device *ret; 116 117 /* bogus values? */ 118 if (skip < 0 || name == NULL || num_features < 0 || threads <= 0) 119 return NULL; 120 121 pci_init(); 122 123 r = pci_first_dev(&devind, &vid, &did); 124 125 while (r > 0) { 126 sdid = pci_attr_r16(devind, PCI_SUBDID); 127 if (is_matching_device(subdevid, vid, sdid)) { 128 129 /* this is the device we are looking for */ 130 if (skip == 0) 131 break; 132 133 skip--; 134 } 135 136 r = pci_next_dev(&devind, &vid, &did); 137 } 138 139 /* pci_[first|next_dev()] return 0 if no device was found */ 140 if (r == 0 || skip > 0) 141 return NULL; 142 143 /* allocate and set known info about the device */ 144 ret = malloc(sizeof(*ret)); 145 146 if (ret == NULL) 147 return NULL; 148 149 /* Prepare virtio_device intance */ 150 memset(ret, 0, sizeof(*ret)); 151 ret->name = name; 152 ret->features = features; 153 ret->num_features = num_features; 154 ret->threads = threads; 155 /* see comment in the beginning of this file */ 156 ret->num_indirect = threads; 157 158 if (init_device(devind, ret) != OK) { 159 printf("%s: Could not initialize device\n", ret->name); 160 goto err; 161 } 162 163 /* Ack the device */ 164 virtio_write8(ret, VIRTIO_DEV_STATUS_OFF, VIRTIO_STATUS_ACK); 165 166 if (exchange_features(ret) != OK) { 167 printf("%s: Could not exchange features\n", ret->name); 168 goto err; 169 } 170 171 if (init_indirect_desc_tables(ret) != OK) { 172 printf("%s: Could not initialize indirect tables\n", ret->name); 173 goto err; 174 } 175 176 /* We know how to drive the device... */ 177 virtio_write8(ret, VIRTIO_DEV_STATUS_OFF, VIRTIO_STATUS_DRV); 178 179 return ret; 180 181 /* Error path */ 182 err: 183 free(ret); 184 return NULL; 185 } 186 187 static int 188 init_device(int devind, struct virtio_device *dev) 189 { 190 u32_t base, size; 191 int iof, r; 192 193 pci_reserve(devind); 194 195 if ((r = pci_get_bar(devind, PCI_BAR, &base, &size, &iof)) != OK) { 196 printf("%s: Could not get BAR (%d)", dev->name, r); 197 return r; 198 } 199 200 if (!iof) { 201 printf("%s: PCI not IO space?", dev->name); 202 return EINVAL; 203 } 204 205 if (base & 0xFFFF0000) { 206 printf("%s: IO port weird (%08x)", dev->name, base); 207 return EINVAL; 208 } 209 210 /* store the I/O port */ 211 dev->port = base; 212 213 /* Reset the device */ 214 virtio_write8(dev, VIRTIO_DEV_STATUS_OFF, 0); 215 216 /* Read IRQ line */ 217 dev->irq = pci_attr_r8(devind, PCI_ILR); 218 219 return OK; 220 } 221 222 static int 223 exchange_features(struct virtio_device *dev) 224 { 225 u32_t guest_features = 0, host_features = 0; 226 struct virtio_feature *f; 227 228 host_features = virtio_read32(dev, VIRTIO_HOST_F_OFF); 229 230 for (int i = 0; i < dev->num_features; i++) { 231 f = &dev->features[i]; 232 233 /* prepare the features the driver supports */ 234 guest_features |= (f->guest_support << f->bit); 235 236 /* just load the host feature int the struct */ 237 f->host_support = ((host_features >> f->bit) & 1); 238 } 239 240 /* let the device know about our features */ 241 virtio_write32(dev, VIRTIO_GUEST_F_OFF, guest_features); 242 243 return OK; 244 } 245 246 int 247 virtio_alloc_queues(struct virtio_device *dev, int num_queues) 248 { 249 int r = OK; 250 251 assert(dev != NULL); 252 253 /* Assume there's no device with more than 256 queues */ 254 if (num_queues < 0 || num_queues > 256) 255 return EINVAL; 256 257 dev->num_queues = num_queues; 258 /* allocate queue memory */ 259 dev->queues = malloc(num_queues * sizeof(dev->queues[0])); 260 261 if (dev->queues == NULL) 262 return ENOMEM; 263 264 memset(dev->queues, 0, num_queues * sizeof(dev->queues[0])); 265 266 if ((r = init_phys_queues(dev) != OK)) { 267 printf("%s: Could not initialize queues (%d)\n", dev->name, r); 268 free(dev->queues); 269 dev->queues = NULL; 270 } 271 272 return r; 273 } 274 275 static int 276 init_phys_queues(struct virtio_device *dev) 277 { 278 /* Initialize all queues */ 279 int i, j, r; 280 struct virtio_queue *q; 281 282 for (i = 0; i < dev->num_queues; i++) { 283 q = &dev->queues[i]; 284 /* select the queue */ 285 virtio_write16(dev, VIRTIO_QSEL_OFF, i); 286 q->num = virtio_read16(dev, VIRTIO_QSIZE_OFF); 287 288 if (q->num & (q->num - 1)) { 289 printf("%s: Queue %d num=%d not ^2", dev->name, i, 290 q->num); 291 r = EINVAL; 292 goto free_phys_queues; 293 } 294 295 if ((r = alloc_phys_queue(q)) != OK) 296 goto free_phys_queues; 297 298 init_phys_queue(q); 299 300 /* Let the host know about the guest physical page */ 301 virtio_write32(dev, VIRTIO_QADDR_OFF, q->page); 302 } 303 304 return OK; 305 306 /* Error path */ 307 free_phys_queues: 308 for (j = 0; j < i; j++) 309 free_phys_queue(&dev->queues[i]); 310 311 return r; 312 } 313 314 static int 315 alloc_phys_queue(struct virtio_queue *q) 316 { 317 assert(q != NULL); 318 319 /* How much memory do we need? */ 320 q->ring_size = vring_size(q->num, PAGE_SIZE); 321 322 q->vaddr = alloc_contig(q->ring_size, AC_ALIGN4K, &q->paddr); 323 324 if (q->vaddr == NULL) 325 return ENOMEM; 326 327 q->data = alloc_contig(sizeof(q->data[0]) * q->num, AC_ALIGN4K, NULL); 328 329 if (q->data == NULL) { 330 free_contig(q->vaddr, q->ring_size); 331 q->vaddr = NULL; 332 q->paddr = 0; 333 return ENOMEM; 334 } 335 336 return OK; 337 } 338 339 void 340 virtio_device_ready(struct virtio_device *dev) 341 { 342 assert(dev != NULL); 343 344 /* Register IRQ line */ 345 virtio_irq_register(dev); 346 347 /* Driver is ready to go! */ 348 virtio_write8(dev, VIRTIO_DEV_STATUS_OFF, VIRTIO_STATUS_DRV_OK); 349 } 350 351 void 352 virtio_free_queues(struct virtio_device *dev) 353 { 354 int i; 355 assert(dev != NULL); 356 assert(dev->queues != NULL); 357 assert(dev->num_queues > 0); 358 359 for (i = 0; i < dev->num_queues; i++) 360 free_phys_queue(&dev->queues[i]); 361 362 dev->num_queues = 0; 363 dev->queues = NULL; 364 } 365 366 static void 367 free_phys_queue(struct virtio_queue *q) 368 { 369 assert(q != NULL); 370 assert(q->vaddr != NULL); 371 372 free_contig(q->vaddr, q->ring_size); 373 q->vaddr = NULL; 374 q->paddr = 0; 375 q->num = 0; 376 free_contig(q->data, sizeof(q->data[0])); 377 q->data = NULL; 378 } 379 380 static void 381 init_phys_queue(struct virtio_queue *q) 382 { 383 memset(q->vaddr, 0, q->ring_size); 384 memset(q->data, 0, sizeof(q->data[0]) * q->num); 385 386 /* physical page in guest */ 387 q->page = q->paddr / PAGE_SIZE; 388 389 /* Set pointers in q->vring according to size */ 390 vring_init(&q->vring, q->num, q->vaddr, PAGE_SIZE); 391 392 /* Everything's free at this point */ 393 for (int i = 0; i < q->num; i++) { 394 q->vring.desc[i].flags = VRING_DESC_F_NEXT; 395 q->vring.desc[i].next = (i + 1) & (q->num - 1); 396 } 397 398 q->free_num = q->num; 399 q->free_head = 0; 400 q->free_tail = q->num - 1; 401 q->last_used = 0; 402 403 return; 404 } 405 406 void 407 virtio_free_device(struct virtio_device *dev) 408 { 409 int i; 410 struct indirect_desc_table *desc; 411 412 assert(dev != NULL); 413 414 assert(dev->num_indirect > 0); 415 416 for (i = 0; i < dev->num_indirect; i++) { 417 desc = &dev->indirect[i]; 418 free_contig(desc->descs, desc->len); 419 } 420 421 dev->num_indirect = 0; 422 423 assert(dev->indirect != NULL); 424 free(dev->indirect); 425 dev->indirect = NULL; 426 427 free(dev); 428 } 429 430 static int 431 init_indirect_desc_table(struct indirect_desc_table *desc) 432 { 433 desc->in_use = 0; 434 desc->len = (MAPVEC_NR + MAPVEC_NR / 2) * sizeof(struct vring_desc); 435 436 desc->descs = alloc_contig(desc->len, AC_ALIGN4K, &desc->paddr); 437 memset(desc->descs, 0, desc->len); 438 439 if (desc->descs == NULL) 440 return ENOMEM; 441 442 return OK; 443 } 444 445 static int 446 init_indirect_desc_tables(struct virtio_device *dev) 447 { 448 int i, j, r; 449 struct indirect_desc_table *desc; 450 451 dev->indirect = malloc(dev->num_indirect * sizeof(dev->indirect[0])); 452 453 if (dev->indirect == NULL) { 454 printf("%s: Could not allocate indirect tables\n", dev->name); 455 return ENOMEM; 456 } 457 458 memset(dev->indirect, 0, dev->num_indirect* sizeof(dev->indirect[0])); 459 460 for (i = 0; i < dev->num_indirect; i++) { 461 desc = &dev->indirect[i]; 462 if ((r = init_indirect_desc_table(desc)) != OK) { 463 464 /* error path */ 465 for (j = 0; j < i; j++) { 466 desc = &dev->indirect[j]; 467 free_contig(desc->descs, desc->len); 468 } 469 470 free(dev->indirect); 471 472 return r; 473 } 474 } 475 476 return OK; 477 } 478 479 static void 480 clear_indirect_table(struct virtio_device *dev, struct vring_desc *vd) 481 { 482 int i; 483 struct indirect_desc_table *desc; 484 485 assert(vd->len > 0); 486 assert(vd->flags & VRING_DESC_F_INDIRECT); 487 vd->flags = vd->flags & ~VRING_DESC_F_INDIRECT; 488 vd->len = 0;; 489 490 for (i = 0; i < dev->num_indirect; i++) { 491 desc = &dev->indirect[i]; 492 493 if (desc->paddr == vd->addr) { 494 assert(desc->in_use); 495 desc->in_use = 0; 496 break; 497 } 498 } 499 500 if (i >= dev->num_indirect) 501 panic("Could not clear indirect descriptor table "); 502 } 503 504 505 static void inline 506 use_vring_desc(struct vring_desc *vd, struct vumap_phys *vp) 507 { 508 vd->addr = vp->vp_addr & ~1UL; 509 vd->len = vp->vp_size; 510 vd->flags = VRING_DESC_F_NEXT; 511 512 if (vp->vp_addr & 1) 513 vd->flags |= VRING_DESC_F_WRITE; 514 } 515 516 static void 517 set_indirect_descriptors(struct virtio_device *dev, struct virtio_queue *q, 518 struct vumap_phys *bufs, size_t num) 519 { 520 /* Indirect descriptor tables are simply filled from left to right */ 521 int i; 522 struct indirect_desc_table *desc; 523 struct vring *vring = &q->vring; 524 struct vring_desc *vd, *ivd; 525 526 /* Find the first unused indirect descriptor table */ 527 for (i = 0; i < dev->num_indirect; i++) { 528 desc = &dev->indirect[i]; 529 530 /* If an unused indirect descriptor table was found, 531 * mark it as being used and exit the loop. 532 */ 533 if (!desc->in_use) { 534 desc->in_use = 1; 535 break; 536 } 537 } 538 539 /* Sanity check */ 540 if (i >= dev->num_indirect) 541 panic("No indirect descriptor tables left"); 542 543 /* For indirect descriptor tables, only a single descriptor from 544 * the main ring is used. 545 */ 546 vd = &vring->desc[q->free_head]; 547 vd->flags = VRING_DESC_F_INDIRECT; 548 vd->addr = desc->paddr; 549 vd->len = num * sizeof(desc->descs[0]); 550 551 /* Initialize the descriptors in the indirect descriptor table */ 552 for (i = 0; i < (int)num; i++) { 553 ivd = &desc->descs[i]; 554 555 use_vring_desc(ivd, &bufs[i]); 556 ivd->next = i + 1; 557 } 558 559 /* Unset the next bit of the last descriptor */ 560 ivd->flags = ivd->flags & ~VRING_DESC_F_NEXT; 561 562 /* Update queue, only a single descriptor was used */ 563 q->free_num -= 1; 564 q->free_head = vd->next; 565 } 566 567 static void 568 set_direct_descriptors(struct virtio_queue *q, struct vumap_phys *bufs, 569 size_t num) 570 { 571 u16_t i; 572 size_t count; 573 struct vring *vring = &q->vring; 574 struct vring_desc *vd; 575 576 for (i = q->free_head, count = 0; count < num; count++) { 577 578 /* The next free descriptor */ 579 vd = &vring->desc[i]; 580 581 /* The descriptor is linked in the free list, so 582 * it always has the next bit set. 583 */ 584 assert(vd->flags & VRING_DESC_F_NEXT); 585 586 use_vring_desc(vd, &bufs[count]); 587 i = vd->next; 588 } 589 590 /* Unset the next bit of the last descriptor */ 591 vd->flags = vd->flags & ~VRING_DESC_F_NEXT; 592 593 /* Update queue */ 594 q->free_num -= num; 595 q->free_head = i; 596 } 597 598 int 599 virtio_to_queue(struct virtio_device *dev, int qidx, struct vumap_phys *bufs, 600 size_t num, void *data) 601 { 602 u16_t free_first; 603 int left; 604 struct virtio_queue *q = &dev->queues[qidx]; 605 struct vring *vring = &q->vring; 606 607 assert(0 <= qidx && qidx <= dev->num_queues); 608 609 if (!data) 610 panic("%s: NULL data received queue %d", dev->name, qidx); 611 612 free_first = q->free_head; 613 614 left = (int)q->free_num - (int)num; 615 616 if (left < dev->threads) 617 set_indirect_descriptors(dev, q, bufs, num); 618 else 619 set_direct_descriptors(q, bufs, num); 620 621 /* Next index for host is old free_head */ 622 vring->avail->ring[vring->avail->idx % q->num] = free_first; 623 624 /* Provided by the caller to identify this slot */ 625 q->data[free_first] = data; 626 627 /* Make sure the host sees the new descriptors */ 628 __insn_barrier(); 629 630 /* advance last idx */ 631 vring->avail->idx += 1; 632 633 /* Make sure the host sees the avail->idx */ 634 __insn_barrier(); 635 636 /* kick it! */ 637 kick_queue(dev, qidx); 638 return 0; 639 } 640 641 int 642 virtio_from_queue(struct virtio_device *dev, int qidx, void **data) 643 { 644 struct virtio_queue *q; 645 struct vring *vring; 646 struct vring_used_elem *uel; 647 struct vring_desc *vd; 648 int count = 0; 649 u16_t idx; 650 u16_t used_idx; 651 652 assert(0 <= qidx && qidx < dev->num_queues); 653 654 q = &dev->queues[qidx]; 655 vring = &q->vring; 656 657 /* Make sure we see changes done by the host */ 658 __insn_barrier(); 659 660 /* The index from the host */ 661 used_idx = vring->used->idx % q->num; 662 663 /* We already saw this one, nothing to do here */ 664 if (q->last_used == used_idx) 665 return -1; 666 667 /* Get the vring_used element */ 668 uel = &q->vring.used->ring[q->last_used]; 669 670 /* Update the last used element */ 671 q->last_used = (q->last_used + 1) % q->num; 672 673 /* index of the used element */ 674 idx = uel->id % q->num; 675 676 assert(q->data[idx] != NULL); 677 678 /* Get the descriptor */ 679 vd = &vring->desc[idx]; 680 681 /* Unconditionally set the tail->next to the first used one */ 682 assert(vring->desc[q->free_tail].flags & VRING_DESC_F_NEXT); 683 vring->desc[q->free_tail].next = idx; 684 685 /* Find the last index, eventually there has to be one 686 * without a the next flag. 687 * 688 * FIXME: Protect from endless loop 689 */ 690 while (vd->flags & VRING_DESC_F_NEXT) { 691 692 if (vd->flags & VRING_DESC_F_INDIRECT) 693 clear_indirect_table(dev, vd); 694 695 idx = vd->next; 696 vd = &vring->desc[idx]; 697 count++; 698 } 699 700 /* Didn't count the last one */ 701 count++; 702 703 if (vd->flags & VRING_DESC_F_INDIRECT) 704 clear_indirect_table(dev, vd); 705 706 /* idx points to the tail now, update the queue */ 707 q->free_tail = idx; 708 assert(!(vd->flags & VRING_DESC_F_NEXT)); 709 710 /* We can always connect the tail with the head */ 711 vring->desc[q->free_tail].next = q->free_head; 712 vring->desc[q->free_tail].flags = VRING_DESC_F_NEXT; 713 714 q->free_num += count; 715 716 assert(q->free_num <= q->num); 717 718 *data = q->data[uel->id]; 719 q->data[uel->id] = NULL; 720 721 return 0; 722 } 723 724 int 725 virtio_had_irq(struct virtio_device *dev) 726 { 727 return virtio_read8(dev, VIRTIO_ISR_STATUS_OFF) & 1; 728 } 729 730 void 731 virtio_reset_device(struct virtio_device *dev) 732 { 733 virtio_irq_unregister(dev); 734 virtio_write8(dev, VIRTIO_DEV_STATUS_OFF, 0); 735 } 736 737 738 void 739 virtio_irq_enable(struct virtio_device *dev) 740 { 741 int r; 742 if ((r = sys_irqenable(&dev->irq_hook) != OK)) 743 panic("%s Unable to enable IRQ %d", dev->name, r); 744 } 745 746 void 747 virtio_irq_disable(struct virtio_device *dev) 748 { 749 int r; 750 if ((r = sys_irqdisable(&dev->irq_hook) != OK)) 751 panic("%s: Unable to disable IRQ %d", dev->name, r); 752 } 753 754 static int 755 wants_kick(struct virtio_queue *q) 756 { 757 assert(q != NULL); 758 return !(q->vring.used->flags & VRING_USED_F_NO_NOTIFY); 759 } 760 761 static void 762 kick_queue(struct virtio_device *dev, int qidx) 763 { 764 assert(0 <= qidx && qidx < dev->num_queues); 765 766 if (wants_kick(&dev->queues[qidx])) 767 virtio_write16(dev, VIRTIO_QNOTFIY_OFF, qidx); 768 769 return; 770 } 771 772 static int 773 is_matching_device(u16_t expected_sdid, u16_t vid, u16_t sdid) 774 { 775 return vid == VIRTIO_VENDOR_ID && sdid == expected_sdid; 776 } 777 778 static void 779 virtio_irq_register(struct virtio_device *dev) 780 { 781 int r; 782 if ((r = sys_irqsetpolicy(dev->irq, 0, &dev->irq_hook) != OK)) 783 panic("%s: Unable to register IRQ %d", dev->name, r); 784 } 785 786 static void 787 virtio_irq_unregister(struct virtio_device *dev) 788 { 789 int r; 790 if ((r = sys_irqrmpolicy(&dev->irq_hook) != OK)) 791 panic("%s: Unable to unregister IRQ %d", dev->name, r); 792 } 793 794 static int 795 _supports(struct virtio_device *dev, int bit, int host) 796 { 797 for (int i = 0; i < dev->num_features; i++) { 798 struct virtio_feature *f = &dev->features[i]; 799 800 if (f->bit == bit) 801 return host ? f->host_support : f->guest_support; 802 } 803 804 panic("%s: Feature not found bit=%d", dev->name, bit); 805 } 806 807 int 808 virtio_host_supports(struct virtio_device *dev, int bit) 809 { 810 return _supports(dev, bit, 1); 811 } 812 813 int 814 virtio_guest_supports(struct virtio_device *dev, int bit) 815 { 816 return _supports(dev, bit, 0); 817 } 818 819 820 /* Just some wrappers around sys_read */ 821 #define VIRTIO_READ_XX(xx, suff) \ 822 u##xx##_t \ 823 virtio_read##xx(struct virtio_device *dev, i32_t off) \ 824 { \ 825 int r; \ 826 u32_t ret; \ 827 if ((r = sys_in##suff(dev->port + off, &ret)) != OK) \ 828 panic("%s: Read failed %d %d r=%d", dev->name, \ 829 dev->port, \ 830 off, \ 831 r); \ 832 \ 833 return ret; \ 834 } 835 836 VIRTIO_READ_XX(32, l) 837 VIRTIO_READ_XX(16, w) 838 VIRTIO_READ_XX(8, b) 839 840 /* Just some wrappers around sys_write */ 841 #define VIRTIO_WRITE_XX(xx, suff) \ 842 void \ 843 virtio_write##xx(struct virtio_device *dev, i32_t off, u##xx##_t val) \ 844 { \ 845 int r; \ 846 if ((r = sys_out##suff(dev->port + off, val)) != OK) \ 847 panic("%s: Write failed %d %d r=%d", dev->name, \ 848 dev->port, \ 849 off, \ 850 r); \ 851 } 852 853 VIRTIO_WRITE_XX(32, l) 854 VIRTIO_WRITE_XX(16, w) 855 VIRTIO_WRITE_XX(8, b) 856 857 /* Just some wrappers around sys_read */ 858 #define VIRTIO_SREAD_XX(xx, suff) \ 859 u##xx##_t \ 860 virtio_sread##xx(struct virtio_device *dev, i32_t off) \ 861 { \ 862 int r; \ 863 u32_t ret; \ 864 off += VIRTIO_DEV_SPECIFIC_OFF; \ 865 \ 866 if (dev->msi) \ 867 off += VIRTIO_MSI_ADD_OFF; \ 868 \ 869 if ((r = sys_in##suff(dev->port + off, &ret)) != OK) \ 870 panic("%s: Read failed %d %d r=%d", dev->name, \ 871 dev->port, \ 872 off, \ 873 r); \ 874 \ 875 return ret; \ 876 } 877 878 VIRTIO_SREAD_XX(32, l) 879 VIRTIO_SREAD_XX(16, w) 880 VIRTIO_SREAD_XX(8, b) 881 882 /* Just some wrappers around sys_write */ 883 #define VIRTIO_SWRITE_XX(xx, suff) \ 884 void \ 885 virtio_swrite##xx(struct virtio_device *dev, i32_t off, u##xx##_t val) \ 886 { \ 887 int r; \ 888 off += VIRTIO_DEV_SPECIFIC_OFF; \ 889 \ 890 if (dev->msi) \ 891 off += VIRTIO_MSI_ADD_OFF; \ 892 \ 893 if ((r = sys_out##suff(dev->port + off, val)) != OK) \ 894 panic("%s: Write failed %d %d r=%d", dev->name, \ 895 dev->port, \ 896 off, \ 897 r); \ 898 } 899 900 VIRTIO_SWRITE_XX(32, l) 901 VIRTIO_SWRITE_XX(16, w) 902 VIRTIO_SWRITE_XX(8, b) 903