/*	$NetBSD: virtio.c,v 1.10 2015/10/15 02:40:38 ozaki-r Exp $	*/

/*
 * Copyright (c) 2010 Minoura Makoto.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: virtio.c,v 1.10 2015/10/15 02:40:38 ozaki-r Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/atomic.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/kmem.h>

#include <dev/pci/pcidevs.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/pci/virtioreg.h>
#include <dev/pci/virtiovar.h>

#define MINSEG_INDIRECT		2 /* use indirect if nsegs >= this value */

static int	virtio_match(device_t, cfdata_t, void *);
static void	virtio_attach(device_t, device_t, void *);
static int	virtio_detach(device_t, int);
static int	virtio_intr(void *arg);
static void	virtio_soft_intr(void *arg);
static void	virtio_init_vq(struct virtio_softc *,
		    struct virtqueue *, const bool);

CFATTACH_DECL3_NEW(virtio, sizeof(struct virtio_softc),
    virtio_match, virtio_attach, virtio_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);

static void
virtio_set_status(struct virtio_softc *sc, int status)
{
	int old = 0;

	if (status != 0)
		old = bus_space_read_1(sc->sc_iot, sc->sc_ioh,
		    VIRTIO_CONFIG_DEVICE_STATUS);
	bus_space_write_1(sc->sc_iot, sc->sc_ioh, VIRTIO_CONFIG_DEVICE_STATUS,
	    status|old);
}

#define virtio_device_reset(sc)	virtio_set_status((sc), 0)

static int
virtio_match(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa;

	pa = (struct pci_attach_args *)aux;
	switch (PCI_VENDOR(pa->pa_id)) {
	case PCI_VENDOR_QUMRANET:
		if ((PCI_PRODUCT_QUMRANET_VIRTIO_1000 <=
		     PCI_PRODUCT(pa->pa_id)) &&
		    (PCI_PRODUCT(pa->pa_id) <=
		     PCI_PRODUCT_QUMRANET_VIRTIO_103F))
			return 1;
		break;
	}

	return 0;
}

static const char *virtio_device_name[] = {
	"Unknown (0)",		/* 0 */
	"Network",		/* 1 */
	"Block",		/* 2 */
	"Console",		/* 3 */
	"Entropy",		/* 4 */
	"Memory Balloon",	/* 5 */
	"Unknown (6)",		/* 6 */
	"Unknown (7)",		/* 7 */
	"Unknown (8)",		/* 8 */
	"9P Transport"		/* 9 */
};
#define NDEVNAMES	(sizeof(virtio_device_name)/sizeof(char*))
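
/*
 * Device status handshake, summarised from the attach/reinit paths below
 * (an illustrative sketch only, not an additional interface):
 *	virtio_device_reset(sc);			// status <- 0
 *	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
 *	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);
 *	<negotiate features, set up virtqueues>;
 *	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
 * On any failure VIRTIO_CONFIG_DEVICE_STATUS_FAILED is written instead.
 * Note that virtio_set_status() ORs the new bits into the current status;
 * only virtio_device_reset() clears it.
 */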

static void
virtio_attach(device_t parent, device_t self, void *aux)
{
	struct virtio_softc *sc = device_private(self);
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	int revision;
	pcireg_t id;
	char const *intrstr;
	pci_intr_handle_t ih;
	char intrbuf[PCI_INTRSTR_LEN];

	revision = PCI_REVISION(pa->pa_class);
	if (revision != 0) {
		aprint_normal(": unknown revision 0x%02x; giving up\n",
		    revision);
		return;
	}
	aprint_normal("\n");
	aprint_naive("\n");

	/* the PCI subsystem ID identifies the virtio device type */
	id = pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG);
	aprint_normal_dev(self, "Virtio %s Device (rev. 0x%02x)\n",
	    (PCI_SUBSYS_ID(id) < NDEVNAMES ?
	     virtio_device_name[PCI_SUBSYS_ID(id)] : "Unknown"),
	    revision);

	sc->sc_dev = self;
	sc->sc_pc = pc;
	sc->sc_tag = tag;
	sc->sc_iot = pa->pa_iot;
	sc->sc_dmat = pa->pa_dmat;
	sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;

	if (pci_mapreg_map(pa, PCI_MAPREG_START, PCI_MAPREG_TYPE_IO, 0,
	    &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_iosize)) {
		aprint_error_dev(self, "can't map i/o space\n");
		return;
	}

	virtio_device_reset(sc);
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);

	/* XXX: use softc as aux... */
	sc->sc_childdevid = PCI_SUBSYS_ID(id);
	sc->sc_child = NULL;
	config_found(self, sc, NULL);
	if (sc->sc_child == NULL) {
		aprint_error_dev(self,
		    "no matching child driver; not configured\n");
		return;
	}
	if (sc->sc_child == (void*)1) { /* this indicates an error */
		aprint_error_dev(self,
		    "virtio configuration failed\n");
		virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
		return;
	}

	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(self, "couldn't map interrupt\n");
		virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
		return;
	}

	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));

	if (sc->sc_flags & VIRTIO_F_PCI_INTR_MPSAFE)
		pci_intr_setattr(pc, &ih, PCI_INTR_MPSAFE, true);

	sc->sc_ih = pci_intr_establish_xname(pc, ih, sc->sc_ipl, virtio_intr,
	    sc, device_xname(sc->sc_dev));

	if (sc->sc_ih == NULL) {
		aprint_error_dev(self, "couldn't establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
		return;
	}
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	sc->sc_soft_ih = NULL;
	if (sc->sc_flags & VIRTIO_F_PCI_INTR_SOFTINT) {
		u_int flags = SOFTINT_NET;
		if (sc->sc_flags & VIRTIO_F_PCI_INTR_MPSAFE)
			flags |= SOFTINT_MPSAFE;

		sc->sc_soft_ih = softint_establish(flags, virtio_soft_intr, sc);
		if (sc->sc_soft_ih == NULL)
			aprint_error(": failed to establish soft interrupt\n");
	}

	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);

	return;
}
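
/*
 * Contract with the child device (a sketch inferred from how the softc
 * fields are used in this file, not a formal interface description):
 * virtio_attach() passes its own softc as the attach aux (see the XXX
 * above).  Before its attach function returns, the child is expected to
 *	- set sc->sc_child to its own device_t (or to (void*)1 on failure),
 *	- allocate its virtqueues and point sc->sc_vqs/sc_nvqs at them,
 *	- fill in sc->sc_ipl, sc->sc_flags, sc->sc_intrhand and, optionally,
 *	  sc->sc_config_change,
 * so that the interrupt established afterwards can be dispatched to it.
 */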

static int
virtio_detach(device_t self, int flags)
{
	struct virtio_softc *sc = device_private(self);
	int r;

	if (sc->sc_child != 0 && sc->sc_child != (void*)1) {
		r = config_detach(sc->sc_child, flags);
		if (r)
			return r;
	}
	KASSERT(sc->sc_child == 0 || sc->sc_child == (void*)1);
	KASSERT(sc->sc_vqs == 0);
	if (sc->sc_ih != NULL) {
		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
		sc->sc_ih = NULL;
	}
	if (sc->sc_iosize)
		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_iosize);
	sc->sc_iosize = 0;

	return 0;
}

/*
 * Reset the device.
 */
/*
 * To reset the device to a known state, do the following:
 *	virtio_reset(sc);	     // this will stop the device activity
 *	<dequeue finished requests>; // virtio_dequeue() still can be called
 *	<revoke pending requests in the vqs if any>;
 *	virtio_reinit_start(sc);     // dequeue prohibited
 *	newfeatures = virtio_negotiate_features(sc, requestedfeatures);
 *	<some other initialization>;
 *	virtio_reinit_end(sc);	     // device activated; enqueue allowed
 * Once attached, features can be renegotiated only after a virtio_reset().
 */
void
virtio_reset(struct virtio_softc *sc)
{
	virtio_device_reset(sc);
}

void
virtio_reinit_start(struct virtio_softc *sc)
{
	int i;

	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);
	for (i = 0; i < sc->sc_nvqs; i++) {
		int n;
		struct virtqueue *vq = &sc->sc_vqs[i];
		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
		    VIRTIO_CONFIG_QUEUE_SELECT,
		    vq->vq_index);
		n = bus_space_read_2(sc->sc_iot, sc->sc_ioh,
		    VIRTIO_CONFIG_QUEUE_SIZE);
		if (n == 0)	/* vq disappeared */
			continue;
		if (n != vq->vq_num) {
			panic("%s: virtqueue size changed, vq index %d\n",
			    device_xname(sc->sc_dev),
			    vq->vq_index);
		}
		virtio_init_vq(sc, vq, true);
		bus_space_write_4(sc->sc_iot, sc->sc_ioh,
		    VIRTIO_CONFIG_QUEUE_ADDRESS,
		    (vq->vq_dmamap->dm_segs[0].ds_addr
		     / VIRTIO_PAGE_SIZE));
	}
}

void
virtio_reinit_end(struct virtio_softc *sc)
{
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
}

/*
 * Feature negotiation.
 */
uint32_t
virtio_negotiate_features(struct virtio_softc *sc, uint32_t guest_features)
{
	uint32_t r;

	if (!(device_cfdata(sc->sc_dev)->cf_flags & 1) &&
	    !(device_cfdata(sc->sc_child)->cf_flags & 1)) /* XXX */
		guest_features |= VIRTIO_F_RING_INDIRECT_DESC;
	r = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_DEVICE_FEATURES);
	r &= guest_features;
	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_GUEST_FEATURES, r);
	sc->sc_features = r;
	if (r & VIRTIO_F_RING_INDIRECT_DESC)
		sc->sc_indirect = true;
	else
		sc->sc_indirect = false;

	return r;
}
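
/*
 * Example (an illustrative sketch only; MYDEV_F_SOME_FEATURE stands for a
 * hypothetical child-driver feature bit and is not defined in this file):
 *	uint32_t features;
 *
 *	features = virtio_negotiate_features(sc, MYDEV_F_SOME_FEATURE);
 *	if (features & MYDEV_F_SOME_FEATURE)
 *		<the host also offers the feature; enable it>;
 * The returned value is the intersection of the host's and the guest's
 * feature bits and is also stored in sc->sc_features.
 */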

/*
 * Device configuration registers.
 */
uint8_t
virtio_read_device_config_1(struct virtio_softc *sc, int index)
{
	return bus_space_read_1(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index);
}

uint16_t
virtio_read_device_config_2(struct virtio_softc *sc, int index)
{
	return bus_space_read_2(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index);
}

uint32_t
virtio_read_device_config_4(struct virtio_softc *sc, int index)
{
	return bus_space_read_4(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index);
}

uint64_t
virtio_read_device_config_8(struct virtio_softc *sc, int index)
{
	uint64_t r;

	r = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index + sizeof(uint32_t));
	r <<= 32;
	r += bus_space_read_4(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index);
	return r;
}

void
virtio_write_device_config_1(struct virtio_softc *sc,
    int index, uint8_t value)
{
	bus_space_write_1(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index, value);
}

void
virtio_write_device_config_2(struct virtio_softc *sc,
    int index, uint16_t value)
{
	bus_space_write_2(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index, value);
}

void
virtio_write_device_config_4(struct virtio_softc *sc,
    int index, uint32_t value)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index, value);
}

void
virtio_write_device_config_8(struct virtio_softc *sc,
    int index, uint64_t value)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index,
	    value & 0xffffffff);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index + sizeof(uint32_t),
	    value >> 32);
}

/*
 * Interrupt handler.
 */
static int
virtio_intr(void *arg)
{
	struct virtio_softc *sc = arg;
	int isr, r = 0;

	/* check and ack the interrupt */
	isr = bus_space_read_1(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_ISR_STATUS);
	if (isr == 0)
		return 0;
	if ((isr & VIRTIO_CONFIG_ISR_CONFIG_CHANGE) &&
	    (sc->sc_config_change != NULL))
		r = (sc->sc_config_change)(sc);
	if (sc->sc_intrhand != NULL) {
		if (sc->sc_soft_ih != NULL)
			softint_schedule(sc->sc_soft_ih);
		else
			r |= (sc->sc_intrhand)(sc);
	}

	return r;
}

static void
virtio_soft_intr(void *arg)
{
	struct virtio_softc *sc = arg;

	KASSERT(sc->sc_intrhand != NULL);

	(sc->sc_intrhand)(sc);
}
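
/*
 * Interrupt dispatch, summarised (illustrative only): virtio_intr() reads
 * and thereby acknowledges the ISR register.  A config-change notification
 * is handed to sc->sc_config_change; ring activity is handed to
 * sc->sc_intrhand, either directly or, if VIRTIO_F_PCI_INTR_SOFTINT was
 * requested, deferred to the soft interrupt above.  A child driver with no
 * special needs can simply set
 *	sc->sc_intrhand = virtio_vq_intr;
 * and do its per-queue work in the (*vq_done)() callbacks (see below).
 */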

/*
 * dmamap sync operations for a virtqueue.
 */
static inline void
vq_sync_descs(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
	/* availoffset == sizeof(vring_desc)*vq_num */
	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap, 0, vq->vq_availoffset,
	    ops);
}

static inline void
vq_sync_aring(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
	    vq->vq_availoffset,
	    offsetof(struct vring_avail, ring)
	     + vq->vq_num * sizeof(uint16_t),
	    ops);
}

static inline void
vq_sync_uring(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
	    vq->vq_usedoffset,
	    offsetof(struct vring_used, ring)
	     + vq->vq_num * sizeof(struct vring_used_elem),
	    ops);
}

static inline void
vq_sync_indirect(struct virtio_softc *sc, struct virtqueue *vq, int slot,
    int ops)
{
	int offset = vq->vq_indirectoffset
	    + sizeof(struct vring_desc) * vq->vq_maxnsegs * slot;

	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
	    offset, sizeof(struct vring_desc) * vq->vq_maxnsegs,
	    ops);
}

/*
 * Can be used as sc_intrhand.
 */
/*
 * Scans the vqs, does bus_dmamap_sync for the rings (not for the payload),
 * and calls (*vq_done)() if some entries have been consumed.
 */
int
virtio_vq_intr(struct virtio_softc *sc)
{
	struct virtqueue *vq;
	int i, r = 0;

	for (i = 0; i < sc->sc_nvqs; i++) {
		vq = &sc->sc_vqs[i];
		if (vq->vq_queued) {
			vq->vq_queued = 0;
			vq_sync_aring(sc, vq, BUS_DMASYNC_POSTWRITE);
		}
		vq_sync_uring(sc, vq, BUS_DMASYNC_POSTREAD);
		membar_consumer();
		if (vq->vq_used_idx != vq->vq_used->idx) {
			if (vq->vq_done)
				r |= (vq->vq_done)(vq);
		}
	}

	return r;
}

/*
 * Start/stop vq interrupt.  No guarantee: the flag is only a hint to the
 * host and notifications may still arrive.
 */
void
virtio_stop_vq_intr(struct virtio_softc *sc, struct virtqueue *vq)
{
	vq->vq_avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
	vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
	vq->vq_queued++;
}

void
virtio_start_vq_intr(struct virtio_softc *sc, struct virtqueue *vq)
{
	vq->vq_avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
	vq->vq_queued++;
}
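
/*
 * A typical (*vq_done)() callback drains the used ring with virtio_dequeue()
 * and recycles the slots with virtio_dequeue_commit().  Sketch only; the
 * mydev_* names are hypothetical:
 *	static int
 *	mydev_vq_done(struct virtqueue *vq)
 *	{
 *		struct virtio_softc *sc = vq->vq_owner;
 *		int slot, len, more = 0;
 *
 *		while (virtio_dequeue(sc, vq, &slot, &len) == 0) {
 *			more = 1;
 *			<sync and process the completed request in slot>;
 *			virtio_dequeue_commit(sc, vq, slot);
 *		}
 *		return more;
 *	}
 */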

/*
 * Initialize vq structure.
 */
static void
virtio_init_vq(struct virtio_softc *sc, struct virtqueue *vq, const bool reinit)
{
	int i, j;
	int vq_size = vq->vq_num;

	memset(vq->vq_vaddr, 0, vq->vq_bytesize);

	/* build the indirect descriptor chain */
	if (vq->vq_indirect != NULL) {
		struct vring_desc *vd;

		for (i = 0; i < vq_size; i++) {
			vd = vq->vq_indirect;
			vd += vq->vq_maxnsegs * i;
			for (j = 0; j < vq->vq_maxnsegs-1; j++)
				vd[j].next = j + 1;
		}
	}

	/* free slot management */
	SIMPLEQ_INIT(&vq->vq_freelist);
	for (i = 0; i < vq_size; i++) {
		SIMPLEQ_INSERT_TAIL(&vq->vq_freelist,
		    &vq->vq_entries[i], qe_list);
		vq->vq_entries[i].qe_index = i;
	}
	if (!reinit)
		mutex_init(&vq->vq_freelist_lock, MUTEX_SPIN, sc->sc_ipl);

	/* enqueue/dequeue status */
	vq->vq_avail_idx = 0;
	vq->vq_used_idx = 0;
	vq->vq_queued = 0;
	if (!reinit) {
		mutex_init(&vq->vq_aring_lock, MUTEX_SPIN, sc->sc_ipl);
		mutex_init(&vq->vq_uring_lock, MUTEX_SPIN, sc->sc_ipl);
	}
	vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
	vq_sync_uring(sc, vq, BUS_DMASYNC_PREREAD);
	vq->vq_queued++;
}

/*
 * Allocate/free a vq.
 */
int
virtio_alloc_vq(struct virtio_softc *sc,
    struct virtqueue *vq, int index, int maxsegsize, int maxnsegs,
    const char *name)
{
	int vq_size, allocsize1, allocsize2, allocsize3, allocsize = 0;
	int rsegs, r;
#define VIRTQUEUE_ALIGN(n)	(((n)+(VIRTIO_PAGE_SIZE-1))&	\
				 ~(VIRTIO_PAGE_SIZE-1))

	memset(vq, 0, sizeof(*vq));

	bus_space_write_2(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_SELECT, index);
	vq_size = bus_space_read_2(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_SIZE);
	if (vq_size == 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue does not exist, index %d for %s\n",
		    index, name);
		goto err;
	}
	/* allocsize1: descriptor table + avail ring + pad */
	allocsize1 = VIRTQUEUE_ALIGN(sizeof(struct vring_desc)*vq_size
	    + sizeof(uint16_t)*(2+vq_size));
	/* allocsize2: used ring + pad */
	allocsize2 = VIRTQUEUE_ALIGN(sizeof(uint16_t)*2
	    + sizeof(struct vring_used_elem)*vq_size);
	/* allocsize3: indirect table */
	if (sc->sc_indirect && maxnsegs >= MINSEG_INDIRECT)
		allocsize3 = sizeof(struct vring_desc) * maxnsegs * vq_size;
	else
		allocsize3 = 0;
	allocsize = allocsize1 + allocsize2 + allocsize3;

	/* alloc and map the memory */
	r = bus_dmamem_alloc(sc->sc_dmat, allocsize, VIRTIO_PAGE_SIZE, 0,
	    &vq->vq_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue %d for %s allocation failed, "
		    "error code %d\n", index, name, r);
		goto err;
	}
	r = bus_dmamem_map(sc->sc_dmat, &vq->vq_segs[0], 1, allocsize,
	    &vq->vq_vaddr, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue %d for %s map failed, "
		    "error code %d\n", index, name, r);
		goto err;
	}
	r = bus_dmamap_create(sc->sc_dmat, allocsize, 1, allocsize, 0,
	    BUS_DMA_NOWAIT, &vq->vq_dmamap);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue %d for %s dmamap creation failed, "
		    "error code %d\n", index, name, r);
		goto err;
	}
	r = bus_dmamap_load(sc->sc_dmat, vq->vq_dmamap,
	    vq->vq_vaddr, allocsize, NULL, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue %d for %s dmamap load failed, "
		    "error code %d\n", index, name, r);
		goto err;
	}
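
	/*
	 * The single DMA allocation now holds, in this order (offsets as
	 * computed above): the descriptor table plus avail ring (allocsize1,
	 * page-aligned), the used ring (allocsize2, page-aligned) and, if
	 * indirect descriptors are in use, vq_size chains of maxnsegs
	 * descriptors each (allocsize3).
	 */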
	/* set the vq address */
	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_ADDRESS,
	    (vq->vq_dmamap->dm_segs[0].ds_addr
	     / VIRTIO_PAGE_SIZE));

	/* remember addresses and offsets for later use */
	vq->vq_owner = sc;
	vq->vq_num = vq_size;
	vq->vq_index = index;
	vq->vq_desc = vq->vq_vaddr;
	vq->vq_availoffset = sizeof(struct vring_desc)*vq_size;
	vq->vq_avail = (void*)(((char*)vq->vq_desc) + vq->vq_availoffset);
	vq->vq_usedoffset = allocsize1;
	vq->vq_used = (void*)(((char*)vq->vq_desc) + vq->vq_usedoffset);
	if (allocsize3 > 0) {
		vq->vq_indirectoffset = allocsize1 + allocsize2;
		vq->vq_indirect = (void*)(((char*)vq->vq_desc)
		    + vq->vq_indirectoffset);
	}
	vq->vq_bytesize = allocsize;
	vq->vq_maxsegsize = maxsegsize;
	vq->vq_maxnsegs = maxnsegs;

	/* free slot management */
	vq->vq_entries = kmem_zalloc(sizeof(struct vq_entry)*vq_size,
	    KM_NOSLEEP);
	if (vq->vq_entries == NULL) {
		r = ENOMEM;
		goto err;
	}

	virtio_init_vq(sc, vq, false);

	aprint_verbose_dev(sc->sc_dev,
	    "allocated %u bytes for virtqueue %d for %s, "
	    "size %d\n", allocsize, index, name, vq_size);
	if (allocsize3 > 0)
		aprint_verbose_dev(sc->sc_dev,
		    "using %d bytes (%d entries) of "
		    "indirect descriptors\n",
		    allocsize3, maxnsegs * vq_size);
	return 0;

err:
	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_ADDRESS, 0);
	if (vq->vq_dmamap)
		bus_dmamap_destroy(sc->sc_dmat, vq->vq_dmamap);
	if (vq->vq_vaddr)
		bus_dmamem_unmap(sc->sc_dmat, vq->vq_vaddr, allocsize);
	if (vq->vq_segs[0].ds_addr)
		bus_dmamem_free(sc->sc_dmat, &vq->vq_segs[0], 1);
	memset(vq, 0, sizeof(*vq));

	return -1;
}

int
virtio_free_vq(struct virtio_softc *sc, struct virtqueue *vq)
{
	struct vq_entry *qe;
	int i = 0;

	/* device must be already deactivated */
	/* confirm the vq is empty */
	SIMPLEQ_FOREACH(qe, &vq->vq_freelist, qe_list) {
		i++;
	}
	if (i != vq->vq_num) {
		printf("%s: freeing non-empty vq, index %d\n",
		    device_xname(sc->sc_dev), vq->vq_index);
		return EBUSY;
	}

	/* tell device that there's no virtqueue any longer */
	bus_space_write_2(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_SELECT, vq->vq_index);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_ADDRESS, 0);

	/* size must match the kmem_zalloc() in virtio_alloc_vq() */
	kmem_free(vq->vq_entries, sizeof(*vq->vq_entries) * vq->vq_num);
	bus_dmamap_unload(sc->sc_dmat, vq->vq_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, vq->vq_dmamap);
	bus_dmamem_unmap(sc->sc_dmat, vq->vq_vaddr, vq->vq_bytesize);
	bus_dmamem_free(sc->sc_dmat, &vq->vq_segs[0], 1);
	mutex_destroy(&vq->vq_freelist_lock);
	mutex_destroy(&vq->vq_uring_lock);
	mutex_destroy(&vq->vq_aring_lock);
	memset(vq, 0, sizeof(*vq));

	return 0;
}
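
/*
 * Typical allocation at child attach time and release at detach time
 * (an illustrative sketch only; sc0, the segment count and the "rx" name
 * are hypothetical):
 *	if (virtio_alloc_vq(sc, &sc0->sc_vq, 0, MAXPHYS,
 *	    MAXPHYS / PAGE_SIZE + 2, "rx") != 0)
 *		<attach fails>;
 *	...
 *	virtio_free_vq(sc, &sc0->sc_vq);	// at detach, after draining
 */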

/*
 * Free descriptor management.
 */
static struct vq_entry *
vq_alloc_entry(struct virtqueue *vq)
{
	struct vq_entry *qe;

	mutex_enter(&vq->vq_freelist_lock);
	if (SIMPLEQ_EMPTY(&vq->vq_freelist)) {
		mutex_exit(&vq->vq_freelist_lock);
		return NULL;
	}
	qe = SIMPLEQ_FIRST(&vq->vq_freelist);
	SIMPLEQ_REMOVE_HEAD(&vq->vq_freelist, qe_list);
	mutex_exit(&vq->vq_freelist_lock);

	return qe;
}

static void
vq_free_entry(struct virtqueue *vq, struct vq_entry *qe)
{
	mutex_enter(&vq->vq_freelist_lock);
	SIMPLEQ_INSERT_TAIL(&vq->vq_freelist, qe, qe_list);
	mutex_exit(&vq->vq_freelist_lock);

	return;
}

/*
 * Enqueue several dmamaps as a single request.
 */
/*
 * Typical usage:
 *  The following are pre-allocated in arrays of <queue size> entries:
 *  - command blocks (in dmamem) should be pre-allocated and mapped
 *  - dmamaps for command blocks should be pre-allocated and loaded
 *  - dmamaps for payload should be pre-allocated
 *	r = virtio_enqueue_prep(sc, vq, &slot);		// allocate a slot
 *	if (r)		// currently 0 or EAGAIN
 *	  return r;
 *	r = bus_dmamap_load(dmat, dmamap_payload[slot], data, count, ..);
 *	if (r) {
 *	  virtio_enqueue_abort(sc, vq, slot);
 *	  bus_dmamap_unload(dmat, dmamap_payload[slot]);
 *	  return r;
 *	}
 *	r = virtio_enqueue_reserve(sc, vq, slot,
 *				   dmamap_payload[slot]->dm_nsegs+1);
 *							// ^ +1 for command
 *	if (r) {	// currently 0 or EAGAIN
 *	  bus_dmamap_unload(dmat, dmamap_payload[slot]);
 *	  return r;	// do not call abort()
 *	}
 *	<setup and prepare commands>
 *	bus_dmamap_sync(dmat, dmamap_cmd[slot],... BUS_DMASYNC_PREWRITE);
 *	bus_dmamap_sync(dmat, dmamap_payload[slot],...);
 *	virtio_enqueue(sc, vq, slot, dmamap_cmd[slot], false);
 *	virtio_enqueue(sc, vq, slot, dmamap_payload[slot], iswrite);
 *	virtio_enqueue_commit(sc, vq, slot, true);
 */

/*
 * enqueue_prep: allocate a slot number
 */
int
virtio_enqueue_prep(struct virtio_softc *sc, struct virtqueue *vq, int *slotp)
{
	struct vq_entry *qe1;

	KASSERT(slotp != NULL);

	qe1 = vq_alloc_entry(vq);
	if (qe1 == NULL)
		return EAGAIN;
	/* next slot is not allocated yet */
	qe1->qe_next = -1;
	*slotp = qe1->qe_index;

	return 0;
}

/*
 * enqueue_reserve: allocate remaining slots and build the descriptor chain.
 */
int
virtio_enqueue_reserve(struct virtio_softc *sc, struct virtqueue *vq,
    int slot, int nsegs)
{
	int indirect;
	struct vq_entry *qe1 = &vq->vq_entries[slot];

	KASSERT(qe1->qe_next == -1);
	KASSERT(1 <= nsegs && nsegs <= vq->vq_num);

	if ((vq->vq_indirect != NULL) &&
	    (nsegs >= MINSEG_INDIRECT) &&
	    (nsegs <= vq->vq_maxnsegs))
		indirect = 1;
	else
		indirect = 0;
	qe1->qe_indirect = indirect;

	if (indirect) {
		struct vring_desc *vd;
		int i;

		vd = &vq->vq_desc[qe1->qe_index];
		vd->addr = vq->vq_dmamap->dm_segs[0].ds_addr
		    + vq->vq_indirectoffset;
		vd->addr += sizeof(struct vring_desc)
		    * vq->vq_maxnsegs * qe1->qe_index;
		vd->len = sizeof(struct vring_desc) * nsegs;
		vd->flags = VRING_DESC_F_INDIRECT;

		vd = vq->vq_indirect;
		vd += vq->vq_maxnsegs * qe1->qe_index;
		qe1->qe_desc_base = vd;

		for (i = 0; i < nsegs-1; i++) {
			vd[i].flags = VRING_DESC_F_NEXT;
		}
		vd[i].flags = 0;
		qe1->qe_next = 0;

		return 0;
	} else {
		struct vring_desc *vd;
		struct vq_entry *qe;
		int i, s;

		vd = &vq->vq_desc[0];
		qe1->qe_desc_base = vd;
		qe1->qe_next = qe1->qe_index;
		s = slot;
		for (i = 0; i < nsegs - 1; i++) {
			qe = vq_alloc_entry(vq);
			if (qe == NULL) {
				vd[s].flags = 0;
				virtio_enqueue_abort(sc, vq, slot);
				return EAGAIN;
			}
			vd[s].flags = VRING_DESC_F_NEXT;
			vd[s].next = qe->qe_index;
			s = qe->qe_index;
		}
		vd[s].flags = 0;

		return 0;
	}
}

/*
 * enqueue: enqueue a single dmamap.
 */
int
virtio_enqueue(struct virtio_softc *sc, struct virtqueue *vq, int slot,
    bus_dmamap_t dmamap, bool write)
{
	struct vq_entry *qe1 = &vq->vq_entries[slot];
	struct vring_desc *vd = qe1->qe_desc_base;
	int i;
	int s = qe1->qe_next;

	KASSERT(s >= 0);
	KASSERT(dmamap->dm_nsegs > 0);

	for (i = 0; i < dmamap->dm_nsegs; i++) {
		vd[s].addr = dmamap->dm_segs[i].ds_addr;
		vd[s].len = dmamap->dm_segs[i].ds_len;
		if (!write)
			vd[s].flags |= VRING_DESC_F_WRITE;
		s = vd[s].next;
	}
	qe1->qe_next = s;

	return 0;
}

int
virtio_enqueue_p(struct virtio_softc *sc, struct virtqueue *vq, int slot,
    bus_dmamap_t dmamap, bus_addr_t start, bus_size_t len,
    bool write)
{
	struct vq_entry *qe1 = &vq->vq_entries[slot];
	struct vring_desc *vd = qe1->qe_desc_base;
	int s = qe1->qe_next;

	KASSERT(s >= 0);
	KASSERT(dmamap->dm_nsegs == 1); /* XXX */
	KASSERT((dmamap->dm_segs[0].ds_len > start) &&
	    (dmamap->dm_segs[0].ds_len >= start + len));

	vd[s].addr = dmamap->dm_segs[0].ds_addr + start;
	vd[s].len = len;
	if (!write)
		vd[s].flags |= VRING_DESC_F_WRITE;
	qe1->qe_next = vd[s].next;

	return 0;
}
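
/*
 * Descriptor chain layout, summarised from enqueue_reserve/enqueue above
 * (illustration only).  With indirect descriptors, the slot's entry in the
 * main table points at a private table of vq_maxnsegs descriptors and
 * qe_next counts from 0 within that table; without them, the chain lives in
 * the main descriptor table and the additional slots are taken from the free
 * list, linked through vring_desc::next.  In both cases virtio_enqueue()
 * fills descriptors starting at qe_next, and descriptors for buffers the
 * device is to write get VRING_DESC_F_WRITE.
 */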

/*
 * enqueue_commit: add the descriptor chain to the avail ring.
 */
int
virtio_enqueue_commit(struct virtio_softc *sc, struct virtqueue *vq, int slot,
    bool notifynow)
{
	struct vq_entry *qe1;

	if (slot < 0) {
		mutex_enter(&vq->vq_aring_lock);
		goto notify;
	}
	vq_sync_descs(sc, vq, BUS_DMASYNC_PREWRITE);
	qe1 = &vq->vq_entries[slot];
	if (qe1->qe_indirect)
		vq_sync_indirect(sc, vq, slot, BUS_DMASYNC_PREWRITE);
	mutex_enter(&vq->vq_aring_lock);
	vq->vq_avail->ring[(vq->vq_avail_idx++) % vq->vq_num] = slot;

notify:
	if (notifynow) {
		vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
		vq_sync_uring(sc, vq, BUS_DMASYNC_PREREAD);
		membar_producer();
		vq->vq_avail->idx = vq->vq_avail_idx;
		vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
		membar_producer();
		vq->vq_queued++;
		vq_sync_uring(sc, vq, BUS_DMASYNC_POSTREAD);
		membar_consumer();
		if (!(vq->vq_used->flags & VRING_USED_F_NO_NOTIFY))
			bus_space_write_2(sc->sc_iot, sc->sc_ioh,
			    VIRTIO_CONFIG_QUEUE_NOTIFY,
			    vq->vq_index);
	}
	mutex_exit(&vq->vq_aring_lock);

	return 0;
}

/*
 * enqueue_abort: rollback.
 */
int
virtio_enqueue_abort(struct virtio_softc *sc, struct virtqueue *vq, int slot)
{
	struct vq_entry *qe = &vq->vq_entries[slot];
	struct vring_desc *vd;
	int s;

	if (qe->qe_next < 0) {
		vq_free_entry(vq, qe);
		return 0;
	}

	s = slot;
	vd = &vq->vq_desc[0];
	while (vd[s].flags & VRING_DESC_F_NEXT) {
		s = vd[s].next;
		vq_free_entry(vq, qe);
		qe = &vq->vq_entries[s];
	}
	vq_free_entry(vq, qe);
	return 0;
}

/*
 * Dequeue a request.
 */
/*
 * dequeue: dequeue a request from uring; dmamap_sync for uring is
 * already done in the interrupt handler.
 */
int
virtio_dequeue(struct virtio_softc *sc, struct virtqueue *vq,
    int *slotp, int *lenp)
{
	uint16_t slot, usedidx;
	struct vq_entry *qe;

	if (vq->vq_used_idx == vq->vq_used->idx)
		return ENOENT;
	mutex_enter(&vq->vq_uring_lock);
	usedidx = vq->vq_used_idx++;
	mutex_exit(&vq->vq_uring_lock);
	usedidx %= vq->vq_num;
	slot = vq->vq_used->ring[usedidx].id;
	qe = &vq->vq_entries[slot];

	if (qe->qe_indirect)
		vq_sync_indirect(sc, vq, slot, BUS_DMASYNC_POSTWRITE);

	if (slotp)
		*slotp = slot;
	if (lenp)
		*lenp = vq->vq_used->ring[usedidx].len;

	return 0;
}

/*
 * dequeue_commit: complete dequeue; the slot is recycled for future use.
 * If you forget to call this the slot will be leaked.
 */
int
virtio_dequeue_commit(struct virtio_softc *sc, struct virtqueue *vq, int slot)
{
	struct vq_entry *qe = &vq->vq_entries[slot];
	struct vring_desc *vd = &vq->vq_desc[0];
	int s = slot;

	while (vd[s].flags & VRING_DESC_F_NEXT) {
		s = vd[s].next;
		vq_free_entry(vq, qe);
		qe = &vq->vq_entries[s];
	}
	vq_free_entry(vq, qe);

	return 0;
}