/*	$NetBSD: virtio.c,v 1.53 2021/10/28 01:36:43 yamaguchi Exp $	*/

/*
 * Copyright (c) 2020 The NetBSD Foundation, Inc.
 * Copyright (c) 2012 Stefan Fritsch, Alexander Fiveg.
 * Copyright (c) 2010 Minoura Makoto.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: virtio.c,v 1.53 2021/10/28 01:36:43 yamaguchi Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/atomic.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/kmem.h>
#include <sys/module.h>

#define VIRTIO_PRIVATE

#include <dev/pci/virtioreg.h> /* XXX: move to non-pci */
#include <dev/pci/virtiovar.h> /* XXX: move to non-pci */

#define MINSEG_INDIRECT		2 /* use indirect if nsegs >= this value */

/* incomplete list */
static const char *virtio_device_name[] = {
	"unknown (0)",			/*  0 */
	"network",			/*  1 */
	"block",			/*  2 */
	"console",			/*  3 */
	"entropy",			/*  4 */
	"memory balloon",		/*  5 */
	"I/O memory",			/*  6 */
	"remote processor messaging",	/*  7 */
	"SCSI",				/*  8 */
	"9P transport",			/*  9 */
};
#define NDEVNAMES	__arraycount(virtio_device_name)

static void	virtio_init_vq(struct virtio_softc *,
		    struct virtqueue *, const bool);

void
virtio_set_status(struct virtio_softc *sc, int status)
{
	sc->sc_ops->set_status(sc, status);
}

/*
 * Reset the device.
 */
/*
 * To reset the device to a known state, do the following:
 *	virtio_reset(sc);	     // this will stop the device activity
 *	<dequeue finished requests>; // virtio_dequeue() still can be called
 *	<revoke pending requests in the vqs if any>;
 *	virtio_reinit_start(sc);     // dequeue prohibited
 *	virtio_negotiate_features(sc, requestedfeatures);
 *	<some other initialization>;
 *	virtio_reinit_end(sc);	     // device activated; enqueue allowed
 * Once attached, feature negotiation can only be allowed after virtio_reset.
 */
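/*
 * Illustrative sketch (not part of the upstream source): a child driver's
 * re-initialization path following the protocol above.  The softc layout,
 * feature mask and helper names (mydev_softc, mydev_drain, MYDEV_FEATURES)
 * are hypothetical.
 *
 *	struct mydev_softc *msc = ...;
 *	struct virtio_softc *vsc = msc->sc_virtio;
 *
 *	virtio_reset(vsc);		// stop device activity
 *	mydev_drain(msc);		// dequeue finished, revoke pending
 *	if (virtio_reinit_start(vsc) != 0)
 *		return EIO;
 *	virtio_negotiate_features(vsc, MYDEV_FEATURES);
 *	// ... re-load device config, queue state, etc. ...
 *	virtio_reinit_end(vsc);		// enqueue allowed again
 */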
void
virtio_reset(struct virtio_softc *sc)
{
	virtio_device_reset(sc);
}

int
virtio_reinit_start(struct virtio_softc *sc)
{
	int i, r;

	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);
	for (i = 0; i < sc->sc_nvqs; i++) {
		int n;
		struct virtqueue *vq = &sc->sc_vqs[i];
		n = sc->sc_ops->read_queue_size(sc, vq->vq_index);
		if (n == 0)	/* vq disappeared */
			continue;
		if (n != vq->vq_num) {
			panic("%s: virtqueue size changed, vq index %d\n",
			    device_xname(sc->sc_dev),
			    vq->vq_index);
		}
		virtio_init_vq(sc, vq, true);
		sc->sc_ops->setup_queue(sc, vq->vq_index,
		    vq->vq_dmamap->dm_segs[0].ds_addr);
	}

	r = sc->sc_ops->setup_interrupts(sc, 1);
	if (r != 0)
		goto fail;

	return 0;

fail:
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);

	return 1;
}

void
virtio_reinit_end(struct virtio_softc *sc)
{
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
}

/*
 * Feature negotiation.
 */
void
virtio_negotiate_features(struct virtio_softc *sc, uint64_t guest_features)
{
	if (!(device_cfdata(sc->sc_dev)->cf_flags & 1) &&
	    !(device_cfdata(sc->sc_child)->cf_flags & 1)) /* XXX */
		guest_features |= VIRTIO_F_RING_INDIRECT_DESC;
	sc->sc_ops->neg_features(sc, guest_features);
	if (sc->sc_active_features & VIRTIO_F_RING_INDIRECT_DESC)
		sc->sc_indirect = true;
	else
		sc->sc_indirect = false;
}


/*
 * Device configuration registers readers/writers
 */
#if 0
#define DPRINTFR(n, fmt, val, index, num) \
	printf("\n%s (", n); \
	for (int i = 0; i < num; i++) \
		printf("%02x ", bus_space_read_1(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index+i)); \
	printf(") -> "); printf(fmt, val); printf("\n");
#define DPRINTFR2(n, fmt, val_s, val_n) \
	printf("%s ", n); \
	printf("\n stream "); printf(fmt, val_s); printf(" norm "); printf(fmt, val_n); printf("\n");
#else
#define DPRINTFR(n, fmt, val, index, num)
#define DPRINTFR2(n, fmt, val_s, val_n)
#endif


uint8_t
virtio_read_device_config_1(struct virtio_softc *sc, int index) {
	bus_space_tag_t iot = sc->sc_devcfg_iot;
	bus_space_handle_t ioh = sc->sc_devcfg_ioh;
	uint8_t val;

	val = bus_space_read_1(iot, ioh, index);

	DPRINTFR("read_1", "%02x", val, index, 1);
	return val;
}

uint16_t
virtio_read_device_config_2(struct virtio_softc *sc, int index) {
	bus_space_tag_t iot = sc->sc_devcfg_iot;
	bus_space_handle_t ioh = sc->sc_devcfg_ioh;
	uint16_t val;

	val = bus_space_read_2(iot, ioh, index);
	if (BYTE_ORDER != sc->sc_bus_endian)
		val = bswap16(val);

	DPRINTFR("read_2", "%04x", val, index, 2);
	DPRINTFR2("read_2", "%04x",
	    bus_space_read_stream_2(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index),
	    bus_space_read_2(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index));
	return val;
}

uint32_t
virtio_read_device_config_4(struct virtio_softc *sc, int index) {
	bus_space_tag_t iot = sc->sc_devcfg_iot;
	bus_space_handle_t ioh = sc->sc_devcfg_ioh;
	uint32_t val;

	val = bus_space_read_4(iot, ioh, index);
	if (BYTE_ORDER != sc->sc_bus_endian)
		val = bswap32(val);

	DPRINTFR("read_4", "%08x", val, index, 4);
	DPRINTFR2("read_4", "%08x",
	    bus_space_read_stream_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index),
	    bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index));
	return val;
}
/*
 * The virtio spec explicitly says that reads and writes of 8 bytes are not
 * atomic and that no triggers may be connected to reading or writing them.
 * We access them using two 32-bit reads.  See virtio spec 4.1.3.1.
 */
uint64_t
virtio_read_device_config_8(struct virtio_softc *sc, int index) {
	bus_space_tag_t iot = sc->sc_devcfg_iot;
	bus_space_handle_t ioh = sc->sc_devcfg_ioh;
	union {
		uint64_t u64;
		uint32_t l[2];
	} v;
	uint64_t val;

	v.l[0] = bus_space_read_4(iot, ioh, index);
	v.l[1] = bus_space_read_4(iot, ioh, index + 4);
	if (sc->sc_bus_endian != sc->sc_struct_endian) {
		v.l[0] = bswap32(v.l[0]);
		v.l[1] = bswap32(v.l[1]);
	}
	val = v.u64;

	if (BYTE_ORDER != sc->sc_struct_endian)
		val = bswap64(val);

	DPRINTFR("read_8", "%08lx", val, index, 8);
	DPRINTFR2("read_8 low ", "%08x",
	    bus_space_read_stream_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index),
	    bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index));
	DPRINTFR2("read_8 high ", "%08x",
	    bus_space_read_stream_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index + 4),
	    bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index + 4));
	return val;
}

/*
 * In the older virtio spec, device config registers are host endian; in the
 * newer spec they are little endian.  Some newer devices, however, explicitly
 * specify that their registers are always little endian.  These functions
 * cater for those.
 */
uint16_t
virtio_read_device_config_le_2(struct virtio_softc *sc, int index) {
	bus_space_tag_t iot = sc->sc_devcfg_iot;
	bus_space_handle_t ioh = sc->sc_devcfg_ioh;
	uint16_t val;

	val = bus_space_read_2(iot, ioh, index);
	if (sc->sc_bus_endian != LITTLE_ENDIAN)
		val = bswap16(val);

	DPRINTFR("read_le_2", "%04x", val, index, 2);
	DPRINTFR2("read_le_2", "%04x",
	    bus_space_read_stream_2(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, 0),
	    bus_space_read_2(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, 0));
	return val;
}

uint32_t
virtio_read_device_config_le_4(struct virtio_softc *sc, int index) {
	bus_space_tag_t iot = sc->sc_devcfg_iot;
	bus_space_handle_t ioh = sc->sc_devcfg_ioh;
	uint32_t val;

	val = bus_space_read_4(iot, ioh, index);
	if (sc->sc_bus_endian != LITTLE_ENDIAN)
		val = bswap32(val);

	DPRINTFR("read_le_4", "%08x", val, index, 4);
	DPRINTFR2("read_le_4", "%08x",
	    bus_space_read_stream_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, 0),
	    bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, 0));
	return val;
}
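/*
 * Illustrative sketch (not part of the upstream source): how a child driver
 * would typically use the config accessors above.  The config offsets
 * (MYDEV_CONFIG_COUNT, MYDEV_CONFIG_BASE) and the softc layout are
 * hypothetical; real drivers use the offsets defined for their device class.
 *
 *	struct virtio_softc *vsc = msc->sc_virtio;
 *	uint32_t count;
 *	uint64_t base;
 *
 *	// plain accessors follow the negotiated (struct) endianness
 *	count = virtio_read_device_config_4(vsc, MYDEV_CONFIG_COUNT);
 *	// 8-byte fields are read as two 32-bit halves internally
 *	base = virtio_read_device_config_8(vsc, MYDEV_CONFIG_BASE);
 *	// _le_ accessors are for fields the device defines as always LE
 *	count = virtio_read_device_config_le_4(vsc, MYDEV_CONFIG_COUNT);
 */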
void
virtio_write_device_config_1(struct virtio_softc *sc, int index, uint8_t value)
{
	bus_space_tag_t iot = sc->sc_devcfg_iot;
	bus_space_handle_t ioh = sc->sc_devcfg_ioh;

	bus_space_write_1(iot, ioh, index, value);
}

void
virtio_write_device_config_2(struct virtio_softc *sc, int index, uint16_t value)
{
	bus_space_tag_t iot = sc->sc_devcfg_iot;
	bus_space_handle_t ioh = sc->sc_devcfg_ioh;

	if (BYTE_ORDER != sc->sc_bus_endian)
		value = bswap16(value);
	bus_space_write_2(iot, ioh, index, value);
}

void
virtio_write_device_config_4(struct virtio_softc *sc, int index, uint32_t value)
{
	bus_space_tag_t iot = sc->sc_devcfg_iot;
	bus_space_handle_t ioh = sc->sc_devcfg_ioh;

	if (BYTE_ORDER != sc->sc_bus_endian)
		value = bswap32(value);
	bus_space_write_4(iot, ioh, index, value);
}

/*
 * The virtio spec explicitly says that reads and writes of 8 bytes are not
 * atomic and that no triggers may be connected to reading or writing them.
 * We access them using two 32-bit writes.  For good measure, the spec says
 * to always write the LSB first, just in case of a hypervisor bug.  See
 * virtio spec 4.1.3.1.
 */
void
virtio_write_device_config_8(struct virtio_softc *sc, int index, uint64_t value)
{
	bus_space_tag_t iot = sc->sc_devcfg_iot;
	bus_space_handle_t ioh = sc->sc_devcfg_ioh;
	union {
		uint64_t u64;
		uint32_t l[2];
	} v;

	if (BYTE_ORDER != sc->sc_struct_endian)
		value = bswap64(value);

	v.u64 = value;
	if (sc->sc_bus_endian != sc->sc_struct_endian) {
		v.l[0] = bswap32(v.l[0]);
		v.l[1] = bswap32(v.l[1]);
	}

	if (sc->sc_struct_endian == LITTLE_ENDIAN) {
		bus_space_write_4(iot, ioh, index, v.l[0]);
		bus_space_write_4(iot, ioh, index + 4, v.l[1]);
	} else {
		bus_space_write_4(iot, ioh, index + 4, v.l[1]);
		bus_space_write_4(iot, ioh, index, v.l[0]);
	}
}

/*
 * In the older virtio spec, device config registers are host endian; in the
 * newer spec they are little endian.  Some newer devices, however, explicitly
 * specify that their registers are always little endian.  These functions
 * cater for those.
 */
void
virtio_write_device_config_le_2(struct virtio_softc *sc, int index, uint16_t value)
{
	bus_space_tag_t iot = sc->sc_devcfg_iot;
	bus_space_handle_t ioh = sc->sc_devcfg_ioh;

	if (sc->sc_bus_endian != LITTLE_ENDIAN)
		value = bswap16(value);
	bus_space_write_2(iot, ioh, index, value);
}

void
virtio_write_device_config_le_4(struct virtio_softc *sc, int index, uint32_t value)
{
	bus_space_tag_t iot = sc->sc_devcfg_iot;
	bus_space_handle_t ioh = sc->sc_devcfg_ioh;

	if (sc->sc_bus_endian != LITTLE_ENDIAN)
		value = bswap32(value);
	bus_space_write_4(iot, ioh, index, value);
}


/*
 * data structures endian helpers
 */
uint16_t virtio_rw16(struct virtio_softc *sc, uint16_t val)
{
	KASSERT(sc);
	return BYTE_ORDER != sc->sc_struct_endian ? bswap16(val) : val;
}

uint32_t virtio_rw32(struct virtio_softc *sc, uint32_t val)
{
	KASSERT(sc);
	return BYTE_ORDER != sc->sc_struct_endian ? bswap32(val) : val;
}

uint64_t virtio_rw64(struct virtio_softc *sc, uint64_t val)
{
	KASSERT(sc);
	return BYTE_ORDER != sc->sc_struct_endian ? bswap64(val) : val;
}
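/*
 * Illustrative sketch (not part of the upstream source): fields that live in
 * virtio ring structures (and in any other memory shared with the device in
 * the negotiated "struct" endianness) are converted with virtio_rw16/32/64,
 * mirroring how this file uses them.  The names vsc/vq/vd/dmamap stand in
 * for a driver's own variables.
 *
 *	// store a guest-physical address and length into a descriptor
 *	vd->addr = virtio_rw64(vsc, dmamap->dm_segs[0].ds_addr);
 *	vd->len  = virtio_rw32(vsc, dmamap->dm_segs[0].ds_len);
 *	// read back a 16-bit index published by the device
 *	uint16_t used_idx = virtio_rw16(vsc, vq->vq_used->idx);
 */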
/*
 * Interrupt handler.
 */
static void
virtio_soft_intr(void *arg)
{
	struct virtio_softc *sc = arg;

	KASSERT(sc->sc_intrhand != NULL);

	(sc->sc_intrhand)(sc);
}

/*
 * dmamap sync operations for a virtqueue.
 */
static inline void
vq_sync_descs(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
	/* availoffset == sizeof(vring_desc)*vq_num */
	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap, 0, vq->vq_availoffset,
	    ops);
}

static inline void
vq_sync_aring(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
	uint16_t hdrlen = offsetof(struct vring_avail, ring);
	if (sc->sc_active_features & VIRTIO_F_RING_EVENT_IDX)
		hdrlen += sizeof(uint16_t);

	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
	    vq->vq_availoffset,
	    hdrlen + sc->sc_nvqs * sizeof(uint16_t),
	    ops);
}

static inline void
vq_sync_uring(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
	uint16_t hdrlen = offsetof(struct vring_used, ring);
	if (sc->sc_active_features & VIRTIO_F_RING_EVENT_IDX)
		hdrlen += sizeof(uint16_t);

	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
	    vq->vq_usedoffset,
	    hdrlen + sc->sc_nvqs * sizeof(struct vring_used_elem),
	    ops);
}

static inline void
vq_sync_indirect(struct virtio_softc *sc, struct virtqueue *vq, int slot,
    int ops)
{
	int offset = vq->vq_indirectoffset
	    + sizeof(struct vring_desc) * vq->vq_maxnsegs * slot;

	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
	    offset, sizeof(struct vring_desc) * vq->vq_maxnsegs,
	    ops);
}

/*
 * Can be used as sc_intrhand.
 */
/*
 * Scan vq, bus_dmamap_sync for the vqs (not for the payload),
 * and call (*vq_done)() if some entries have been consumed.
 */
bool
virtio_vq_is_enqueued(struct virtio_softc *sc, struct virtqueue *vq)
{

	if (vq->vq_queued) {
		vq->vq_queued = 0;
		vq_sync_aring(sc, vq, BUS_DMASYNC_POSTWRITE);
	}
	vq_sync_uring(sc, vq, BUS_DMASYNC_POSTREAD);
	membar_consumer();

	return (vq->vq_used_idx != virtio_rw16(sc, vq->vq_used->idx)) ? 1 : 0;
}

int
virtio_vq_intr(struct virtio_softc *sc)
{
	struct virtqueue *vq;
	int i, r = 0;

	for (i = 0; i < sc->sc_nvqs; i++) {
		vq = &sc->sc_vqs[i];
		if (virtio_vq_is_enqueued(sc, vq) == 1) {
			if (vq->vq_done)
				r |= (vq->vq_done)(vq);
		}
	}

	return r;
}

int
virtio_vq_intrhand(struct virtio_softc *sc)
{
	struct virtqueue *vq;
	int i, r = 0;

	for (i = 0; i < sc->sc_nvqs; i++) {
		vq = &sc->sc_vqs[i];
		r |= (vq->vq_intrhand)(vq->vq_intrhand_arg);
	}

	return r;
}


/*
 * Increase the event index in order to delay interrupts.
 */
int
virtio_postpone_intr(struct virtio_softc *sc, struct virtqueue *vq,
    uint16_t nslots)
{
	uint16_t idx, nused;

	idx = vq->vq_used_idx + nslots;

	/* set the new event index: avail_ring->used_event = idx */
	*vq->vq_used_event = virtio_rw16(sc, idx);
	membar_producer();

	vq_sync_aring(vq->vq_owner, vq, BUS_DMASYNC_PREWRITE);
	vq->vq_queued++;

	nused = (uint16_t)
	    (virtio_rw16(sc, vq->vq_used->idx) - vq->vq_used_idx);
	KASSERT(nused <= vq->vq_num);

	return nslots < nused;
}
/*
 * Postpone interrupt until 3/4 of the available descriptors have been
 * consumed.
 */
int
virtio_postpone_intr_smart(struct virtio_softc *sc, struct virtqueue *vq)
{
	uint16_t nslots;

	nslots = (uint16_t)
	    (virtio_rw16(sc, vq->vq_avail->idx) - vq->vq_used_idx) * 3 / 4;

	return virtio_postpone_intr(sc, vq, nslots);
}

/*
 * Postpone interrupt until all of the available descriptors have been
 * consumed.
 */
int
virtio_postpone_intr_far(struct virtio_softc *sc, struct virtqueue *vq)
{
	uint16_t nslots;

	nslots = (uint16_t)
	    (virtio_rw16(sc, vq->vq_avail->idx) - vq->vq_used_idx);

	return virtio_postpone_intr(sc, vq, nslots);
}

/*
 * Start/stop vq interrupt.  No guarantee.
 */
void
virtio_stop_vq_intr(struct virtio_softc *sc, struct virtqueue *vq)
{
	if (sc->sc_active_features & VIRTIO_F_RING_EVENT_IDX) {
		/*
		 * No way to disable the interrupt completely with
		 * RingEventIdx. Instead advance used_event by half the
		 * possible value. This won't happen soon and is far enough in
		 * the past to not trigger a spurious interrupt.
		 */
		*vq->vq_used_event = virtio_rw16(sc, vq->vq_used_idx + 0x8000);
	} else {
		vq->vq_avail->flags |= virtio_rw16(sc, VRING_AVAIL_F_NO_INTERRUPT);
	}
	vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
	vq->vq_queued++;
}

int
virtio_start_vq_intr(struct virtio_softc *sc, struct virtqueue *vq)
{
	if (sc->sc_active_features & VIRTIO_F_RING_EVENT_IDX) {
		/*
		 * If event index feature is negotiated, enabling interrupts
		 * is done through setting the latest consumed index in the
		 * used_event field
		 */
		*vq->vq_used_event = virtio_rw16(sc, vq->vq_used_idx);
	} else {
		vq->vq_avail->flags &= ~virtio_rw16(sc, VRING_AVAIL_F_NO_INTERRUPT);
	}
	vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
	vq->vq_queued++;

	return vq->vq_used_idx != virtio_rw16(sc, vq->vq_used->idx);
}
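/*
 * Illustrative sketch (not part of the upstream source): the usual pattern
 * for a child driver's queue handler is to keep interrupts off while it
 * drains the used ring, then re-enable them and re-check, since
 * virtio_start_vq_intr() returns non-zero when more entries arrived in the
 * meantime.  The completion work done per slot is driver-specific.
 *
 *	virtio_stop_vq_intr(vsc, vq);
 *	do {
 *		while (virtio_dequeue(vsc, vq, &slot, &len) == 0) {
 *			// per-request completion work goes here
 *			virtio_dequeue_commit(vsc, vq, slot);
 *		}
 *	} while (virtio_start_vq_intr(vsc, vq));
 */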
/*
 * Initialize vq structure.
 */
static void
virtio_init_vq(struct virtio_softc *sc, struct virtqueue *vq,
    const bool reinit)
{
	int i, j;
	int vq_size = vq->vq_num;

	memset(vq->vq_vaddr, 0, vq->vq_bytesize);

	/* build the indirect descriptor chain */
	if (vq->vq_indirect != NULL) {
		struct vring_desc *vd;

		for (i = 0; i < vq_size; i++) {
			vd = vq->vq_indirect;
			vd += vq->vq_maxnsegs * i;
			for (j = 0; j < vq->vq_maxnsegs-1; j++) {
				vd[j].next = virtio_rw16(sc, j + 1);
			}
		}
	}

	/* free slot management */
	SIMPLEQ_INIT(&vq->vq_freelist);
	for (i = 0; i < vq_size; i++) {
		SIMPLEQ_INSERT_TAIL(&vq->vq_freelist,
		    &vq->vq_entries[i], qe_list);
		vq->vq_entries[i].qe_index = i;
	}
	if (!reinit)
		mutex_init(&vq->vq_freelist_lock, MUTEX_SPIN, sc->sc_ipl);

	/* enqueue/dequeue status */
	vq->vq_avail_idx = 0;
	vq->vq_used_idx = 0;
	vq->vq_queued = 0;
	if (!reinit) {
		mutex_init(&vq->vq_aring_lock, MUTEX_SPIN, sc->sc_ipl);
		mutex_init(&vq->vq_uring_lock, MUTEX_SPIN, sc->sc_ipl);
	}
	vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
	vq_sync_uring(sc, vq, BUS_DMASYNC_PREREAD);
	vq->vq_queued++;
}

/*
 * Allocate/free a vq.
 */
int
virtio_alloc_vq(struct virtio_softc *sc, struct virtqueue *vq, int index,
    int maxsegsize, int maxnsegs, const char *name)
{
	int vq_size, allocsize1, allocsize2, allocsize3, allocsize = 0;
	int rsegs, r, hdrlen;
#define VIRTQUEUE_ALIGN(n)	(((n)+(VIRTIO_PAGE_SIZE-1))&	\
				 ~(VIRTIO_PAGE_SIZE-1))

	/* Make sure callers allocate vqs in order */
	KASSERT(sc->sc_nvqs == index);

	memset(vq, 0, sizeof(*vq));

	vq_size = sc->sc_ops->read_queue_size(sc, index);
	if (vq_size == 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue does not exist, index %d for %s\n",
		    index, name);
		goto err;
	}

	hdrlen = sc->sc_active_features & VIRTIO_F_RING_EVENT_IDX ? 3 : 2;

	/* allocsize1: descriptor table + avail ring + pad */
	allocsize1 = VIRTQUEUE_ALIGN(sizeof(struct vring_desc)*vq_size
	    + sizeof(uint16_t)*(hdrlen + vq_size));
	/* allocsize2: used ring + pad */
	allocsize2 = VIRTQUEUE_ALIGN(sizeof(uint16_t) * hdrlen
	    + sizeof(struct vring_used_elem)*vq_size);
	/* allocsize3: indirect table */
	if (sc->sc_indirect && maxnsegs >= MINSEG_INDIRECT)
		allocsize3 = sizeof(struct vring_desc) * maxnsegs * vq_size;
	else
		allocsize3 = 0;
	allocsize = allocsize1 + allocsize2 + allocsize3;

	/* alloc and map the memory */
	r = bus_dmamem_alloc(sc->sc_dmat, allocsize, VIRTIO_PAGE_SIZE, 0,
	    &vq->vq_segs[0], 1, &rsegs, BUS_DMA_WAITOK);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue %d for %s allocation failed, "
		    "error code %d\n", index, name, r);
		goto err;
	}
	r = bus_dmamem_map(sc->sc_dmat, &vq->vq_segs[0], rsegs, allocsize,
	    &vq->vq_vaddr, BUS_DMA_WAITOK);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue %d for %s map failed, "
		    "error code %d\n", index, name, r);
		goto err;
	}
	r = bus_dmamap_create(sc->sc_dmat, allocsize, 1, allocsize, 0,
	    BUS_DMA_WAITOK, &vq->vq_dmamap);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue %d for %s dmamap creation failed, "
		    "error code %d\n", index, name, r);
		goto err;
	}
	r = bus_dmamap_load(sc->sc_dmat, vq->vq_dmamap,
	    vq->vq_vaddr, allocsize, NULL, BUS_DMA_WAITOK);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue %d for %s dmamap load failed, "
		    "error code %d\n", index, name, r);
		goto err;
	}

	/* remember addresses and offsets for later use */
	vq->vq_owner = sc;
	vq->vq_num = vq_size;
	vq->vq_index = index;
	vq->vq_desc = vq->vq_vaddr;
	vq->vq_availoffset = sizeof(struct vring_desc)*vq_size;
	vq->vq_avail = (void*)(((char*)vq->vq_desc) + vq->vq_availoffset);
	vq->vq_used_event = (uint16_t *) ((char *)vq->vq_avail +
	    offsetof(struct vring_avail, ring[vq->vq_num]));
	vq->vq_usedoffset = allocsize1;
	vq->vq_used = (void*)(((char*)vq->vq_desc) + vq->vq_usedoffset);
	vq->vq_avail_event = (uint16_t *)((char *)vq->vq_used +
	    offsetof(struct vring_used, ring[vq->vq_num]));

	if (allocsize3 > 0) {
		vq->vq_indirectoffset = allocsize1 + allocsize2;
		vq->vq_indirect = (void*)(((char*)vq->vq_desc)
		    + vq->vq_indirectoffset);
	}
	vq->vq_bytesize = allocsize;
	vq->vq_maxsegsize = maxsegsize;
	vq->vq_maxnsegs = maxnsegs;

	/* free slot management */
	vq->vq_entries = kmem_zalloc(sizeof(struct vq_entry)*vq_size,
	    KM_SLEEP);
	virtio_init_vq(sc, vq, false);

	/* set the vq address */
	sc->sc_ops->setup_queue(sc, index,
	    vq->vq_dmamap->dm_segs[0].ds_addr);

	aprint_verbose_dev(sc->sc_dev,
	    "allocated %u bytes for virtqueue %d for %s, "
	    "size %d\n", allocsize, index, name, vq_size);
	if (allocsize3 > 0)
		aprint_verbose_dev(sc->sc_dev,
		    "using %d bytes (%d entries) "
		    "indirect descriptors\n",
		    allocsize3, maxnsegs * vq_size);

	sc->sc_nvqs++;

	return 0;

err:
	sc->sc_ops->setup_queue(sc, index, 0);
	if (vq->vq_dmamap)
		bus_dmamap_destroy(sc->sc_dmat, vq->vq_dmamap);
	if (vq->vq_vaddr)
		bus_dmamem_unmap(sc->sc_dmat, vq->vq_vaddr, allocsize);
	if (vq->vq_segs[0].ds_addr)
		bus_dmamem_free(sc->sc_dmat, &vq->vq_segs[0], 1);
	memset(vq, 0, sizeof(*vq));

	return -1;
}
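/*
 * Illustrative sketch (not part of the upstream source): how a child driver
 * typically allocates a virtqueue in its attach routine and hooks up its
 * completion callback.  The queue index, segment limits, queue name and
 * mydev_vq_done() are hypothetical and driver-specific.
 *
 *	if (virtio_alloc_vq(vsc, &msc->sc_vq[0], 0,
 *	    MAXPHYS, MYDEV_MAX_SEGS, "requests") != 0)
 *		goto failed;
 *	msc->sc_vq[0].vq_done = mydev_vq_done;
 *	virtio_start_vq_intr(vsc, &msc->sc_vq[0]);
 */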
int
virtio_free_vq(struct virtio_softc *sc, struct virtqueue *vq)
{
	struct vq_entry *qe;
	int i = 0;

	/* device must be already deactivated */
	/* confirm the vq is empty */
	SIMPLEQ_FOREACH(qe, &vq->vq_freelist, qe_list) {
		i++;
	}
	if (i != vq->vq_num) {
		printf("%s: freeing non-empty vq, index %d\n",
		    device_xname(sc->sc_dev), vq->vq_index);
		return EBUSY;
	}

	/* tell device that there's no virtqueue any longer */
	sc->sc_ops->setup_queue(sc, vq->vq_index, 0);

	kmem_free(vq->vq_entries, sizeof(*vq->vq_entries) * vq->vq_num);
	bus_dmamap_unload(sc->sc_dmat, vq->vq_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, vq->vq_dmamap);
	bus_dmamem_unmap(sc->sc_dmat, vq->vq_vaddr, vq->vq_bytesize);
	bus_dmamem_free(sc->sc_dmat, &vq->vq_segs[0], 1);
	mutex_destroy(&vq->vq_freelist_lock);
	mutex_destroy(&vq->vq_uring_lock);
	mutex_destroy(&vq->vq_aring_lock);
	memset(vq, 0, sizeof(*vq));

	sc->sc_nvqs--;

	return 0;
}

/*
 * Free descriptor management.
 */
static struct vq_entry *
vq_alloc_entry(struct virtqueue *vq)
{
	struct vq_entry *qe;

	mutex_enter(&vq->vq_freelist_lock);
	if (SIMPLEQ_EMPTY(&vq->vq_freelist)) {
		mutex_exit(&vq->vq_freelist_lock);
		return NULL;
	}
	qe = SIMPLEQ_FIRST(&vq->vq_freelist);
	SIMPLEQ_REMOVE_HEAD(&vq->vq_freelist, qe_list);
	mutex_exit(&vq->vq_freelist_lock);

	return qe;
}

static void
vq_free_entry(struct virtqueue *vq, struct vq_entry *qe)
{
	mutex_enter(&vq->vq_freelist_lock);
	SIMPLEQ_INSERT_TAIL(&vq->vq_freelist, qe, qe_list);
	mutex_exit(&vq->vq_freelist_lock);

	return;
}

/*
 * Enqueue several dmamaps as a single request.
 */
/*
 * Typical usage:
 *  <queue size> number of the following are stored in arrays:
 *  - command blocks (in dmamem) should be pre-allocated and mapped
 *  - dmamaps for command blocks should be pre-allocated and loaded
 *  - dmamaps for payload should be pre-allocated
 *	r = virtio_enqueue_prep(sc, vq, &slot);	// allocate a slot
 *	if (r)		// currently 0 or EAGAIN
 *		return r;
 *	r = bus_dmamap_load(dmat, dmamap_payload[slot], data, count, ..);
 *	if (r) {
 *		virtio_enqueue_abort(sc, vq, slot);
 *		return r;
 *	}
 *	r = virtio_enqueue_reserve(sc, vq, slot,
 *	    dmamap_payload[slot]->dm_nsegs+1);	// ^ +1 for command
 *	if (r) {	// currently 0 or EAGAIN
 *		bus_dmamap_unload(dmat, dmamap_payload[slot]);
 *		return r;	// do not call abort()
 *	}
 *	<setup and prepare commands>
 *	bus_dmamap_sync(dmat, dmamap_cmd[slot],... BUS_DMASYNC_PREWRITE);
 *	bus_dmamap_sync(dmat, dmamap_payload[slot],...);
 *	virtio_enqueue(sc, vq, slot, dmamap_cmd[slot], false);
 *	virtio_enqueue(sc, vq, slot, dmamap_payload[slot], iswrite);
 *	virtio_enqueue_commit(sc, vq, slot, true);
 */
/*
 * enqueue_prep: allocate a slot number
 */
int
virtio_enqueue_prep(struct virtio_softc *sc, struct virtqueue *vq, int *slotp)
{
	struct vq_entry *qe1;

	KASSERT(slotp != NULL);

	qe1 = vq_alloc_entry(vq);
	if (qe1 == NULL)
		return EAGAIN;
	/* next slot is not allocated yet */
	qe1->qe_next = -1;
	*slotp = qe1->qe_index;

	return 0;
}

/*
 * enqueue_reserve: allocate remaining slots and build the descriptor chain.
 */
int
virtio_enqueue_reserve(struct virtio_softc *sc, struct virtqueue *vq,
    int slot, int nsegs)
{
	int indirect;
	struct vq_entry *qe1 = &vq->vq_entries[slot];

	KASSERT(qe1->qe_next == -1);
	KASSERT(1 <= nsegs && nsegs <= vq->vq_num);

	if ((vq->vq_indirect != NULL) &&
	    (nsegs >= MINSEG_INDIRECT) &&
	    (nsegs <= vq->vq_maxnsegs))
		indirect = 1;
	else
		indirect = 0;
	qe1->qe_indirect = indirect;

	if (indirect) {
		struct vring_desc *vd;
		uint64_t addr;
		int i;

		vd = &vq->vq_desc[qe1->qe_index];
		addr = vq->vq_dmamap->dm_segs[0].ds_addr
		    + vq->vq_indirectoffset;
		addr += sizeof(struct vring_desc)
		    * vq->vq_maxnsegs * qe1->qe_index;
		vd->addr = virtio_rw64(sc, addr);
		vd->len = virtio_rw32(sc, sizeof(struct vring_desc) * nsegs);
		vd->flags = virtio_rw16(sc, VRING_DESC_F_INDIRECT);

		vd = vq->vq_indirect;
		vd += vq->vq_maxnsegs * qe1->qe_index;
		qe1->qe_desc_base = vd;

		for (i = 0; i < nsegs-1; i++) {
			vd[i].flags = virtio_rw16(sc, VRING_DESC_F_NEXT);
		}
		vd[i].flags = virtio_rw16(sc, 0);
		qe1->qe_next = 0;

		return 0;
	} else {
		struct vring_desc *vd;
		struct vq_entry *qe;
		int i, s;

		vd = &vq->vq_desc[0];
		qe1->qe_desc_base = vd;
		qe1->qe_next = qe1->qe_index;
		s = slot;
		for (i = 0; i < nsegs - 1; i++) {
			qe = vq_alloc_entry(vq);
			if (qe == NULL) {
				vd[s].flags = virtio_rw16(sc, 0);
				virtio_enqueue_abort(sc, vq, slot);
				return EAGAIN;
			}
			vd[s].flags = virtio_rw16(sc, VRING_DESC_F_NEXT);
			vd[s].next = virtio_rw16(sc, qe->qe_index);
			s = qe->qe_index;
		}
		vd[s].flags = virtio_rw16(sc, 0);

		return 0;
	}
}
/*
 * enqueue: enqueue a single dmamap.
 */
int
virtio_enqueue(struct virtio_softc *sc, struct virtqueue *vq, int slot,
    bus_dmamap_t dmamap, bool write)
{
	struct vq_entry *qe1 = &vq->vq_entries[slot];
	struct vring_desc *vd = qe1->qe_desc_base;
	int i;
	int s = qe1->qe_next;

	KASSERT(s >= 0);
	KASSERT(dmamap->dm_nsegs > 0);

	for (i = 0; i < dmamap->dm_nsegs; i++) {
		vd[s].addr = virtio_rw64(sc, dmamap->dm_segs[i].ds_addr);
		vd[s].len = virtio_rw32(sc, dmamap->dm_segs[i].ds_len);
		if (!write)
			vd[s].flags |= virtio_rw16(sc, VRING_DESC_F_WRITE);
		s = virtio_rw16(sc, vd[s].next);
	}
	qe1->qe_next = s;

	return 0;
}

int
virtio_enqueue_p(struct virtio_softc *sc, struct virtqueue *vq, int slot,
    bus_dmamap_t dmamap, bus_addr_t start, bus_size_t len,
    bool write)
{
	struct vq_entry *qe1 = &vq->vq_entries[slot];
	struct vring_desc *vd = qe1->qe_desc_base;
	int s = qe1->qe_next;

	KASSERT(s >= 0);
	KASSERT(dmamap->dm_nsegs == 1); /* XXX */
	KASSERT((dmamap->dm_segs[0].ds_len > start) &&
	    (dmamap->dm_segs[0].ds_len >= start + len));

	vd[s].addr = virtio_rw64(sc, dmamap->dm_segs[0].ds_addr + start);
	vd[s].len = virtio_rw32(sc, len);
	if (!write)
		vd[s].flags |= virtio_rw16(sc, VRING_DESC_F_WRITE);
	qe1->qe_next = virtio_rw16(sc, vd[s].next);

	return 0;
}

/*
 * enqueue_commit: add it to the aring.
 */
int
virtio_enqueue_commit(struct virtio_softc *sc, struct virtqueue *vq, int slot,
    bool notifynow)
{
	struct vq_entry *qe1;

	if (slot < 0) {
		mutex_enter(&vq->vq_aring_lock);
		goto notify;
	}
	vq_sync_descs(sc, vq, BUS_DMASYNC_PREWRITE);
	qe1 = &vq->vq_entries[slot];
	if (qe1->qe_indirect)
		vq_sync_indirect(sc, vq, slot, BUS_DMASYNC_PREWRITE);
	mutex_enter(&vq->vq_aring_lock);
	vq->vq_avail->ring[(vq->vq_avail_idx++) % vq->vq_num] =
	    virtio_rw16(sc, slot);

notify:
	if (notifynow) {
		uint16_t o, n, t;
		uint16_t flags;
		o = virtio_rw16(sc, vq->vq_avail->idx);
		n = vq->vq_avail_idx;

		/* publish avail idx */
		membar_producer();
		vq->vq_avail->idx = virtio_rw16(sc, vq->vq_avail_idx);
		vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
		vq->vq_queued++;

		membar_consumer();
		vq_sync_uring(sc, vq, BUS_DMASYNC_PREREAD);
		if (sc->sc_active_features & VIRTIO_F_RING_EVENT_IDX) {
			t = virtio_rw16(sc, *vq->vq_avail_event) + 1;
			if ((uint16_t) (n - t) < (uint16_t) (n - o))
				sc->sc_ops->kick(sc, vq->vq_index);
		} else {
			flags = virtio_rw16(sc, vq->vq_used->flags);
			if (!(flags & VRING_USED_F_NO_NOTIFY))
				sc->sc_ops->kick(sc, vq->vq_index);
		}
		vq_sync_uring(sc, vq, BUS_DMASYNC_POSTREAD);
		vq_sync_aring(sc, vq, BUS_DMASYNC_POSTWRITE);
	}
	mutex_exit(&vq->vq_aring_lock);

	return 0;
}
/*
 * enqueue_abort: rollback.
 */
int
virtio_enqueue_abort(struct virtio_softc *sc, struct virtqueue *vq, int slot)
{
	struct vq_entry *qe = &vq->vq_entries[slot];
	struct vring_desc *vd;
	int s;

	if (qe->qe_next < 0) {
		vq_free_entry(vq, qe);
		return 0;
	}

	s = slot;
	vd = &vq->vq_desc[0];
	while (virtio_rw16(sc, vd[s].flags) & VRING_DESC_F_NEXT) {
		s = virtio_rw16(sc, vd[s].next);
		vq_free_entry(vq, qe);
		qe = &vq->vq_entries[s];
	}
	vq_free_entry(vq, qe);
	return 0;
}

/*
 * Dequeue a request.
 */
/*
 * dequeue: dequeue a request from uring; dmamap_sync for uring is
 *	    already done in the interrupt handler.
 */
int
virtio_dequeue(struct virtio_softc *sc, struct virtqueue *vq,
    int *slotp, int *lenp)
{
	uint16_t slot, usedidx;
	struct vq_entry *qe;

	if (vq->vq_used_idx == virtio_rw16(sc, vq->vq_used->idx))
		return ENOENT;
	mutex_enter(&vq->vq_uring_lock);
	usedidx = vq->vq_used_idx++;
	mutex_exit(&vq->vq_uring_lock);
	usedidx %= vq->vq_num;
	slot = virtio_rw32(sc, vq->vq_used->ring[usedidx].id);
	qe = &vq->vq_entries[slot];

	if (qe->qe_indirect)
		vq_sync_indirect(sc, vq, slot, BUS_DMASYNC_POSTWRITE);

	if (slotp)
		*slotp = slot;
	if (lenp)
		*lenp = virtio_rw32(sc, vq->vq_used->ring[usedidx].len);

	return 0;
}

/*
 * dequeue_commit: complete dequeue; the slot is recycled for future use.
 *		   If you forget to call this the slot will be leaked.
 */
int
virtio_dequeue_commit(struct virtio_softc *sc, struct virtqueue *vq, int slot)
{
	struct vq_entry *qe = &vq->vq_entries[slot];
	struct vring_desc *vd = &vq->vq_desc[0];
	int s = slot;

	while (virtio_rw16(sc, vd[s].flags) & VRING_DESC_F_NEXT) {
		s = virtio_rw16(sc, vd[s].next);
		vq_free_entry(vq, qe);
		qe = &vq->vq_entries[s];
	}
	vq_free_entry(vq, qe);

	return 0;
}
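/*
 * Illustrative sketch (not part of the upstream source): the completion side
 * matching the "Typical usage" enqueue example above, as it might appear in
 * a child driver's (*vq_done)() callback.  dmamap_cmd/dmamap_payload and the
 * per-request bookkeeping are hypothetical.
 *
 *	while (virtio_dequeue(vsc, vq, &slot, &len) == 0) {
 *		bus_dmamap_sync(dmat, dmamap_cmd[slot], ...,
 *		    BUS_DMASYNC_POSTWRITE);
 *		bus_dmamap_sync(dmat, dmamap_payload[slot], ...,
 *		    iswrite ? BUS_DMASYNC_POSTWRITE : BUS_DMASYNC_POSTREAD);
 *		bus_dmamap_unload(dmat, dmamap_payload[slot]);
 *		<complete the request identified by slot; len bytes used>
 *		virtio_dequeue_commit(vsc, vq, slot);
 *	}
 */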
/*
 * Attach a child, fill all the members.
 */
void
virtio_child_attach_start(struct virtio_softc *sc, device_t child, int ipl,
    struct virtqueue *vqs,
    virtio_callback config_change,
    virtio_callback intr_hand,
    int req_flags, int req_features, const char *feat_bits)
{
	char buf[1024];

	sc->sc_child = child;
	sc->sc_ipl = ipl;
	sc->sc_vqs = vqs;
	sc->sc_config_change = config_change;
	sc->sc_intrhand = intr_hand;
	sc->sc_flags = req_flags;

	virtio_negotiate_features(sc, req_features);
	snprintb(buf, sizeof(buf), feat_bits, sc->sc_active_features);
	aprint_normal(": features: %s\n", buf);
	aprint_naive("\n");
}

void
virtio_child_attach_set_vqs(struct virtio_softc *sc,
    struct virtqueue *vqs, int nvq_pairs)
{

	KASSERT(nvq_pairs == 1 ||
	    (sc->sc_flags & VIRTIO_F_INTR_SOFTINT) == 0);
	if (nvq_pairs > 1)
		sc->sc_child_mq = true;

	sc->sc_vqs = vqs;
}

int
virtio_child_attach_finish(struct virtio_softc *sc)
{
	int r;

	sc->sc_finished_called = true;
	r = sc->sc_ops->alloc_interrupts(sc);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev, "failed to allocate interrupts\n");
		goto fail;
	}

	r = sc->sc_ops->setup_interrupts(sc, 0);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev, "failed to setup interrupts\n");
		goto fail;
	}

	KASSERT(sc->sc_soft_ih == NULL);
	if (sc->sc_flags & VIRTIO_F_INTR_SOFTINT) {
		u_int flags = SOFTINT_NET;
		if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE)
			flags |= SOFTINT_MPSAFE;

		sc->sc_soft_ih = softint_establish(flags, virtio_soft_intr, sc);
		if (sc->sc_soft_ih == NULL) {
			sc->sc_ops->free_interrupts(sc);
			aprint_error_dev(sc->sc_dev,
			    "failed to establish soft interrupt\n");
			goto fail;
		}
	}

	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
	return 0;

fail:
	if (sc->sc_soft_ih) {
		softint_disestablish(sc->sc_soft_ih);
		sc->sc_soft_ih = NULL;
	}

	sc->sc_ops->free_interrupts(sc);

	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
	return 1;
}

void
virtio_child_detach(struct virtio_softc *sc)
{
	sc->sc_child = NULL;
	sc->sc_vqs = NULL;

	virtio_device_reset(sc);

	sc->sc_ops->free_interrupts(sc);

	if (sc->sc_soft_ih) {
		softint_disestablish(sc->sc_soft_ih);
		sc->sc_soft_ih = NULL;
	}
}

void
virtio_child_attach_failed(struct virtio_softc *sc)
{
	virtio_child_detach(sc);

	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);

	sc->sc_child = VIRTIO_CHILD_FAILED;
}

bus_dma_tag_t
virtio_dmat(struct virtio_softc *sc)
{
	return sc->sc_dmat;
}

device_t
virtio_child(struct virtio_softc *sc)
{
	return sc->sc_child;
}

int
virtio_intrhand(struct virtio_softc *sc)
{
	return (sc->sc_intrhand)(sc);
}

uint64_t
virtio_features(struct virtio_softc *sc)
{
	return sc->sc_active_features;
}
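/*
 * Illustrative sketch (not part of the upstream source): the overall attach
 * flow expected from a child driver, tying the helpers above together.  The
 * feature mask, flag word, format string and failure label are hypothetical;
 * passing virtio_vq_intr as the interrupt callback is one common choice.
 *
 *	virtio_child_attach_start(vsc, self, IPL_BIO, msc->sc_vq,
 *	    NULL, virtio_vq_intr, MYDEV_FLAGS, MYDEV_FEATURES,
 *	    MYDEV_FEATURE_BITS);
 *	// ... read device config, virtio_alloc_vq() the queues ...
 *	if (virtio_child_attach_finish(vsc) != 0)
 *		goto failed;
 *	return;
 *    failed:
 *	virtio_child_attach_failed(vsc);
 */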
int
virtio_attach_failed(struct virtio_softc *sc)
{
	device_t self = sc->sc_dev;

	/* no error if it's not connected, but it failed */
	if (sc->sc_childdevid == 0)
		return 1;

	if (sc->sc_child == NULL) {
		aprint_error_dev(self,
		    "no matching child driver; not configured\n");
		return 1;
	}

	if (sc->sc_child == VIRTIO_CHILD_FAILED) {
		aprint_error_dev(self, "virtio configuration failed\n");
		return 1;
	}

	/* sanity check */
	if (!sc->sc_finished_called) {
		aprint_error_dev(self, "virtio internal error, child driver "
		    "signaled OK but didn't initialize interrupts\n");
		return 1;
	}

	return 0;
}

void
virtio_print_device_type(device_t self, int id, int revision)
{
	aprint_normal_dev(self, "%s device (rev. 0x%02x)\n",
	    (id < NDEVNAMES ? virtio_device_name[id] : "Unknown"),
	    revision);
}


MODULE(MODULE_CLASS_DRIVER, virtio, NULL);

#ifdef _MODULE
#include "ioconf.c"
#endif

static int
virtio_modcmd(modcmd_t cmd, void *opaque)
{
	int error = 0;

#ifdef _MODULE
	switch (cmd) {
	case MODULE_CMD_INIT:
		error = config_init_component(cfdriver_ioconf_virtio,
		    cfattach_ioconf_virtio, cfdata_ioconf_virtio);
		break;
	case MODULE_CMD_FINI:
		error = config_fini_component(cfdriver_ioconf_virtio,
		    cfattach_ioconf_virtio, cfdata_ioconf_virtio);
		break;
	default:
		error = ENOTTY;
		break;
	}
#endif

	return error;
}