/*	$OpenBSD: virtio_pci.c,v 1.23 2019/03/24 18:21:12 sf Exp $	*/
/*	$NetBSD: virtio.c,v 1.3 2011/11/02 23:05:52 njoly Exp $	*/

/*
 * Copyright (c) 2012 Stefan Fritsch.
 * Copyright (c) 2010 Minoura Makoto.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/mutex.h>

#include <dev/pci/pcidevs.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/pv/virtioreg.h>
#include <dev/pv/virtiovar.h>
#include <dev/pci/virtio_pcireg.h>

/*
 * XXX: Before being used on big endian arches, the access to config registers
 * XXX: needs to be reviewed/fixed. The non-device specific registers are
 * XXX: PCI-endian while the device specific registers are native endian.
 */
#define MAX_MSIX_VECS	8

struct virtio_pci_softc;

int		virtio_pci_match(struct device *, void *, void *);
void		virtio_pci_attach(struct device *, struct device *, void *);
int		virtio_pci_detach(struct device *, int);

void		virtio_pci_kick(struct virtio_softc *, uint16_t);
uint8_t		virtio_pci_read_device_config_1(struct virtio_softc *, int);
uint16_t	virtio_pci_read_device_config_2(struct virtio_softc *, int);
uint32_t	virtio_pci_read_device_config_4(struct virtio_softc *, int);
uint64_t	virtio_pci_read_device_config_8(struct virtio_softc *, int);
void		virtio_pci_write_device_config_1(struct virtio_softc *, int, uint8_t);
void		virtio_pci_write_device_config_2(struct virtio_softc *, int, uint16_t);
void		virtio_pci_write_device_config_4(struct virtio_softc *, int, uint32_t);
void		virtio_pci_write_device_config_8(struct virtio_softc *, int, uint64_t);
uint16_t	virtio_pci_read_queue_size(struct virtio_softc *, uint16_t);
void		virtio_pci_setup_queue(struct virtio_softc *, struct virtqueue *, uint64_t);
void		virtio_pci_set_status(struct virtio_softc *, int);
uint64_t	virtio_pci_negotiate_features(struct virtio_softc *, uint64_t,
		    const struct virtio_feature_name *);
int		virtio_pci_msix_establish(struct virtio_pci_softc *, struct pci_attach_args *, int, int (*)(void *), void *);
int		virtio_pci_setup_msix(struct virtio_pci_softc *, struct pci_attach_args *, int);
void		virtio_pci_free_irqs(struct virtio_pci_softc *);
int		virtio_pci_poll_intr(void *);
int		virtio_pci_legacy_intr(void *);
int		virtio_pci_legacy_intr_mpsafe(void *);
int		virtio_pci_config_intr(void *);
int		virtio_pci_queue_intr(void *);
int		virtio_pci_shared_queue_intr(void *);

enum irq_type {
	IRQ_NO_MSIX,
	IRQ_MSIX_SHARED, /* vec 0: config irq, vec 1 shared by all vqs */
	IRQ_MSIX_PER_VQ, /* vec 0: config irq, vec n: irq of vq[n-1] */
};

struct virtio_pci_softc {
	struct virtio_softc	sc_sc;
	pci_chipset_tag_t	sc_pc;

	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_iosize;

	void			*sc_ih[MAX_MSIX_VECS];

	int			sc_config_offset;
	enum irq_type		sc_irq_type;
};

struct cfattach virtio_pci_ca = {
	sizeof(struct virtio_pci_softc),
	virtio_pci_match,
	virtio_pci_attach,
	virtio_pci_detach,
	NULL
};

struct virtio_ops virtio_pci_ops = {
	virtio_pci_kick,
	virtio_pci_read_device_config_1,
	virtio_pci_read_device_config_2,
	virtio_pci_read_device_config_4,
	virtio_pci_read_device_config_8,
	virtio_pci_write_device_config_1,
	virtio_pci_write_device_config_2,
	virtio_pci_write_device_config_4,
	virtio_pci_write_device_config_8,
	virtio_pci_read_queue_size,
	virtio_pci_setup_queue,
	virtio_pci_set_status,
	virtio_pci_negotiate_features,
	virtio_pci_poll_intr,
};

uint16_t
virtio_pci_read_queue_size(struct virtio_softc *vsc, uint16_t idx)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, VIRTIO_CONFIG_QUEUE_SELECT,
	    idx);
	return bus_space_read_2(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_SIZE);
}
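
/*
 * Legacy virtqueue setup: select the queue, then program the guest
 * physical address of its ring.  The legacy interface takes a page
 * frame number, hence the division by VIRTIO_PAGE_SIZE.
 */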
void
virtio_pci_setup_queue(struct virtio_softc *vsc, struct virtqueue *vq,
    uint64_t addr)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, VIRTIO_CONFIG_QUEUE_SELECT,
	    vq->vq_index);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, VIRTIO_CONFIG_QUEUE_ADDRESS,
	    addr / VIRTIO_PAGE_SIZE);

	/*
	 * This path is only executed if this function is called after
	 * the child's attach function has finished. In other cases,
	 * it's done in virtio_pci_setup_msix().
	 */
	if (sc->sc_irq_type != IRQ_NO_MSIX) {
		int vec = 1;
		if (sc->sc_irq_type == IRQ_MSIX_PER_VQ)
			vec += vq->vq_index;
		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
		    VIRTIO_MSI_QUEUE_VECTOR, vec);
	}
}

void
virtio_pci_set_status(struct virtio_softc *vsc, int status)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	int old = 0;

	if (status != 0)
		old = bus_space_read_1(sc->sc_iot, sc->sc_ioh,
		    VIRTIO_CONFIG_DEVICE_STATUS);
	bus_space_write_1(sc->sc_iot, sc->sc_ioh, VIRTIO_CONFIG_DEVICE_STATUS,
	    status|old);
}

int
virtio_pci_match(struct device *parent, void *match, void *aux)
{
	struct pci_attach_args *pa;

	pa = (struct pci_attach_args *)aux;
	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_QUMRANET &&
	    PCI_PRODUCT(pa->pa_id) >= 0x1000 &&
	    PCI_PRODUCT(pa->pa_id) <= 0x103f &&
	    PCI_REVISION(pa->pa_class) == 0)
		return 1;
	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_OPENBSD &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_OPENBSD_CONTROL)
		return 1;
	return 0;
}
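
/*
 * Attach: map the I/O BAR, reset the device and announce the driver
 * (ACK and DRIVER status bits), attach the matching child driver based
 * on the PCI subsystem ID, then set up interrupts.  Per-virtqueue MSI-X
 * is preferred, a shared MSI-X vector is the first fallback and a
 * single legacy/MSI interrupt the last.  DRIVER_OK is only set once all
 * of this has succeeded.
 */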
void
virtio_pci_attach(struct device *parent, struct device *self, void *aux)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)self;
	struct virtio_softc *vsc = &sc->sc_sc;
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	int revision;
	pcireg_t id;
	char const *intrstr;
	pci_intr_handle_t ih;

	revision = PCI_REVISION(pa->pa_class);
	if (revision != 0) {
		printf("unknown revision 0x%02x; giving up\n", revision);
		return;
	}

	/* subsystem ID shows what I am */
	id = PCI_PRODUCT(pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG));

	printf("\n");

	vsc->sc_ops = &virtio_pci_ops;
	sc->sc_pc = pc;
	vsc->sc_dmat = pa->pa_dmat;
	sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
	sc->sc_irq_type = IRQ_NO_MSIX;

	/*
	 * For virtio, ignore normal MSI black/white-listing depending on the
	 * PCI bridge but enable it unconditionally.
	 */
	pa->pa_flags |= PCI_FLAGS_MSI_ENABLED;

	if (pci_mapreg_map(pa, PCI_MAPREG_START, PCI_MAPREG_TYPE_IO, 0,
	    &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_iosize, 0)) {
		printf("%s: can't map i/o space\n", vsc->sc_dev.dv_xname);
		return;
	}

	virtio_device_reset(vsc);
	virtio_pci_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
	virtio_pci_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);

	/* XXX: use softc as aux... */
	vsc->sc_childdevid = id;
	vsc->sc_child = NULL;
	config_found(self, sc, NULL);
	if (vsc->sc_child == NULL) {
		printf("%s: no matching child driver; not configured\n",
		    vsc->sc_dev.dv_xname);
		goto fail_1;
	}
	if (vsc->sc_child == VIRTIO_CHILD_ERROR) {
		printf("%s: virtio configuration failed\n",
		    vsc->sc_dev.dv_xname);
		goto fail_1;
	}

	if (virtio_pci_setup_msix(sc, pa, 0) == 0) {
		sc->sc_irq_type = IRQ_MSIX_PER_VQ;
		intrstr = "msix per-VQ";
	} else if (virtio_pci_setup_msix(sc, pa, 1) == 0) {
		sc->sc_irq_type = IRQ_MSIX_SHARED;
		intrstr = "msix shared";
	} else {
		int (*ih_func)(void *) = virtio_pci_legacy_intr;
		if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) {
			printf("%s: couldn't map interrupt\n", vsc->sc_dev.dv_xname);
			goto fail_2;
		}
		intrstr = pci_intr_string(pc, ih);
		/*
		 * We always set the IPL_MPSAFE flag in order to do the relatively
		 * expensive ISR read without lock, and then grab the kernel lock in
		 * the interrupt handler.
		 */
		if (vsc->sc_ipl & IPL_MPSAFE)
			ih_func = virtio_pci_legacy_intr_mpsafe;
		sc->sc_ih[0] = pci_intr_establish(pc, ih, vsc->sc_ipl | IPL_MPSAFE,
		    ih_func, sc, vsc->sc_dev.dv_xname);
		if (sc->sc_ih[0] == NULL) {
			printf("%s: couldn't establish interrupt", vsc->sc_dev.dv_xname);
			if (intrstr != NULL)
				printf(" at %s", intrstr);
			printf("\n");
			goto fail_2;
		}
	}
	printf("%s: %s\n", vsc->sc_dev.dv_xname, intrstr);

	virtio_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
	return;

fail_2:
	config_detach(vsc->sc_child, 0);
fail_1:
	/* no pci_mapreg_unmap() or pci_intr_unmap() */
	virtio_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
}
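
/*
 * Detach: the child driver is detached first and must have freed all of
 * its virtqueues by then (asserted below); afterwards the interrupt
 * handlers and the register mapping are released.
 */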
int
virtio_pci_detach(struct device *self, int flags)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)self;
	struct virtio_softc *vsc = &sc->sc_sc;
	int r;

	if (vsc->sc_child != 0 && vsc->sc_child != VIRTIO_CHILD_ERROR) {
		r = config_detach(vsc->sc_child, flags);
		if (r)
			return r;
	}
	KASSERT(vsc->sc_child == 0 || vsc->sc_child == VIRTIO_CHILD_ERROR);
	KASSERT(vsc->sc_vqs == 0);
	virtio_pci_free_irqs(sc);
	if (sc->sc_iosize)
		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_iosize);
	sc->sc_iosize = 0;

	return 0;
}

/*
 * Feature negotiation.
 * Prints available / negotiated features if guest_feature_names != NULL and
 * VIRTIO_DEBUG is 1
 */
uint64_t
virtio_pci_negotiate_features(struct virtio_softc *vsc, uint64_t guest_features,
    const struct virtio_feature_name *guest_feature_names)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	uint64_t host, neg;

	/*
	 * indirect descriptors can be switched off by setting bit 1 in the
	 * driver flags, see config(8)
	 */
	if (!(vsc->sc_dev.dv_cfdata->cf_flags & VIRTIO_CF_NO_INDIRECT) &&
	    !(vsc->sc_child->dv_cfdata->cf_flags & VIRTIO_CF_NO_INDIRECT)) {
		guest_features |= VIRTIO_F_RING_INDIRECT_DESC;
	} else {
		printf("RingIndirectDesc disabled by UKC\n");
	}
	host = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_DEVICE_FEATURES);
	neg = host & guest_features;
#if VIRTIO_DEBUG
	if (guest_feature_names)
		virtio_log_features(host, neg, guest_feature_names);
#endif
	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_GUEST_FEATURES, neg);
	vsc->sc_features = neg;
	if (neg & VIRTIO_F_RING_INDIRECT_DESC)
		vsc->sc_indirect = 1;
	else
		vsc->sc_indirect = 0;

	return neg;
}

/*
 * Device configuration registers.
 */
uint8_t
virtio_pci_read_device_config_1(struct virtio_softc *vsc, int index)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	return bus_space_read_1(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index);
}

uint16_t
virtio_pci_read_device_config_2(struct virtio_softc *vsc, int index)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	return bus_space_read_2(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index);
}

uint32_t
virtio_pci_read_device_config_4(struct virtio_softc *vsc, int index)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	return bus_space_read_4(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index);
}

uint64_t
virtio_pci_read_device_config_8(struct virtio_softc *vsc, int index)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	uint64_t r;

	r = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index + sizeof(uint32_t));
	r <<= 32;
	r += bus_space_read_4(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index);
	return r;
}

void
virtio_pci_write_device_config_1(struct virtio_softc *vsc, int index,
    uint8_t value)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	bus_space_write_1(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index, value);
}

void
virtio_pci_write_device_config_2(struct virtio_softc *vsc, int index,
    uint16_t value)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	bus_space_write_2(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index, value);
}

void
virtio_pci_write_device_config_4(struct virtio_softc *vsc,
    int index, uint32_t value)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index, value);
}

void
virtio_pci_write_device_config_8(struct virtio_softc *vsc,
    int index, uint64_t value)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index, value & 0xffffffff);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index + sizeof(uint32_t), value >> 32);
}
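
/*
 * MSI-X vector layout (see also enum irq_type above):
 *
 *	vector 0	configuration change interrupt
 *	vector 1	all virtqueues (IRQ_MSIX_SHARED)
 *	vector 1 + n	virtqueue n (IRQ_MSIX_PER_VQ)
 *
 * virtio_pci_attach() tries the per-VQ layout first and falls back to
 * the shared layout; if neither can be established, a single legacy or
 * MSI interrupt is used instead.
 */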
int
virtio_pci_msix_establish(struct virtio_pci_softc *sc,
    struct pci_attach_args *pa, int idx, int (*handler)(void *), void *ih_arg)
{
	struct virtio_softc *vsc = &sc->sc_sc;
	pci_intr_handle_t ih;

	if (pci_intr_map_msix(pa, idx, &ih) != 0) {
#if VIRTIO_DEBUG
		printf("%s[%d]: pci_intr_map_msix failed\n",
		    vsc->sc_dev.dv_xname, idx);
#endif
		return 1;
	}
	sc->sc_ih[idx] = pci_intr_establish(sc->sc_pc, ih, vsc->sc_ipl,
	    handler, ih_arg, vsc->sc_dev.dv_xname);
	if (sc->sc_ih[idx] == NULL) {
		printf("%s[%d]: couldn't establish msix interrupt\n",
		    vsc->sc_dev.dv_xname, idx);
		return 1;
	}
	return 0;
}

void
virtio_pci_free_irqs(struct virtio_pci_softc *sc)
{
	struct virtio_softc *vsc = &sc->sc_sc;
	int i;

	if (sc->sc_config_offset == VIRTIO_CONFIG_DEVICE_CONFIG_MSI) {
		for (i = 0; i < vsc->sc_nvqs; i++) {
			bus_space_write_2(sc->sc_iot, sc->sc_ioh,
			    VIRTIO_CONFIG_QUEUE_SELECT, i);
			bus_space_write_2(sc->sc_iot, sc->sc_ioh,
			    VIRTIO_MSI_QUEUE_VECTOR, VIRTIO_MSI_NO_VECTOR);
		}
	}

	for (i = 0; i < MAX_MSIX_VECS; i++) {
		if (sc->sc_ih[i]) {
			pci_intr_disestablish(sc->sc_pc, sc->sc_ih[i]);
			sc->sc_ih[i] = NULL;
		}
	}

	sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
}

int
virtio_pci_setup_msix(struct virtio_pci_softc *sc, struct pci_attach_args *pa,
    int shared)
{
	struct virtio_softc *vsc = &sc->sc_sc;
	int i;

	if (virtio_pci_msix_establish(sc, pa, 0, virtio_pci_config_intr, vsc))
		return 1;
	sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_MSI;
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, VIRTIO_MSI_CONFIG_VECTOR, 0);

	if (shared) {
		if (virtio_pci_msix_establish(sc, pa, 1,
		    virtio_pci_shared_queue_intr, vsc)) {
			goto fail;
		}

		for (i = 0; i < vsc->sc_nvqs; i++) {
			bus_space_write_2(sc->sc_iot, sc->sc_ioh,
			    VIRTIO_CONFIG_QUEUE_SELECT, i);
			bus_space_write_2(sc->sc_iot, sc->sc_ioh,
			    VIRTIO_MSI_QUEUE_VECTOR, 1);
		}
	} else {
		for (i = 0; i < vsc->sc_nvqs; i++) {
			if (virtio_pci_msix_establish(sc, pa, i + 1,
			    virtio_pci_queue_intr, &vsc->sc_vqs[i])) {
				goto fail;
			}
			bus_space_write_2(sc->sc_iot, sc->sc_ioh,
			    VIRTIO_CONFIG_QUEUE_SELECT, i);
			bus_space_write_2(sc->sc_iot, sc->sc_ioh,
			    VIRTIO_MSI_QUEUE_VECTOR, i + 1);
		}
	}

	return 0;
fail:
	virtio_pci_free_irqs(sc);
	return 1;
}

/*
 * Interrupt handler.
 */
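
/*
 * With the legacy interrupt, reading the ISR status register both
 * returns and acknowledges the pending events.  A value of zero means
 * the interrupt was not raised by this device; if
 * VIRTIO_CONFIG_ISR_CONFIG_CHANGE is set the config change callback is
 * invoked, and the virtqueues are checked in either case.
 */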

/*
 * Only used without MSI-X
 */
int
virtio_pci_legacy_intr(void *arg)
{
	struct virtio_pci_softc *sc = arg;
	struct virtio_softc *vsc = &sc->sc_sc;
	int isr, r = 0;

	/* check and ack the interrupt */
	isr = bus_space_read_1(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_ISR_STATUS);
	if (isr == 0)
		return 0;
	KERNEL_LOCK();
	if ((isr & VIRTIO_CONFIG_ISR_CONFIG_CHANGE) &&
	    (vsc->sc_config_change != NULL)) {
		r = (vsc->sc_config_change)(vsc);
	}
	r |= virtio_check_vqs(vsc);
	KERNEL_UNLOCK();

	return r;
}

int
virtio_pci_legacy_intr_mpsafe(void *arg)
{
	struct virtio_pci_softc *sc = arg;
	struct virtio_softc *vsc = &sc->sc_sc;
	int isr, r = 0;

	/* check and ack the interrupt */
	isr = bus_space_read_1(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_ISR_STATUS);
	if (isr == 0)
		return 0;
	if ((isr & VIRTIO_CONFIG_ISR_CONFIG_CHANGE) &&
	    (vsc->sc_config_change != NULL)) {
		r = (vsc->sc_config_change)(vsc);
	}
	r |= virtio_check_vqs(vsc);
	return r;
}

/*
 * Only used with MSI-X
 */
int
virtio_pci_config_intr(void *arg)
{
	struct virtio_softc *vsc = arg;

	if (vsc->sc_config_change != NULL)
		return vsc->sc_config_change(vsc);
	return 0;
}

/*
 * Only used with MSI-X
 */
int
virtio_pci_queue_intr(void *arg)
{
	struct virtqueue *vq = arg;

	if (vq->vq_done)
		return (vq->vq_done)(vq);
	return 0;
}

int
virtio_pci_shared_queue_intr(void *arg)
{
	struct virtio_softc *vsc = arg;

	return virtio_check_vqs(vsc);
}

/*
 * Interrupt handler to be used when polling.
 * We cannot use isr here because it is not defined in MSI-X mode.
 */
int
virtio_pci_poll_intr(void *arg)
{
	struct virtio_pci_softc *sc = arg;
	struct virtio_softc *vsc = &sc->sc_sc;
	int r = 0;

	if (vsc->sc_config_change != NULL)
		r = (vsc->sc_config_change)(vsc);

	r |= virtio_check_vqs(vsc);

	return r;
}

void
virtio_pci_kick(struct virtio_softc *vsc, uint16_t idx)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, VIRTIO_CONFIG_QUEUE_NOTIFY,
	    idx);
}