/*	$OpenBSD: virtio_pci.c,v 1.21 2019/01/19 16:23:46 sf Exp $	*/
/*	$NetBSD: virtio.c,v 1.3 2011/11/02 23:05:52 njoly Exp $	*/

/*
 * Copyright (c) 2012 Stefan Fritsch.
 * Copyright (c) 2010 Minoura Makoto.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/mutex.h>

#include <dev/pci/pcidevs.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/pv/virtioreg.h>
#include <dev/pv/virtiovar.h>
#include <dev/pci/virtio_pcireg.h>

/*
 * XXX: Before being used on big endian arches, the access to config registers
 * XXX: needs to be reviewed/fixed. The non-device specific registers are
 * XXX: PCI-endian while the device specific registers are native endian.
 */

#define MAX_MSIX_VECS	8

struct virtio_pci_softc;

int		virtio_pci_match(struct device *, void *, void *);
void		virtio_pci_attach(struct device *, struct device *, void *);
int		virtio_pci_detach(struct device *, int);

void		virtio_pci_kick(struct virtio_softc *, uint16_t);
uint8_t		virtio_pci_read_device_config_1(struct virtio_softc *, int);
uint16_t	virtio_pci_read_device_config_2(struct virtio_softc *, int);
uint32_t	virtio_pci_read_device_config_4(struct virtio_softc *, int);
uint64_t	virtio_pci_read_device_config_8(struct virtio_softc *, int);
void		virtio_pci_write_device_config_1(struct virtio_softc *, int, uint8_t);
void		virtio_pci_write_device_config_2(struct virtio_softc *, int, uint16_t);
void		virtio_pci_write_device_config_4(struct virtio_softc *, int, uint32_t);
void		virtio_pci_write_device_config_8(struct virtio_softc *, int, uint64_t);
uint16_t	virtio_pci_read_queue_size(struct virtio_softc *, uint16_t);
void		virtio_pci_setup_queue(struct virtio_softc *, uint16_t, uint32_t);
void		virtio_pci_set_status(struct virtio_softc *, int);
uint32_t	virtio_pci_negotiate_features(struct virtio_softc *, uint32_t,
		    const struct virtio_feature_name *);
int		virtio_pci_msix_establish(struct virtio_pci_softc *, struct pci_attach_args *, int, int (*)(void *), void *);
int		virtio_pci_setup_msix(struct virtio_pci_softc *, struct pci_attach_args *, int);
void		virtio_pci_free_irqs(struct virtio_pci_softc *);
int		virtio_pci_poll_intr(void *);
int		virtio_pci_legacy_intr(void *);
int		virtio_pci_legacy_intr_mpsafe(void *);
int		virtio_pci_config_intr(void *);
int		virtio_pci_queue_intr(void *);
int		virtio_pci_shared_queue_intr(void *);

enum irq_type {
	IRQ_NO_MSIX,
	IRQ_MSIX_SHARED, /* vec 0: config irq, vec 1 shared by all vqs */
	IRQ_MSIX_PER_VQ, /* vec 0: config irq, vec n: irq of vq[n-1] */
};

struct virtio_pci_softc {
	struct virtio_softc	sc_sc;
	pci_chipset_tag_t	sc_pc;

	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_iosize;

	void			*sc_ih[MAX_MSIX_VECS];

	int			sc_config_offset;
	enum irq_type		sc_irq_type;
};

struct cfattach virtio_pci_ca = {
	sizeof(struct virtio_pci_softc),
	virtio_pci_match,
	virtio_pci_attach,
	virtio_pci_detach,
	NULL
};

struct virtio_ops virtio_pci_ops = {
	virtio_pci_kick,
	virtio_pci_read_device_config_1,
	virtio_pci_read_device_config_2,
	virtio_pci_read_device_config_4,
	virtio_pci_read_device_config_8,
	virtio_pci_write_device_config_1,
	virtio_pci_write_device_config_2,
	virtio_pci_write_device_config_4,
	virtio_pci_write_device_config_8,
	virtio_pci_read_queue_size,
	virtio_pci_setup_queue,
	virtio_pci_set_status,
	virtio_pci_negotiate_features,
	virtio_pci_poll_intr,
};

uint16_t
virtio_pci_read_queue_size(struct virtio_softc *vsc, uint16_t idx)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, VIRTIO_CONFIG_QUEUE_SELECT,
	    idx);
	return bus_space_read_2(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_SIZE);
}

void
virtio_pci_setup_queue(struct virtio_softc *vsc, uint16_t idx, uint32_t addr)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, VIRTIO_CONFIG_QUEUE_SELECT,
	    idx);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, VIRTIO_CONFIG_QUEUE_ADDRESS,
	    addr);

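	/*
	 * Note: with the legacy interface the QUEUE_ADDRESS register takes
	 * the ring's guest-physical address in units of the 4096-byte
	 * virtio page, so the caller is assumed to pass a page frame
	 * number here; writing 0 marks the queue as unused.
	 */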
	/*
	 * This path is only executed if this function is called after
	 * the child's attach function has finished. In other cases,
	 * it's done in virtio_pci_setup_msix().
	 */
	if (sc->sc_irq_type != IRQ_NO_MSIX) {
		int vec = 1;
		if (sc->sc_irq_type == IRQ_MSIX_PER_VQ)
			vec += idx;
		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
		    VIRTIO_MSI_QUEUE_VECTOR, vec);
	}
}

void
virtio_pci_set_status(struct virtio_softc *vsc, int status)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	int old = 0;

	if (status != 0)
		old = bus_space_read_1(sc->sc_iot, sc->sc_ioh,
		    VIRTIO_CONFIG_DEVICE_STATUS);
	bus_space_write_1(sc->sc_iot, sc->sc_ioh, VIRTIO_CONFIG_DEVICE_STATUS,
	    status|old);
}

int
virtio_pci_match(struct device *parent, void *match, void *aux)
{
	struct pci_attach_args *pa;

	pa = (struct pci_attach_args *)aux;
	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_QUMRANET &&
	    PCI_PRODUCT(pa->pa_id) >= 0x1000 &&
	    PCI_PRODUCT(pa->pa_id) <= 0x103f &&
	    PCI_REVISION(pa->pa_class) == 0)
		return 1;
	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_OPENBSD &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_OPENBSD_CONTROL)
		return 1;
	return 0;
}

void
virtio_pci_attach(struct device *parent, struct device *self, void *aux)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)self;
	struct virtio_softc *vsc = &sc->sc_sc;
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	int revision;
	pcireg_t id;
	char const *intrstr;
	pci_intr_handle_t ih;

	revision = PCI_REVISION(pa->pa_class);
	if (revision != 0) {
		printf("unknown revision 0x%02x; giving up\n", revision);
		return;
	}

	/* subsystem ID shows what I am */
	id = PCI_PRODUCT(pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG));

	printf("\n");

	vsc->sc_ops = &virtio_pci_ops;
	sc->sc_pc = pc;
	vsc->sc_dmat = pa->pa_dmat;
	sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
	sc->sc_irq_type = IRQ_NO_MSIX;

	/*
	 * For virtio, ignore normal MSI black/white-listing depending on the
	 * PCI bridge but enable it unconditionally.
	 */
	pa->pa_flags |= PCI_FLAGS_MSI_ENABLED;

	if (pci_mapreg_map(pa, PCI_MAPREG_START, PCI_MAPREG_TYPE_IO, 0,
	    &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_iosize, 0)) {
		printf("%s: can't map i/o space\n", vsc->sc_dev.dv_xname);
		return;
	}

	virtio_device_reset(vsc);
	virtio_pci_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
	virtio_pci_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);

	/* XXX: use softc as aux... */
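	/*
	 * The subsystem ID read above names the virtio device type; the
	 * matching child driver (e.g. vio(4) for network, vioblk(4) for
	 * block) attaches against sc_childdevid via config_found() below.
	 */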
	vsc->sc_childdevid = id;
	vsc->sc_child = NULL;
	config_found(self, sc, NULL);
	if (vsc->sc_child == NULL) {
		printf("%s: no matching child driver; not configured\n",
		    vsc->sc_dev.dv_xname);
		goto fail_1;
	}
	if (vsc->sc_child == VIRTIO_CHILD_ERROR) {
		printf("%s: virtio configuration failed\n",
		    vsc->sc_dev.dv_xname);
		goto fail_1;
	}

	if (virtio_pci_setup_msix(sc, pa, 0) == 0) {
		sc->sc_irq_type = IRQ_MSIX_PER_VQ;
		intrstr = "msix per-VQ";
	} else if (virtio_pci_setup_msix(sc, pa, 1) == 0) {
		sc->sc_irq_type = IRQ_MSIX_SHARED;
		intrstr = "msix shared";
	} else {
		int (*ih_func)(void *) = virtio_pci_legacy_intr;
		if (pci_intr_map_msi(pa, &ih) != 0 &&
		    pci_intr_map(pa, &ih) != 0) {
			printf("%s: couldn't map interrupt\n",
			    vsc->sc_dev.dv_xname);
			goto fail_2;
		}
		intrstr = pci_intr_string(pc, ih);
		/*
		 * We always set the IPL_MPSAFE flag in order to do the
		 * relatively expensive ISR read without lock, and then
		 * grab the kernel lock in the interrupt handler.
		 */
		if (vsc->sc_ipl & IPL_MPSAFE)
			ih_func = virtio_pci_legacy_intr_mpsafe;
		sc->sc_ih[0] = pci_intr_establish(pc, ih,
		    vsc->sc_ipl | IPL_MPSAFE, ih_func, sc,
		    vsc->sc_dev.dv_xname);
		if (sc->sc_ih[0] == NULL) {
			printf("%s: couldn't establish interrupt",
			    vsc->sc_dev.dv_xname);
			if (intrstr != NULL)
				printf(" at %s", intrstr);
			printf("\n");
			goto fail_2;
		}
	}
	printf("%s: %s\n", vsc->sc_dev.dv_xname, intrstr);

	virtio_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
	return;

fail_2:
	config_detach(vsc->sc_child, 0);
fail_1:
	/* no pci_mapreg_unmap() or pci_intr_unmap() */
	virtio_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
}

int
virtio_pci_detach(struct device *self, int flags)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)self;
	struct virtio_softc *vsc = &sc->sc_sc;
	int r;

	if (vsc->sc_child != 0 && vsc->sc_child != VIRTIO_CHILD_ERROR) {
		r = config_detach(vsc->sc_child, flags);
		if (r)
			return r;
	}
	KASSERT(vsc->sc_child == 0 || vsc->sc_child == VIRTIO_CHILD_ERROR);
	KASSERT(vsc->sc_vqs == 0);
	virtio_pci_free_irqs(sc);
	if (sc->sc_iosize)
		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_iosize);
	sc->sc_iosize = 0;

	return 0;
}

/*
 * Feature negotiation.
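 * The host's feature bits are read from VIRTIO_CONFIG_DEVICE_FEATURES,
 * masked with the features the guest driver supports, and the result is
 * written back to VIRTIO_CONFIG_GUEST_FEATURES and recorded in sc_features.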
 * Prints available / negotiated features if guest_feature_names != NULL and
 * VIRTIO_DEBUG is 1
 */
uint32_t
virtio_pci_negotiate_features(struct virtio_softc *vsc, uint32_t guest_features,
    const struct virtio_feature_name *guest_feature_names)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	uint32_t host, neg;

	/*
	 * indirect descriptors can be switched off by setting bit 1 in the
	 * driver flags, see config(8)
	 */
	if (!(vsc->sc_dev.dv_cfdata->cf_flags & VIRTIO_CF_NO_INDIRECT) &&
	    !(vsc->sc_child->dv_cfdata->cf_flags & VIRTIO_CF_NO_INDIRECT)) {
		guest_features |= VIRTIO_F_RING_INDIRECT_DESC;
	} else {
		printf("RingIndirectDesc disabled by UKC\n");
	}
	host = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_DEVICE_FEATURES);
	neg = host & guest_features;
#if VIRTIO_DEBUG
	if (guest_feature_names)
		virtio_log_features(host, neg, guest_feature_names);
#endif
	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_GUEST_FEATURES, neg);
	vsc->sc_features = neg;
	if (neg & VIRTIO_F_RING_INDIRECT_DESC)
		vsc->sc_indirect = 1;
	else
		vsc->sc_indirect = 0;

	return neg;
}

/*
 * Device configuration registers.
 */
uint8_t
virtio_pci_read_device_config_1(struct virtio_softc *vsc, int index)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	return bus_space_read_1(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index);
}

uint16_t
virtio_pci_read_device_config_2(struct virtio_softc *vsc, int index)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	return bus_space_read_2(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index);
}

uint32_t
virtio_pci_read_device_config_4(struct virtio_softc *vsc, int index)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	return bus_space_read_4(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index);
}

uint64_t
virtio_pci_read_device_config_8(struct virtio_softc *vsc, int index)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	uint64_t r;

	r = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index + sizeof(uint32_t));
	r <<= 32;
	r += bus_space_read_4(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index);
	return r;
}

void
virtio_pci_write_device_config_1(struct virtio_softc *vsc, int index,
    uint8_t value)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	bus_space_write_1(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index, value);
}

void
virtio_pci_write_device_config_2(struct virtio_softc *vsc, int index,
    uint16_t value)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	bus_space_write_2(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index, value);
}

void
virtio_pci_write_device_config_4(struct virtio_softc *vsc,
    int index, uint32_t value)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index, value);
}

void
virtio_pci_write_device_config_8(struct virtio_softc *vsc,
    int index, uint64_t value)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index, value & 0xffffffff);
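	/*
	 * The legacy config window is an I/O space BAR where accesses are
	 * at most 32 bits wide, so the 64-bit value is written as two
	 * halves, low word first, mirroring the split read in
	 * virtio_pci_read_device_config_8().
	 */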
	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index + sizeof(uint32_t), value >> 32);
}

int
virtio_pci_msix_establish(struct virtio_pci_softc *sc,
    struct pci_attach_args *pa, int idx, int (*handler)(void *), void *ih_arg)
{
	struct virtio_softc *vsc = &sc->sc_sc;
	pci_intr_handle_t ih;

	if (pci_intr_map_msix(pa, idx, &ih) != 0) {
#if VIRTIO_DEBUG
		printf("%s[%d]: pci_intr_map_msix failed\n",
		    vsc->sc_dev.dv_xname, idx);
#endif
		return 1;
	}
	sc->sc_ih[idx] = pci_intr_establish(sc->sc_pc, ih, vsc->sc_ipl,
	    handler, ih_arg, vsc->sc_dev.dv_xname);
	if (sc->sc_ih[idx] == NULL) {
		printf("%s[%d]: couldn't establish msix interrupt\n",
		    vsc->sc_dev.dv_xname, idx);
		return 1;
	}
	return 0;
}

void
virtio_pci_free_irqs(struct virtio_pci_softc *sc)
{
	struct virtio_softc *vsc = &sc->sc_sc;
	int i;

	if (sc->sc_config_offset == VIRTIO_CONFIG_DEVICE_CONFIG_MSI) {
		for (i = 0; i < vsc->sc_nvqs; i++) {
			bus_space_write_2(sc->sc_iot, sc->sc_ioh,
			    VIRTIO_CONFIG_QUEUE_SELECT, i);
			bus_space_write_2(sc->sc_iot, sc->sc_ioh,
			    VIRTIO_MSI_QUEUE_VECTOR, VIRTIO_MSI_NO_VECTOR);
		}
	}

	for (i = 0; i < MAX_MSIX_VECS; i++) {
		if (sc->sc_ih[i]) {
			pci_intr_disestablish(sc->sc_pc, sc->sc_ih[i]);
			sc->sc_ih[i] = NULL;
		}
	}

	sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
}

int
virtio_pci_setup_msix(struct virtio_pci_softc *sc, struct pci_attach_args *pa,
    int shared)
{
	struct virtio_softc *vsc = &sc->sc_sc;
	int i;

	if (virtio_pci_msix_establish(sc, pa, 0, virtio_pci_config_intr, vsc))
		return 1;
	sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_MSI;
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, VIRTIO_MSI_CONFIG_VECTOR, 0);

	if (shared) {
		if (virtio_pci_msix_establish(sc, pa, 1,
		    virtio_pci_shared_queue_intr, vsc)) {
			goto fail;
		}

		for (i = 0; i < vsc->sc_nvqs; i++) {
			bus_space_write_2(sc->sc_iot, sc->sc_ioh,
			    VIRTIO_CONFIG_QUEUE_SELECT, i);
			bus_space_write_2(sc->sc_iot, sc->sc_ioh,
			    VIRTIO_MSI_QUEUE_VECTOR, 1);
		}
	} else {
		/* one vector per virtqueue; i < sc_nvqs keeps sc_vqs[i] in bounds */
		for (i = 0; i < vsc->sc_nvqs; i++) {
			if (virtio_pci_msix_establish(sc, pa, i + 1,
			    virtio_pci_queue_intr, &vsc->sc_vqs[i])) {
				goto fail;
			}
			bus_space_write_2(sc->sc_iot, sc->sc_ioh,
			    VIRTIO_CONFIG_QUEUE_SELECT, i);
			bus_space_write_2(sc->sc_iot, sc->sc_ioh,
			    VIRTIO_MSI_QUEUE_VECTOR, i + 1);
		}
	}

	return 0;
fail:
	virtio_pci_free_irqs(sc);
	return 1;
}

/*
 * Interrupt handler.
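 *
 * Without MSI-X, a single legacy handler reads the ISR status register,
 * which both reports and acknowledges the interrupt, then dispatches config
 * changes and virtqueue completions. With MSI-X, config changes and queue
 * notifications arrive on separate vectors, so no ISR read is needed.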
 */

/*
 * Only used without MSI-X
 */
int
virtio_pci_legacy_intr(void *arg)
{
	struct virtio_pci_softc *sc = arg;
	struct virtio_softc *vsc = &sc->sc_sc;
	int isr, r = 0;

	/* check and ack the interrupt */
	isr = bus_space_read_1(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_ISR_STATUS);
	if (isr == 0)
		return 0;
	KERNEL_LOCK();
	if ((isr & VIRTIO_CONFIG_ISR_CONFIG_CHANGE) &&
	    (vsc->sc_config_change != NULL)) {
		r = (vsc->sc_config_change)(vsc);
	}
	r |= virtio_check_vqs(vsc);
	KERNEL_UNLOCK();

	return r;
}

int
virtio_pci_legacy_intr_mpsafe(void *arg)
{
	struct virtio_pci_softc *sc = arg;
	struct virtio_softc *vsc = &sc->sc_sc;
	int isr, r = 0;

	/* check and ack the interrupt */
	isr = bus_space_read_1(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_ISR_STATUS);
	if (isr == 0)
		return 0;
	if ((isr & VIRTIO_CONFIG_ISR_CONFIG_CHANGE) &&
	    (vsc->sc_config_change != NULL)) {
		r = (vsc->sc_config_change)(vsc);
	}
	r |= virtio_check_vqs(vsc);
	return r;
}

/*
 * Only used with MSI-X
 */
int
virtio_pci_config_intr(void *arg)
{
	struct virtio_softc *vsc = arg;

	if (vsc->sc_config_change != NULL)
		return vsc->sc_config_change(vsc);
	return 0;
}

/*
 * Only used with MSI-X
 */
int
virtio_pci_queue_intr(void *arg)
{
	struct virtqueue *vq = arg;

	if (vq->vq_done)
		return (vq->vq_done)(vq);
	return 0;
}

int
virtio_pci_shared_queue_intr(void *arg)
{
	struct virtio_softc *vsc = arg;

	return virtio_check_vqs(vsc);
}

/*
 * Interrupt handler to be used when polling.
 * We cannot use isr here because it is not defined in MSI-X mode.
 */
int
virtio_pci_poll_intr(void *arg)
{
	struct virtio_pci_softc *sc = arg;
	struct virtio_softc *vsc = &sc->sc_sc;
	int r = 0;

	if (vsc->sc_config_change != NULL)
		r = (vsc->sc_config_change)(vsc);

	r |= virtio_check_vqs(vsc);

	return r;
}

void
virtio_pci_kick(struct virtio_softc *vsc, uint16_t idx)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, VIRTIO_CONFIG_QUEUE_NOTIFY,
	    idx);
}