1 /* $OpenBSD: virtio_pci.c,v 1.50 2025/01/14 14:28:38 sf Exp $ */ 2 /* $NetBSD: virtio.c,v 1.3 2011/11/02 23:05:52 njoly Exp $ */ 3 4 /* 5 * Copyright (c) 2012 Stefan Fritsch. 6 * Copyright (c) 2010 Minoura Makoto. 7 * All rights reserved. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 */ 29 30 #include <sys/param.h> 31 #include <sys/systm.h> 32 #include <sys/device.h> 33 #include <sys/mutex.h> 34 35 #include <dev/pci/pcidevs.h> 36 #include <dev/pci/pcireg.h> 37 #include <dev/pci/pcivar.h> 38 #include <dev/pci/virtio_pcireg.h> 39 40 #include <dev/pv/virtioreg.h> 41 #include <dev/pv/virtiovar.h> 42 43 #define DNPRINTF(n,x...) 
/* Debug printf; compiled to nothing unless built with VIRTIO_DEBUG. */
#define DNPRINTF(n,x...)						\
    do { if (VIRTIO_DEBUG >= n) printf(x); } while(0)


/*
 * XXX: Before being used on big endian arches, the access to config registers
 * XXX: needs to be reviewed/fixed. The non-device specific registers are
 * XXX: PCI-endian while the device specific registers are native endian.
 */

/* Upper bound on MSI-X vectors this driver will allocate per device. */
#define MAX_MSIX_VECS	16

struct virtio_pci_softc;
struct virtio_pci_attach_args;

int		virtio_pci_match(struct device *, void *, void *);
void		virtio_pci_attach(struct device *, struct device *, void *);
int		virtio_pci_attach_09(struct virtio_pci_softc *sc, struct pci_attach_args *pa);
int		virtio_pci_attach_10(struct virtio_pci_softc *sc, struct pci_attach_args *pa);
int		virtio_pci_detach(struct device *, int);

void		virtio_pci_kick(struct virtio_softc *, uint16_t);
int		virtio_pci_adjust_config_region(struct virtio_pci_softc *, int offset);
uint8_t		virtio_pci_read_device_config_1(struct virtio_softc *, int);
uint16_t	virtio_pci_read_device_config_2(struct virtio_softc *, int);
uint32_t	virtio_pci_read_device_config_4(struct virtio_softc *, int);
uint64_t	virtio_pci_read_device_config_8(struct virtio_softc *, int);
void		virtio_pci_write_device_config_1(struct virtio_softc *, int, uint8_t);
void		virtio_pci_write_device_config_2(struct virtio_softc *, int, uint16_t);
void		virtio_pci_write_device_config_4(struct virtio_softc *, int, uint32_t);
void		virtio_pci_write_device_config_8(struct virtio_softc *, int, uint64_t);
uint16_t	virtio_pci_read_queue_size(struct virtio_softc *, uint16_t);
void		virtio_pci_setup_queue(struct virtio_softc *, struct virtqueue *, uint64_t);
void		virtio_pci_setup_intrs(struct virtio_softc *);
int		virtio_pci_attach_finish(struct virtio_softc *, struct virtio_attach_args *);
int		virtio_pci_get_status(struct virtio_softc *);
void		virtio_pci_set_status(struct virtio_softc *, int);
int		virtio_pci_negotiate_features(struct virtio_softc *, const struct virtio_feature_name *);
int		virtio_pci_negotiate_features_10(struct virtio_softc *, const struct virtio_feature_name *);
void		virtio_pci_set_msix_queue_vector(struct virtio_pci_softc *, uint32_t, uint16_t);
void		virtio_pci_set_msix_config_vector(struct virtio_pci_softc *, uint16_t);
int		virtio_pci_msix_establish(struct virtio_pci_softc *, struct virtio_pci_attach_args *, int, struct cpu_info *, int (*)(void *), void *);
int		virtio_pci_setup_msix(struct virtio_pci_softc *, struct virtio_pci_attach_args *, int);
void		virtio_pci_intr_barrier(struct virtio_softc *);
int		virtio_pci_intr_establish(struct virtio_softc *, struct virtio_attach_args *, int, struct cpu_info *, int (*)(void *), void *);
void		virtio_pci_free_irqs(struct virtio_pci_softc *);
int		virtio_pci_poll_intr(void *);
int		virtio_pci_legacy_intr(void *);
int		virtio_pci_legacy_intr_mpsafe(void *);
int		virtio_pci_config_intr(void *);
int		virtio_pci_queue_intr(void *);
int		virtio_pci_shared_queue_intr(void *);
int		virtio_pci_find_cap(struct virtio_pci_softc *sc, int cfg_type, void *buf, int buflen);
#if VIRTIO_DEBUG
void		virtio_pci_dump_caps(struct virtio_pci_softc *sc);
#endif

/* How interrupts are routed to the device's config change and virtqueues. */
enum irq_type {
	IRQ_NO_MSIX,		/* single INTx/MSI interrupt for everything */
	IRQ_MSIX_SHARED,	/* vec 0: config irq, vec 1 shared by all vqs */
	IRQ_MSIX_PER_VQ,	/* vec 0: config irq, vec n: irq of vq[n-1] */
	IRQ_MSIX_CHILD,		/* assigned by child driver */
};

/* One established interrupt handler plus its printable name. */
struct virtio_pci_intr {
	char	 name[16];
	void	*ih;
};

struct virtio_pci_softc {
	/* Generic virtio state; first member so the softc casts work. */
	struct virtio_softc	sc_sc;
	pci_chipset_tag_t	sc_pc;
	pcitag_t		sc_ptag;

	/* Common config registers (with 0.9: the whole i/o bar). */
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_iosize;

	/* 1.0 only: the mapped bars that hold the capability regions. */
	bus_space_tag_t		sc_bars_iot[4];
	bus_space_handle_t	sc_bars_ioh[4];
	bus_size_t		sc_bars_iosize[4];

	/* Queue notify ("kick") region. */
	bus_space_tag_t		sc_notify_iot;
	bus_space_handle_t	sc_notify_ioh;
	bus_size_t		sc_notify_iosize;
	unsigned int		sc_notify_off_multiplier;

	/* Device specific config region. */
	bus_space_tag_t		sc_devcfg_iot;
	bus_space_handle_t	sc_devcfg_ioh;
	bus_size_t		sc_devcfg_iosize;
	/*
	 * With 0.9, the offset of the devcfg region in the io bar changes
	 * depending on MSI-X being enabled or not.
	 * With 1.0, this field is still used to remember if MSI-X is enabled
	 * or not.
	 */
	unsigned int		sc_devcfg_offset;

	/* ISR status byte; reading it acks a legacy interrupt. */
	bus_space_tag_t		sc_isr_iot;
	bus_space_handle_t	sc_isr_ioh;
	bus_size_t		sc_isr_iosize;

	struct virtio_pci_intr	*sc_intr;	/* array of sc_nintr entries */
	int			sc_nintr;

	enum irq_type		sc_irq_type;
};
/* Attach arguments handed to the child device via config_found(). */
struct virtio_pci_attach_args {
	struct virtio_attach_args	 vpa_va;
	struct pci_attach_args		*vpa_pa;
};


const struct cfattach virtio_pci_ca = {
	sizeof(struct virtio_pci_softc),
	virtio_pci_match,
	virtio_pci_attach,
	virtio_pci_detach,
	NULL
};

/* Bus-independent entry points used by the generic virtio layer. */
const struct virtio_ops virtio_pci_ops = {
	virtio_pci_kick,
	virtio_pci_read_device_config_1,
	virtio_pci_read_device_config_2,
	virtio_pci_read_device_config_4,
	virtio_pci_read_device_config_8,
	virtio_pci_write_device_config_1,
	virtio_pci_write_device_config_2,
	virtio_pci_write_device_config_4,
	virtio_pci_write_device_config_8,
	virtio_pci_read_queue_size,
	virtio_pci_setup_queue,
	virtio_pci_setup_intrs,
	virtio_pci_get_status,
	virtio_pci_set_status,
	virtio_pci_negotiate_features,
	virtio_pci_attach_finish,
	virtio_pci_poll_intr,
	virtio_pci_intr_barrier,
	virtio_pci_intr_establish,
};

/*
 * Read a 1/2/4/8 byte field from the 1.0 common config region.
 * 64-bit fields are read as two 32-bit accesses, high word first;
 * see the XXX at the top of the file about big-endian arches.
 */
static inline uint64_t
_cread(struct virtio_pci_softc *sc, unsigned off, unsigned size)
{
	uint64_t val;
	switch (size) {
	case 1:
		val = bus_space_read_1(sc->sc_iot, sc->sc_ioh, off);
		break;
	case 2:
		val = bus_space_read_2(sc->sc_iot, sc->sc_ioh, off);
		break;
	case 4:
		val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, off);
		break;
	case 8:
		val = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
		    off + sizeof(uint32_t));
		val <<= 32;
		val += bus_space_read_4(sc->sc_iot, sc->sc_ioh, off);
		break;
	}
	return val;
}

/* Read a named member of struct virtio_pci_common_cfg. */
#define CREAD(sc, memb) _cread(sc, offsetof(struct virtio_pci_common_cfg, memb), \
    sizeof(((struct virtio_pci_common_cfg *)0)->memb))

/*
 * Write a named member of struct virtio_pci_common_cfg, sized
 * automatically from the struct; 64-bit writes go low word first.
 */
#define CWRITE(sc, memb, val)						\
	do {								\
		struct virtio_pci_common_cfg c;				\
		size_t off = offsetof(struct virtio_pci_common_cfg, memb); \
		size_t size = sizeof(c.memb);				\
									\
		DNPRINTF(2, "%s: %d: off %#zx size %#zx write %#llx\n",	\
		    __func__, __LINE__, off, size,			\
		    (unsigned long long)val);				\
		switch (size) {						\
		case 1:							\
			bus_space_write_1(sc->sc_iot, sc->sc_ioh, off, val); \
			break;						\
		case 2:							\
			bus_space_write_2(sc->sc_iot, sc->sc_ioh, off, val); \
			break;						\
		case 4:							\
			bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val); \
			break;						\
		case 8:							\
			bus_space_write_4(sc->sc_iot, sc->sc_ioh, off,	\
			    (val) & 0xffffffff);			\
			bus_space_write_4(sc->sc_iot, sc->sc_ioh,	\
			    (off) + sizeof(uint32_t),			\
			    (uint64_t)(val) >> 32);			\
			break;						\
		}							\
	} while (0)

/*
 * Return the ring size of queue idx.  Selects the queue first, so this
 * must not race with other queue register accesses.
 */
uint16_t
virtio_pci_read_queue_size(struct virtio_softc *vsc, uint16_t idx)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	uint16_t ret;
	if (sc->sc_sc.sc_version_1) {
		CWRITE(sc, queue_select, idx);
		ret = CREAD(sc, queue_size);
	} else {
		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
		    VIRTIO_CONFIG_QUEUE_SELECT, idx);
		ret = bus_space_read_2(sc->sc_iot, sc->sc_ioh,
		    VIRTIO_CONFIG_QUEUE_SIZE);
	}
	return ret;
}

/*
 * Program the DMA address of a virtqueue into the device.
 * addr == 0 disables the queue (1.0 clears and disables it explicitly).
 * For 1.0 the queue's notify offset is latched into vq_notify_off.
 */
void
virtio_pci_setup_queue(struct virtio_softc *vsc, struct virtqueue *vq,
    uint64_t addr)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	if (sc->sc_sc.sc_version_1) {
		CWRITE(sc, queue_select, vq->vq_index);
		if (addr == 0) {
			CWRITE(sc, queue_enable, 0);
			CWRITE(sc, queue_desc, 0);
			CWRITE(sc, queue_avail, 0);
			CWRITE(sc, queue_used, 0);
		} else {
			CWRITE(sc, queue_desc, addr);
			CWRITE(sc, queue_avail, addr + vq->vq_availoffset);
			CWRITE(sc, queue_used, addr + vq->vq_usedoffset);
			CWRITE(sc, queue_enable, 1);
			vq->vq_notify_off = CREAD(sc, queue_notify_off);
		}
	} else {
		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
		    VIRTIO_CONFIG_QUEUE_SELECT, vq->vq_index);
		bus_space_write_4(sc->sc_iot, sc->sc_ioh,
		    VIRTIO_CONFIG_QUEUE_ADDRESS, addr / VIRTIO_PAGE_SIZE);
	}
}
/*
 * Route the per-queue and config-change MSI-X vectors into the device.
 * No-op when running without MSI-X.
 */
void
virtio_pci_setup_intrs(struct virtio_softc *vsc)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	int i;

	if (sc->sc_irq_type == IRQ_NO_MSIX)
		return;

	for (i = 0; i < vsc->sc_nvqs; i++) {
		unsigned vec = vsc->sc_vqs[i].vq_intr_vec;
		virtio_pci_set_msix_queue_vector(sc, i, vec);
	}
	/* The config change vector is always vector 0 (see setup_msix). */
	if (vsc->sc_config_change)
		virtio_pci_set_msix_config_vector(sc, 0);
}

/* Return the current device status byte. */
int
virtio_pci_get_status(struct virtio_softc *vsc)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;

	if (sc->sc_sc.sc_version_1)
		return CREAD(sc, device_status);
	else
		return bus_space_read_1(sc->sc_iot, sc->sc_ioh,
		    VIRTIO_CONFIG_DEVICE_STATUS);
}

/*
 * Set device status bits.  status != 0 is OR-ed into the current status;
 * status == 0 resets the device and busy-waits until the device reports
 * the reset as complete.
 */
void
virtio_pci_set_status(struct virtio_softc *vsc, int status)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	int old = 0;

	if (sc->sc_sc.sc_version_1) {
		if (status == 0) {
			CWRITE(sc, device_status, 0);
			while (CREAD(sc, device_status) != 0) {
				CPU_BUSY_CYCLE();
			}
		} else {
			old = CREAD(sc, device_status);
			CWRITE(sc, device_status, status|old);
		}
	} else {
		if (status == 0) {
			/* status|old == 0 here; this writes the reset. */
			bus_space_write_1(sc->sc_iot, sc->sc_ioh,
			    VIRTIO_CONFIG_DEVICE_STATUS, status|old);
			while (bus_space_read_1(sc->sc_iot, sc->sc_ioh,
			    VIRTIO_CONFIG_DEVICE_STATUS) != 0) {
				CPU_BUSY_CYCLE();
			}
		} else {
			old = bus_space_read_1(sc->sc_iot, sc->sc_ioh,
			    VIRTIO_CONFIG_DEVICE_STATUS);
			bus_space_write_1(sc->sc_iot, sc->sc_ioh,
			    VIRTIO_CONFIG_DEVICE_STATUS, status|old);
		}
	}
}

/*
 * Autoconf match: accept the OpenBSD control device and Qumranet/Red Hat
 * virtio IDs -- 0x1000-0x103f at revision 0 (virtio 0.9 transitional),
 * 0x1040-0x107f at revision 1 (virtio 1.0 modern).
 */
int
virtio_pci_match(struct device *parent, void *match, void *aux)
{
	struct pci_attach_args *pa;

	pa = (struct pci_attach_args *)aux;
	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_OPENBSD &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_OPENBSD_CONTROL)
		return 1;
	if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_QUMRANET)
		return 0;
	/* virtio 0.9 */
	if (PCI_PRODUCT(pa->pa_id) >= 0x1000 &&
	    PCI_PRODUCT(pa->pa_id) <= 0x103f &&
	    PCI_REVISION(pa->pa_class) == 0)
		return 1;
	/* virtio 1.0 */
	if (PCI_PRODUCT(pa->pa_id) >= 0x1040 &&
	    PCI_PRODUCT(pa->pa_id) <= 0x107f &&
	    PCI_REVISION(pa->pa_class) == 1)
		return 1;
	return 0;
}

#if VIRTIO_DEBUG
/* Walk the vendor-specific capability list and print every entry. */
void
virtio_pci_dump_caps(struct virtio_pci_softc *sc)
{
	pci_chipset_tag_t pc = sc->sc_pc;
	pcitag_t tag = sc->sc_ptag;
	int offset;
	union {
		pcireg_t reg[4];
		struct virtio_pci_cap vcap;
	} v;

	if (!pci_get_capability(pc, tag, PCI_CAP_VENDSPEC, &offset, &v.reg[0]))
		return;

	printf("\n");
	do {
		for (int i = 0; i < 4; i++)
			v.reg[i] = pci_conf_read(pc, tag, offset + i * 4);
		printf("%s: cfgoff %#x len %#x type %#x bar %#x: off %#x len %#x\n",
		    __func__, offset, v.vcap.cap_len, v.vcap.cfg_type,
		    v.vcap.bar, v.vcap.offset, v.vcap.length);
		offset = v.vcap.cap_next;
	} while (offset != 0);
}
#endif

/*
 * Find the vendor capability of type cfg_type and copy it into buf.
 * Reads the base cap first; if the cap is larger than the base struct
 * the remainder is copied too, provided buflen is big enough.
 * Returns 0, ENOENT if no such cap, or ERANGE if buf is too small.
 */
int
virtio_pci_find_cap(struct virtio_pci_softc *sc, int cfg_type, void *buf,
    int buflen)
{
	pci_chipset_tag_t pc = sc->sc_pc;
	pcitag_t tag = sc->sc_ptag;
	unsigned int offset, i, len;
	union {
		pcireg_t reg[8];
		struct virtio_pci_cap vcap;
	} *v = buf;

	if (buflen < sizeof(struct virtio_pci_cap))
		return ERANGE;

	if (!pci_get_capability(pc, tag, PCI_CAP_VENDSPEC, &offset, &v->reg[0]))
		return ENOENT;

	do {
		for (i = 0; i < 4; i++)
			v->reg[i] = pci_conf_read(pc, tag, offset + i * 4);
		if (v->vcap.cfg_type == cfg_type)
			break;
		offset = v->vcap.cap_next;
	} while (offset != 0);

	if (offset == 0)
		return ENOENT;

	if (v->vcap.cap_len > sizeof(struct virtio_pci_cap)) {
		len = roundup(v->vcap.cap_len, sizeof(pcireg_t));
		if (len > buflen) {
			printf("%s: cap too large\n", __func__);
			return ERANGE;
		}
		for (i = 4; i < len / sizeof(pcireg_t); i++)
			v->reg[i] = pci_conf_read(pc, tag, offset + i * 4);
	}

	return 0;
}
/* Number of PCI base address registers. */
#define NMAPREG		((PCI_MAPREG_END - PCI_MAPREG_START) / \
				sizeof(pcireg_t))

/*
 * Attach as a virtio 1.0 (modern) device: locate the common, notify, isr
 * and (optional) device config capabilities, map the bars they live in,
 * and carve out a subregion for each.  Returns 0 on success, ENODEV if a
 * mandatory capability is missing, EIO on mapping failure.
 */
int
virtio_pci_attach_10(struct virtio_pci_softc *sc, struct pci_attach_args *pa)
{
	struct virtio_pci_cap common, isr, device;
	struct virtio_pci_notify_cap notify;
	int have_device_cfg = 0;
	bus_size_t bars[NMAPREG] = { 0 };
	int bars_idx[NMAPREG] = { 0 };
	struct virtio_pci_cap *caps[] = { &common, &isr, &device, &notify.cap };
	int i, j = 0, ret = 0;

	if (virtio_pci_find_cap(sc, VIRTIO_PCI_CAP_COMMON_CFG, &common,
	    sizeof(common)) != 0)
		return ENODEV;

	if (virtio_pci_find_cap(sc, VIRTIO_PCI_CAP_NOTIFY_CFG, &notify,
	    sizeof(notify)) != 0)
		return ENODEV;
	if (virtio_pci_find_cap(sc, VIRTIO_PCI_CAP_ISR_CFG, &isr,
	    sizeof(isr)) != 0)
		return ENODEV;
	/* The device config region is optional. */
	if (virtio_pci_find_cap(sc, VIRTIO_PCI_CAP_DEVICE_CFG, &device,
	    sizeof(device)) != 0)
		memset(&device, 0, sizeof(device));
	else
		have_device_cfg = 1;

	/*
	 * XXX Maybe there are devices that offer the pci caps but not the
	 * XXX VERSION_1 feature bit? Then we should check the feature bit
	 * XXX here and fall back to 0.9 out if not present.
	 */

	/* Figure out which bars we need to map */
	for (i = 0; i < nitems(caps); i++) {
		int bar = caps[i]->bar;
		bus_size_t len = caps[i]->offset + caps[i]->length;
		if (caps[i]->length == 0)
			continue;
		if (bars[bar] < len)
			bars[bar] = len;
	}

	/* Map each needed bar once; remember its index in bars_idx. */
	for (i = 0; i < nitems(bars); i++) {
		int reg;
		pcireg_t type;
		if (bars[i] == 0)
			continue;
		reg = PCI_MAPREG_START + i * 4;
		type = pci_mapreg_type(sc->sc_pc, sc->sc_ptag, reg);
		if (pci_mapreg_map(pa, reg, type, 0, &sc->sc_bars_iot[j],
		    &sc->sc_bars_ioh[j], NULL, &sc->sc_bars_iosize[j],
		    bars[i])) {
			printf("%s: can't map bar %u \n",
			    sc->sc_sc.sc_dev.dv_xname, i);
			ret = EIO;
			goto err;
		}
		bars_idx[i] = j;
		j++;
	}

	i = bars_idx[notify.cap.bar];
	if (bus_space_subregion(sc->sc_bars_iot[i], sc->sc_bars_ioh[i],
	    notify.cap.offset, notify.cap.length, &sc->sc_notify_ioh) != 0) {
		printf("%s: can't map notify i/o space\n",
		    sc->sc_sc.sc_dev.dv_xname);
		ret = EIO;
		goto err;
	}
	sc->sc_notify_iosize = notify.cap.length;
	sc->sc_notify_iot = sc->sc_bars_iot[i];
	sc->sc_notify_off_multiplier = notify.notify_off_multiplier;

	if (have_device_cfg) {
		i = bars_idx[device.bar];
		if (bus_space_subregion(sc->sc_bars_iot[i], sc->sc_bars_ioh[i],
		    device.offset, device.length, &sc->sc_devcfg_ioh) != 0) {
			printf("%s: can't map devcfg i/o space\n",
			    sc->sc_sc.sc_dev.dv_xname);
			ret = EIO;
			goto err;
		}
		sc->sc_devcfg_iosize = device.length;
		sc->sc_devcfg_iot = sc->sc_bars_iot[i];
	}

	i = bars_idx[isr.bar];
	if (bus_space_subregion(sc->sc_bars_iot[i], sc->sc_bars_ioh[i],
	    isr.offset, isr.length, &sc->sc_isr_ioh) != 0) {
		printf("%s: can't map isr i/o space\n",
		    sc->sc_sc.sc_dev.dv_xname);
		ret = EIO;
		goto err;
	}
	sc->sc_isr_iosize = isr.length;
	sc->sc_isr_iot = sc->sc_bars_iot[i];

	i = bars_idx[common.bar];
	if (bus_space_subregion(sc->sc_bars_iot[i], sc->sc_bars_ioh[i],
	    common.offset, common.length, &sc->sc_ioh) != 0) {
		printf("%s: can't map common i/o space\n",
		    sc->sc_sc.sc_dev.dv_xname);
		ret = EIO;
		goto err;
	}
	sc->sc_iosize = common.length;
	sc->sc_iot = sc->sc_bars_iot[i];

	sc->sc_sc.sc_version_1 = 1;
	return 0;

err:
	/* there is no pci_mapreg_unmap() */
	return ret;
}

/*
 * Attach as a virtio 0.9 (legacy) device: everything lives in the first
 * i/o bar; the notify and isr registers are subregions of it.
 */
int
virtio_pci_attach_09(struct virtio_pci_softc *sc, struct pci_attach_args *pa)
{
	struct virtio_softc *vsc = &sc->sc_sc;
	pcireg_t type;

	type = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
	if (pci_mapreg_map(pa, PCI_MAPREG_START, type, 0,
	    &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_iosize, 0)) {
		printf("%s: can't map i/o space\n", vsc->sc_dev.dv_xname);
		return EIO;
	}

	if (bus_space_subregion(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_NOTIFY, 2, &sc->sc_notify_ioh) != 0) {
		printf("%s: can't map notify i/o space\n",
		    vsc->sc_dev.dv_xname);
		return EIO;
	}
	sc->sc_notify_iosize = 2;
	sc->sc_notify_iot = sc->sc_iot;

	if (bus_space_subregion(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_ISR_STATUS, 1, &sc->sc_isr_ioh) != 0) {
		printf("%s: can't map isr i/o space\n",
		    vsc->sc_dev.dv_xname);
		return EIO;
	}
	sc->sc_isr_iosize = 1;
	sc->sc_isr_iot = sc->sc_iot;

	return 0;
}

/*
 * Autoconf attach: derive the virtio device id from the PCI id, try the
 * 1.0 transport first (unless disabled by UKC flags), fall back to 0.9
 * for transitional (revision 0) devices, then reset the device, announce
 * ACK/DRIVER status and attach the matching child driver.
 */
void
virtio_pci_attach(struct device *parent, struct device *self, void *aux)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)self;
	struct virtio_softc *vsc = &sc->sc_sc;
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	int revision, ret = ENODEV;
	pcireg_t id;
	struct virtio_pci_attach_args vpa = { { 0 }, pa };

	revision = PCI_REVISION(pa->pa_class);
	switch (revision) {
	case 0:
		/* subsystem ID shows what I am */
		id = PCI_PRODUCT(pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG));
		break;
	case 1:
		/* modern devices encode the virtio id in the product id */
		id = PCI_PRODUCT(pa->pa_id) - 0x1040;
		break;
	default:
		printf("unknown revision 0x%02x; giving up\n", revision);
		return;
	}

	sc->sc_pc = pc;
	sc->sc_ptag = pa->pa_tag;
	vsc->sc_dmat = pa->pa_dmat;

#if defined(__i386__) || defined(__amd64__)
	/*
	 * For virtio, ignore normal MSI black/white-listing depending on the
	 * PCI bridge but enable it unconditionally.
	 */
	pa->pa_flags |= PCI_FLAGS_MSI_ENABLED;
#endif

#if VIRTIO_DEBUG
	virtio_pci_dump_caps(sc);
#endif

	/* At least one interrupt, at most MAX_MSIX_VECS. */
	sc->sc_nintr = min(MAX_MSIX_VECS, pci_intr_msix_count(pa));
	sc->sc_nintr = max(sc->sc_nintr, 1);
	vpa.vpa_va.va_nintr = sc->sc_nintr;

	sc->sc_intr = mallocarray(sc->sc_nintr, sizeof(*sc->sc_intr),
	    M_DEVBUF, M_WAITOK | M_ZERO);

	vsc->sc_ops = &virtio_pci_ops;
	if ((vsc->sc_dev.dv_cfdata->cf_flags & VIRTIO_CF_NO_VERSION_1) == 0 &&
	    (revision == 1 ||
	     (vsc->sc_dev.dv_cfdata->cf_flags & VIRTIO_CF_PREFER_VERSION_1))) {
		ret = virtio_pci_attach_10(sc, pa);
	}
	if (ret != 0 && revision == 0) {
		/* revision 0 means 0.9 only or both 0.9 and 1.0 */
		ret = virtio_pci_attach_09(sc, pa);
	}
	if (ret != 0) {
		printf(": Cannot attach (%d)\n", ret);
		goto free;
	}

	sc->sc_irq_type = IRQ_NO_MSIX;
	if (virtio_pci_adjust_config_region(sc,
	    VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI) != 0)
	{
		goto free;
	}

	virtio_device_reset(vsc);
	virtio_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
	virtio_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);

	printf("\n");
	vpa.vpa_va.va_devid = id;
	vsc->sc_child = NULL;
	config_found(self, &vpa, NULL);
	if (vsc->sc_child == NULL) {
		printf("%s: no matching child driver; not configured\n",
		    vsc->sc_dev.dv_xname);
		goto err;
	}
	if (vsc->sc_child == VIRTIO_CHILD_ERROR) {
		printf("%s: virtio configuration failed\n",
		    vsc->sc_dev.dv_xname);
		goto err;
	}

	return;

err:
	/* no pci_mapreg_unmap() or pci_intr_unmap() */
	virtio_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
free:
	free(sc->sc_intr, M_DEVBUF, sc->sc_nintr * sizeof(*sc->sc_intr));
}
/*
 * Called by the child driver once it knows its virtqueues: establish
 * interrupts, preferring per-VQ MSI-X, then shared MSI-X, then a single
 * MSI/INTx handler.  Returns 0 on success, -EIO on failure.
 */
int
virtio_pci_attach_finish(struct virtio_softc *vsc,
    struct virtio_attach_args *va)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	struct virtio_pci_attach_args *vpa =
	    (struct virtio_pci_attach_args *)va;
	pci_intr_handle_t ih;
	pci_chipset_tag_t pc = vpa->vpa_pa->pa_pc;
	char const *intrstr;

	if (sc->sc_irq_type == IRQ_MSIX_CHILD) {
		/* The child already established its own vectors. */
		intrstr = "msix";
	} else if (virtio_pci_setup_msix(sc, vpa, 0) == 0) {
		sc->sc_irq_type = IRQ_MSIX_PER_VQ;
		intrstr = "msix per-VQ";
	} else if (virtio_pci_setup_msix(sc, vpa, 1) == 0) {
		sc->sc_irq_type = IRQ_MSIX_SHARED;
		intrstr = "msix shared";
	} else {
		int (*ih_func)(void *) = virtio_pci_legacy_intr;
		if (pci_intr_map_msi(vpa->vpa_pa, &ih) != 0 &&
		    pci_intr_map(vpa->vpa_pa, &ih) != 0) {
			printf("%s: couldn't map interrupt\n",
			    vsc->sc_dev.dv_xname);
			return -EIO;
		}
		intrstr = pci_intr_string(pc, ih);
		/*
		 * We always set the IPL_MPSAFE flag in order to do the
		 * relatively expensive ISR read without lock, and then
		 * grab the kernel lock in the interrupt handler.
		 */
		if (vsc->sc_ipl & IPL_MPSAFE)
			ih_func = virtio_pci_legacy_intr_mpsafe;
		sc->sc_intr[0].ih = pci_intr_establish(pc, ih,
		    vsc->sc_ipl | IPL_MPSAFE, ih_func, sc,
		    vsc->sc_child->dv_xname);
		if (sc->sc_intr[0].ih == NULL) {
			printf("%s: couldn't establish interrupt",
			    vsc->sc_dev.dv_xname);
			if (intrstr != NULL)
				printf(" at %s", intrstr);
			printf("\n");
			return -EIO;
		}
	}

	printf("%s: %s\n", vsc->sc_dev.dv_xname, intrstr);
	return 0;
}

/* Autoconf detach: detach the child, release interrupts and mappings. */
int
virtio_pci_detach(struct device *self, int flags)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)self;
	struct virtio_softc *vsc = &sc->sc_sc;
	int r;

	if (vsc->sc_child != 0 && vsc->sc_child != VIRTIO_CHILD_ERROR) {
		r = config_detach(vsc->sc_child, flags);
		if (r)
			return r;
	}
	KASSERT(vsc->sc_child == 0 || vsc->sc_child == VIRTIO_CHILD_ERROR);
	KASSERT(vsc->sc_vqs == 0);
	virtio_pci_free_irqs(sc);
	if (sc->sc_iosize)
		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_iosize);
	sc->sc_iosize = 0;

	return 0;
}

/*
 * Re-map the 0.9 device config subregion at the given offset: the region
 * moves depending on whether MSI-X is enabled.  No-op for 1.0, where the
 * region is fixed (the offset only records the MSI-X state then).
 * Returns 0 on success, 1 on mapping failure.
 */
int
virtio_pci_adjust_config_region(struct virtio_pci_softc *sc, int offset)
{
	if (sc->sc_sc.sc_version_1)
		return 0;
	if (sc->sc_devcfg_offset == offset)
		return 0;
	sc->sc_devcfg_offset = offset;
	sc->sc_devcfg_iosize = sc->sc_iosize - offset;
	sc->sc_devcfg_iot = sc->sc_iot;
	if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, sc->sc_devcfg_offset,
	    sc->sc_devcfg_iosize, &sc->sc_devcfg_ioh) != 0) {
		printf("%s: can't map config i/o space\n",
		    sc->sc_sc.sc_dev.dv_xname);
		return 1;
	}
	return 0;
}

/*
 * Feature negotiation.
 * Prints available / negotiated features if guest_feature_names != NULL and
 * VIRTIO_DEBUG is 1
 */
int
virtio_pci_negotiate_features(struct virtio_softc *vsc,
    const struct virtio_feature_name *guest_feature_names)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	uint64_t host, negotiated;

	vsc->sc_active_features = 0;

	/*
	 * We enable indirect descriptors by default. They can be switched
	 * off by setting bit 1 in the driver flags, see config(8)
	 */
	if (!(vsc->sc_dev.dv_cfdata->cf_flags & VIRTIO_CF_NO_INDIRECT) &&
	    !(vsc->sc_child->dv_cfdata->cf_flags & VIRTIO_CF_NO_INDIRECT)) {
		vsc->sc_driver_features |= VIRTIO_F_RING_INDIRECT_DESC;
	} else if (guest_feature_names != NULL) {
		printf(" RingIndirectDesc disabled by UKC");
	}

	/*
	 * The driver must add VIRTIO_F_RING_EVENT_IDX if it supports it.
	 * If it did, check if it is disabled by bit 2 in the driver flags.
	 */
	if ((vsc->sc_driver_features & VIRTIO_F_RING_EVENT_IDX) &&
	    ((vsc->sc_dev.dv_cfdata->cf_flags & VIRTIO_CF_NO_EVENT_IDX) ||
	     (vsc->sc_child->dv_cfdata->cf_flags & VIRTIO_CF_NO_EVENT_IDX))) {
		if (guest_feature_names != NULL)
			printf(" RingEventIdx disabled by UKC");
		vsc->sc_driver_features &= ~VIRTIO_F_RING_EVENT_IDX;
	}

	/* 1.0 devices use the two-step feature_select protocol. */
	if (vsc->sc_version_1) {
		return virtio_pci_negotiate_features_10(vsc,
		    guest_feature_names);
	}

	/* virtio 0.9 only */
	host = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_DEVICE_FEATURES);
	negotiated = host & vsc->sc_driver_features;
#if VIRTIO_DEBUG
	if (guest_feature_names)
		virtio_log_features(host, negotiated, guest_feature_names);
#endif
	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_GUEST_FEATURES, negotiated);
	vsc->sc_active_features = negotiated;
	if (negotiated & VIRTIO_F_RING_INDIRECT_DESC)
		vsc->sc_indirect = 1;
	else
		vsc->sc_indirect = 0;
	return 0;
}
/*
 * Virtio 1.0 feature negotiation: read the 64-bit host feature set in
 * two 32-bit halves, write back the intersection with the driver's
 * features, then confirm via the FEATURES_OK status handshake.
 * Returns 0, ENXIO if the device rejects FEATURES_OK, or EINVAL if it
 * rejects VERSION_1.
 */
int
virtio_pci_negotiate_features_10(struct virtio_softc *vsc,
    const struct virtio_feature_name *guest_feature_names)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	uint64_t host, negotiated;

	vsc->sc_driver_features |= VIRTIO_F_VERSION_1;
	/*
	 * XXX Without this SEV doesn't work with a KVM/qemu hypervisor on
	 * XXX amd64.
	 */
	vsc->sc_driver_features |= VIRTIO_F_ACCESS_PLATFORM;
	/* notify on empty is 0.9 only */
	vsc->sc_driver_features &= ~VIRTIO_F_NOTIFY_ON_EMPTY;
	CWRITE(sc, device_feature_select, 0);
	host = CREAD(sc, device_feature);
	CWRITE(sc, device_feature_select, 1);
	host |= (uint64_t)CREAD(sc, device_feature) << 32;

	negotiated = host & vsc->sc_driver_features;
#if VIRTIO_DEBUG
	if (guest_feature_names)
		virtio_log_features(host, negotiated, guest_feature_names);
#endif
	CWRITE(sc, driver_feature_select, 0);
	CWRITE(sc, driver_feature, negotiated & 0xffffffff);
	CWRITE(sc, driver_feature_select, 1);
	CWRITE(sc, driver_feature, negotiated >> 32);
	virtio_pci_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_FEATURES_OK);

	/* The device clears FEATURES_OK if it cannot live with our set. */
	if ((CREAD(sc, device_status) &
	    VIRTIO_CONFIG_DEVICE_STATUS_FEATURES_OK) == 0) {
		printf("%s: Feature negotiation failed\n",
		    vsc->sc_dev.dv_xname);
		CWRITE(sc, device_status, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
		return ENXIO;
	}
	vsc->sc_active_features = negotiated;

	if (negotiated & VIRTIO_F_RING_INDIRECT_DESC)
		vsc->sc_indirect = 1;
	else
		vsc->sc_indirect = 0;

	if ((negotiated & VIRTIO_F_VERSION_1) == 0) {
#if VIRTIO_DEBUG
		printf("%s: Host rejected Version_1\n", __func__);
#endif
		CWRITE(sc, device_status, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
		return EINVAL;
	}
	return 0;
}

/*
 * Device configuration registers.
 * Accessors for the device specific config region; index is the byte
 * offset into that region.  64-bit values are split into two 32-bit
 * accesses, reads high word first, writes low word first.
 */
uint8_t
virtio_pci_read_device_config_1(struct virtio_softc *vsc, int index)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	return bus_space_read_1(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index);
}

uint16_t
virtio_pci_read_device_config_2(struct virtio_softc *vsc, int index)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	return bus_space_read_2(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index);
}

uint32_t
virtio_pci_read_device_config_4(struct virtio_softc *vsc, int index)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	return bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index);
}

uint64_t
virtio_pci_read_device_config_8(struct virtio_softc *vsc, int index)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	uint64_t r;

	r = bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh,
	    index + sizeof(uint32_t));
	r <<= 32;
	r += bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index);
	return r;
}

void
virtio_pci_write_device_config_1(struct virtio_softc *vsc, int index,
    uint8_t value)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	bus_space_write_1(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index, value);
}

void
virtio_pci_write_device_config_2(struct virtio_softc *vsc, int index,
    uint16_t value)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	bus_space_write_2(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index, value);
}

void
virtio_pci_write_device_config_4(struct virtio_softc *vsc,
    int index, uint32_t value)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	bus_space_write_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index, value);
}

void
virtio_pci_write_device_config_8(struct virtio_softc *vsc,
    int index, uint64_t value)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	bus_space_write_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh,
	    index, value & 0xffffffff);
	bus_space_write_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh,
	    index + sizeof(uint32_t), value >> 32);
}
/*
 * Map and establish MSI-X vector idx with the given handler, optionally
 * bound to cpu ci (NULL = default).  On success the 0.9 devcfg region is
 * re-mapped for the MSI-X register layout.
 */
int
virtio_pci_msix_establish(struct virtio_pci_softc *sc,
    struct virtio_pci_attach_args *vpa, int idx, struct cpu_info *ci,
    int (*handler)(void *), void *ih_arg)
{
	struct virtio_softc *vsc = &sc->sc_sc;
	pci_intr_handle_t ih;
	int r;

	KASSERT(idx < sc->sc_nintr);

	r = pci_intr_map_msix(vpa->vpa_pa, idx, &ih);
	if (r != 0) {
#if VIRTIO_DEBUG
		printf("%s[%d]: pci_intr_map_msix failed\n",
		    vsc->sc_dev.dv_xname, idx);
#endif
		return r;
	}
	snprintf(sc->sc_intr[idx].name, sizeof(sc->sc_intr[idx].name), "%s:%d",
	    vsc->sc_child->dv_xname, idx);
	sc->sc_intr[idx].ih = pci_intr_establish_cpu(sc->sc_pc, ih, vsc->sc_ipl,
	    ci, handler, ih_arg, sc->sc_intr[idx].name);
	if (sc->sc_intr[idx].ih == NULL) {
		printf("%s[%d]: couldn't establish msix interrupt\n",
		    vsc->sc_child->dv_xname, idx);
		return ENOMEM;
	}
	virtio_pci_adjust_config_region(sc, VIRTIO_CONFIG_DEVICE_CONFIG_MSI);
	return 0;
}

/* Tell the device which MSI-X vector should fire for queue idx. */
void
virtio_pci_set_msix_queue_vector(struct virtio_pci_softc *sc, uint32_t idx,
    uint16_t vector)
{
	if (sc->sc_sc.sc_version_1) {
		CWRITE(sc, queue_select, idx);
		CWRITE(sc, queue_msix_vector, vector);
	} else {
		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
		    VIRTIO_CONFIG_QUEUE_SELECT, idx);
		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
		    VIRTIO_MSI_QUEUE_VECTOR, vector);
	}
}

/* Tell the device which MSI-X vector should fire for config changes. */
void
virtio_pci_set_msix_config_vector(struct virtio_pci_softc *sc,
    uint16_t vector)
{
	if (sc->sc_sc.sc_version_1) {
		CWRITE(sc, config_msix_vector, vector);
	} else {
		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
		    VIRTIO_MSI_CONFIG_VECTOR, vector);
	}
}


/*
 * Unroute queue vectors from the device, disestablish all handlers and
 * restore the non-MSI devcfg layout.
 */
void
virtio_pci_free_irqs(struct virtio_pci_softc *sc)
{
	struct virtio_softc *vsc = &sc->sc_sc;
	int i;

	if (sc->sc_devcfg_offset == VIRTIO_CONFIG_DEVICE_CONFIG_MSI) {
		for (i = 0; i < vsc->sc_nvqs; i++) {
			virtio_pci_set_msix_queue_vector(sc, i,
			    VIRTIO_MSI_NO_VECTOR);
		}
	}

	for (i = 0; i < sc->sc_nintr; i++) {
		if (sc->sc_intr[i].ih) {
			pci_intr_disestablish(sc->sc_pc, sc->sc_intr[i].ih);
			sc->sc_intr[i].ih = NULL;
		}
	}

	/* XXX msix_delroute does not unset PCI_MSIX_MC_MSIXE -> leave alone? */
	virtio_pci_adjust_config_region(sc, VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI);
}

/*
 * Establish the MSI-X vectors: vector 0 for config changes, then either
 * one shared queue vector (shared != 0) or one vector per virtqueue.
 * Returns 0 on success, ERANGE if not enough vectors are available, or
 * the error from virtio_pci_msix_establish() (irqs freed again then).
 */
int
virtio_pci_setup_msix(struct virtio_pci_softc *sc,
    struct virtio_pci_attach_args *vpa, int shared)
{
	struct virtio_softc *vsc = &sc->sc_sc;
	int i, r = 0;

	/* Shared needs config + queue */
	if (shared && vpa->vpa_va.va_nintr < 1 + 1)
		return ERANGE;
	/* Per VQ needs config + N * queue */
	if (!shared && vpa->vpa_va.va_nintr < 1 + vsc->sc_nvqs)
		return ERANGE;

	r = virtio_pci_msix_establish(sc, vpa, 0, NULL,
	    virtio_pci_config_intr, vsc);
	if (r != 0)
		return r;

	if (shared) {
		r = virtio_pci_msix_establish(sc, vpa, 1, NULL,
		    virtio_pci_shared_queue_intr, vsc);
		if (r != 0)
			goto fail;

		for (i = 0; i < vsc->sc_nvqs; i++)
			vsc->sc_vqs[i].vq_intr_vec = 1;
	} else {
		for (i = 0; i < vsc->sc_nvqs; i++) {
			r = virtio_pci_msix_establish(sc, vpa, i + 1, NULL,
			    virtio_pci_queue_intr, &vsc->sc_vqs[i]);
			if (r != 0)
				goto fail;
			vsc->sc_vqs[i].vq_intr_vec = i + 1;
		}
	}

	return 0;
fail:
	virtio_pci_free_irqs(sc);
	return r;
}

/*
 * virtio_ops hook for child drivers that manage their own MSI-X vectors;
 * switches the softc into IRQ_MSIX_CHILD mode.
 */
int
virtio_pci_intr_establish(struct virtio_softc *vsc,
    struct virtio_attach_args *va, int vec, struct cpu_info *ci,
    int (*func)(void *), void *arg)
{
	struct virtio_pci_attach_args *vpa;
	struct virtio_pci_softc *sc;

	if (vsc->sc_ops != &virtio_pci_ops)
		return ENXIO;

	vpa = (struct virtio_pci_attach_args *)va;
	sc = (struct virtio_pci_softc *)vsc;

	if (vec >= sc->sc_nintr || sc->sc_nintr <= 1)
		return ERANGE;

	sc->sc_irq_type = IRQ_MSIX_CHILD;
	return virtio_pci_msix_establish(sc, vpa, vec, ci, func, arg);
}

/* Wait until every established interrupt handler has finished running. */
void
virtio_pci_intr_barrier(struct virtio_softc *vsc)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	int i;

	for (i = 0; i < sc->sc_nintr; i++) {
		if (sc->sc_intr[i].ih != NULL)
			intr_barrier(sc->sc_intr[i].ih);
	}
}

/*
 * Interrupt handler.
 */

/*
 * Only used without MSI-X.  Reading the ISR byte acks the interrupt;
 * the kernel lock is taken after that cheap check (see attach_finish).
 */
int
virtio_pci_legacy_intr(void *arg)
{
	struct virtio_pci_softc *sc = arg;
	struct virtio_softc *vsc = &sc->sc_sc;
	int isr, r = 0;

	/* check and ack the interrupt */
	isr = bus_space_read_1(sc->sc_isr_iot, sc->sc_isr_ioh, 0);
	if (isr == 0)
		return 0;
	KERNEL_LOCK();
	if ((isr & VIRTIO_CONFIG_ISR_CONFIG_CHANGE) &&
	    (vsc->sc_config_change != NULL)) {
		r = (vsc->sc_config_change)(vsc);
	}
	r |= virtio_check_vqs(vsc);
	KERNEL_UNLOCK();

	return r;
}

/* As above, but for MPSAFE children: no kernel lock taken. */
int
virtio_pci_legacy_intr_mpsafe(void *arg)
{
	struct virtio_pci_softc *sc = arg;
	struct virtio_softc *vsc = &sc->sc_sc;
	int isr, r = 0;

	/* check and ack the interrupt */
	isr = bus_space_read_1(sc->sc_isr_iot, sc->sc_isr_ioh, 0);
	if (isr == 0)
		return 0;
	if ((isr & VIRTIO_CONFIG_ISR_CONFIG_CHANGE) &&
	    (vsc->sc_config_change != NULL)) {
		r = (vsc->sc_config_change)(vsc);
	}
	r |= virtio_check_vqs(vsc);
	return r;
}
arg; 1181 1182 if (vsc->sc_config_change != NULL) 1183 return vsc->sc_config_change(vsc); 1184 return 0; 1185 } 1186 1187 /* 1188 * Only used with MSI-X 1189 */ 1190 int 1191 virtio_pci_queue_intr(void *arg) 1192 { 1193 struct virtqueue *vq = arg; 1194 struct virtio_softc *vsc = vq->vq_owner; 1195 1196 return virtio_check_vq(vsc, vq); 1197 } 1198 1199 int 1200 virtio_pci_shared_queue_intr(void *arg) 1201 { 1202 struct virtio_softc *vsc = arg; 1203 1204 return virtio_check_vqs(vsc); 1205 } 1206 1207 /* 1208 * Interrupt handler to be used when polling. 1209 * We cannot use isr here because it is not defined in MSI-X mode. 1210 */ 1211 int 1212 virtio_pci_poll_intr(void *arg) 1213 { 1214 struct virtio_pci_softc *sc = arg; 1215 struct virtio_softc *vsc = &sc->sc_sc; 1216 int r = 0; 1217 1218 if (vsc->sc_config_change != NULL) 1219 r = (vsc->sc_config_change)(vsc); 1220 1221 r |= virtio_check_vqs(vsc); 1222 1223 return r; 1224 } 1225 1226 void 1227 virtio_pci_kick(struct virtio_softc *vsc, uint16_t idx) 1228 { 1229 struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc; 1230 unsigned offset = 0; 1231 if (vsc->sc_version_1) { 1232 offset = vsc->sc_vqs[idx].vq_notify_off * 1233 sc->sc_notify_off_multiplier; 1234 } 1235 bus_space_write_2(sc->sc_notify_iot, sc->sc_notify_ioh, offset, idx); 1236 } 1237