/*	$OpenBSD: virtio_pci.c,v 1.44 2024/10/29 08:42:05 sf Exp $	*/
/*	$NetBSD: virtio.c,v 1.3 2011/11/02 23:05:52 njoly Exp $	*/

/*
 * Copyright (c) 2012 Stefan Fritsch.
 * Copyright (c) 2010 Minoura Makoto.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/mutex.h>

#include <dev/pci/pcidevs.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/virtio_pcireg.h>

#include <dev/pv/virtioreg.h>
#include <dev/pv/virtiovar.h>

#define DNPRINTF(n,x...)						\
    do { if (VIRTIO_DEBUG >= n) printf(x); } while(0)


/*
 * XXX: Before being used on big endian arches, the access to config registers
 * XXX: needs to be reviewed/fixed. The non-device specific registers are
 * XXX: PCI-endian while the device specific registers are native endian.
 */
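
/*
 * Illustration only, not used by this driver: on a big endian machine one
 * of the two register classes would need explicit byte swapping, depending
 * on whether bus_space(9) already converts PCI accesses on that
 * architecture, e.g. something like
 *
 *	val = letoh32(bus_space_read_4(sc->sc_iot, sc->sc_ioh, off));
 *
 * for a little endian register read.
 */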

#define MAX_MSIX_VECS	8

struct virtio_pci_softc;
struct virtio_pci_attach_args;

int		virtio_pci_match(struct device *, void *, void *);
void		virtio_pci_attach(struct device *, struct device *, void *);
int		virtio_pci_attach_09(struct virtio_pci_softc *sc, struct pci_attach_args *pa);
int		virtio_pci_attach_10(struct virtio_pci_softc *sc, struct pci_attach_args *pa);
int		virtio_pci_detach(struct device *, int);

void		virtio_pci_kick(struct virtio_softc *, uint16_t);
int		virtio_pci_adjust_config_region(struct virtio_pci_softc *);
uint8_t		virtio_pci_read_device_config_1(struct virtio_softc *, int);
uint16_t	virtio_pci_read_device_config_2(struct virtio_softc *, int);
uint32_t	virtio_pci_read_device_config_4(struct virtio_softc *, int);
uint64_t	virtio_pci_read_device_config_8(struct virtio_softc *, int);
void		virtio_pci_write_device_config_1(struct virtio_softc *, int, uint8_t);
void		virtio_pci_write_device_config_2(struct virtio_softc *, int, uint16_t);
void		virtio_pci_write_device_config_4(struct virtio_softc *, int, uint32_t);
void		virtio_pci_write_device_config_8(struct virtio_softc *, int, uint64_t);
uint16_t	virtio_pci_read_queue_size(struct virtio_softc *, uint16_t);
void		virtio_pci_setup_queue(struct virtio_softc *, struct virtqueue *, uint64_t);
void		virtio_pci_setup_intrs(struct virtio_softc *);
int		virtio_pci_get_status(struct virtio_softc *);
void		virtio_pci_set_status(struct virtio_softc *, int);
int		virtio_pci_negotiate_features(struct virtio_softc *, const struct virtio_feature_name *);
int		virtio_pci_negotiate_features_10(struct virtio_softc *, const struct virtio_feature_name *);
void		virtio_pci_set_msix_queue_vector(struct virtio_pci_softc *, uint32_t, uint16_t);
void		virtio_pci_set_msix_config_vector(struct virtio_pci_softc *, uint16_t);
int		virtio_pci_msix_establish(struct virtio_pci_softc *, struct virtio_pci_attach_args *, int, int (*)(void *), void *);
int		virtio_pci_setup_msix(struct virtio_pci_softc *, struct virtio_pci_attach_args *, int);
void		virtio_pci_free_irqs(struct virtio_pci_softc *);
int		virtio_pci_poll_intr(void *);
int		virtio_pci_legacy_intr(void *);
int		virtio_pci_legacy_intr_mpsafe(void *);
int		virtio_pci_config_intr(void *);
int		virtio_pci_queue_intr(void *);
int		virtio_pci_shared_queue_intr(void *);
int		virtio_pci_find_cap(struct virtio_pci_softc *sc, int cfg_type, void *buf, int buflen);
#if VIRTIO_DEBUG
void		virtio_pci_dump_caps(struct virtio_pci_softc *sc);
#endif

enum irq_type {
	IRQ_NO_MSIX,
	IRQ_MSIX_SHARED, /* vec 0: config irq, vec 1 shared by all vqs */
	IRQ_MSIX_PER_VQ, /* vec 0: config irq, vec n: irq of vq[n-1] */
};

struct virtio_pci_intr {
	char	 name[16];
	void	*ih;
};

struct virtio_pci_softc {
	struct virtio_softc	sc_sc;
	pci_chipset_tag_t	sc_pc;
	pcitag_t		sc_ptag;

	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_iosize;

	bus_space_tag_t		sc_bars_iot[4];
	bus_space_handle_t	sc_bars_ioh[4];
	bus_size_t		sc_bars_iosize[4];

	bus_space_tag_t		sc_notify_iot;
	bus_space_handle_t	sc_notify_ioh;
	bus_size_t		sc_notify_iosize;
	unsigned int		sc_notify_off_multiplier;

	bus_space_tag_t		sc_devcfg_iot;
	bus_space_handle_t	sc_devcfg_ioh;
	bus_size_t		sc_devcfg_iosize;
	/*
	 * With 0.9, the offset of the devcfg region in the io bar changes
	 * depending on MSI-X being enabled or not.
	 * With 1.0, this field is still used to remember if MSI-X is enabled
	 * or not.
	 */
	unsigned int		sc_devcfg_offset;

	bus_space_tag_t		sc_isr_iot;
	bus_space_handle_t	sc_isr_ioh;
	bus_size_t		sc_isr_iosize;

	struct virtio_pci_intr	*sc_intr;
	int			sc_nintr;

	enum irq_type		sc_irq_type;
};

struct virtio_pci_attach_args {
	struct virtio_attach_args	 vpa_va;
	struct pci_attach_args		*vpa_pa;
};


const struct cfattach virtio_pci_ca = {
	sizeof(struct virtio_pci_softc),
	virtio_pci_match,
	virtio_pci_attach,
	virtio_pci_detach,
	NULL
};

const struct virtio_ops virtio_pci_ops = {
	virtio_pci_kick,
	virtio_pci_read_device_config_1,
	virtio_pci_read_device_config_2,
	virtio_pci_read_device_config_4,
	virtio_pci_read_device_config_8,
	virtio_pci_write_device_config_1,
	virtio_pci_write_device_config_2,
	virtio_pci_write_device_config_4,
	virtio_pci_write_device_config_8,
	virtio_pci_read_queue_size,
	virtio_pci_setup_queue,
	virtio_pci_setup_intrs,
	virtio_pci_get_status,
	virtio_pci_set_status,
	virtio_pci_negotiate_features,
	virtio_pci_poll_intr,
};

static inline uint64_t
_cread(struct virtio_pci_softc *sc, unsigned off, unsigned size)
{
	uint64_t val;
	switch (size) {
	case 1:
		val = bus_space_read_1(sc->sc_iot, sc->sc_ioh, off);
		break;
	case 2:
		val = bus_space_read_2(sc->sc_iot, sc->sc_ioh, off);
		break;
	case 4:
		val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, off);
		break;
	case 8:
		val = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
		    off + sizeof(uint32_t));
		val <<= 32;
		val += bus_space_read_4(sc->sc_iot, sc->sc_ioh, off);
		break;
	}
	return val;
}

#define CREAD(sc, memb)	_cread(sc, offsetof(struct virtio_pci_common_cfg, memb), \
			    sizeof(((struct virtio_pci_common_cfg *)0)->memb))

#define CWRITE(sc, memb, val)						\
	do {								\
		struct virtio_pci_common_cfg c;				\
		size_t off = offsetof(struct virtio_pci_common_cfg, memb); \
		size_t size = sizeof(c.memb);				\
									\
		DNPRINTF(2, "%s: %d: off %#zx size %#zx write %#llx\n",	\
		    __func__, __LINE__, off, size, (unsigned long long)val); \
		switch (size) {						\
		case 1:							\
			bus_space_write_1(sc->sc_iot, sc->sc_ioh, off, val); \
			break;						\
		case 2:							\
			bus_space_write_2(sc->sc_iot, sc->sc_ioh, off, val); \
			break;						\
		case 4:							\
			bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val); \
			break;						\
		case 8:							\
			bus_space_write_4(sc->sc_iot, sc->sc_ioh, off,	\
			    (val) & 0xffffffff);			\
			bus_space_write_4(sc->sc_iot, sc->sc_ioh,	\
			    (off) + sizeof(uint32_t), (uint64_t)(val) >> 32); \
			break;						\
		}							\
	} while (0)

uint16_t
virtio_pci_read_queue_size(struct virtio_softc *vsc, uint16_t idx)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	uint16_t ret;
	if (sc->sc_sc.sc_version_1) {
		CWRITE(sc, queue_select, idx);
		ret = CREAD(sc, queue_size);
	} else {
		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
		    VIRTIO_CONFIG_QUEUE_SELECT, idx);
		ret = bus_space_read_2(sc->sc_iot, sc->sc_ioh,
		    VIRTIO_CONFIG_QUEUE_SIZE);
	}
	return ret;
}

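/*
 * Tell the device about a virtqueue's ring addresses.  With 1.0, addr == 0
 * disables the queue and clears the ring addresses, while a non-zero addr
 * programs the descriptor, avail and used rings, enables the queue and
 * latches its notify offset.  With 0.9, the device derives the ring layout
 * itself from the page frame number that is written.
 */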
void
virtio_pci_setup_queue(struct virtio_softc *vsc, struct virtqueue *vq,
    uint64_t addr)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	if (sc->sc_sc.sc_version_1) {
		CWRITE(sc, queue_select, vq->vq_index);
		if (addr == 0) {
			CWRITE(sc, queue_enable, 0);
			CWRITE(sc, queue_desc, 0);
			CWRITE(sc, queue_avail, 0);
			CWRITE(sc, queue_used, 0);
		} else {
			CWRITE(sc, queue_desc, addr);
			CWRITE(sc, queue_avail, addr + vq->vq_availoffset);
			CWRITE(sc, queue_used, addr + vq->vq_usedoffset);
			CWRITE(sc, queue_enable, 1);
			vq->vq_notify_off = CREAD(sc, queue_notify_off);
		}
	} else {
		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
		    VIRTIO_CONFIG_QUEUE_SELECT, vq->vq_index);
		bus_space_write_4(sc->sc_iot, sc->sc_ioh,
		    VIRTIO_CONFIG_QUEUE_ADDRESS, addr / VIRTIO_PAGE_SIZE);
	}
}

void
virtio_pci_setup_intrs(struct virtio_softc *vsc)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	int i;

	if (sc->sc_irq_type == IRQ_NO_MSIX)
		return;

	for (i = 0; i < vsc->sc_nvqs; i++) {
		unsigned vec = vsc->sc_vqs[i].vq_intr_vec;
		virtio_pci_set_msix_queue_vector(sc, i, vec);
	}
	if (vsc->sc_config_change)
		virtio_pci_set_msix_config_vector(sc, 0);
}

int
virtio_pci_get_status(struct virtio_softc *vsc)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;

	if (sc->sc_sc.sc_version_1)
		return CREAD(sc, device_status);
	else
		return bus_space_read_1(sc->sc_iot, sc->sc_ioh,
		    VIRTIO_CONFIG_DEVICE_STATUS);
}

void
virtio_pci_set_status(struct virtio_softc *vsc, int status)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	int old = 0;

	if (sc->sc_sc.sc_version_1) {
		if (status == 0) {
			CWRITE(sc, device_status, 0);
			while (CREAD(sc, device_status) != 0) {
				CPU_BUSY_CYCLE();
			}
		} else {
			old = CREAD(sc, device_status);
			CWRITE(sc, device_status, status|old);
		}
	} else {
		if (status == 0) {
			bus_space_write_1(sc->sc_iot, sc->sc_ioh,
			    VIRTIO_CONFIG_DEVICE_STATUS, status|old);
			while (bus_space_read_1(sc->sc_iot, sc->sc_ioh,
			    VIRTIO_CONFIG_DEVICE_STATUS) != 0) {
				CPU_BUSY_CYCLE();
			}
		} else {
			old = bus_space_read_1(sc->sc_iot, sc->sc_ioh,
			    VIRTIO_CONFIG_DEVICE_STATUS);
			bus_space_write_1(sc->sc_iot, sc->sc_ioh,
			    VIRTIO_CONFIG_DEVICE_STATUS, status|old);
		}
	}
}

int
virtio_pci_match(struct device *parent, void *match, void *aux)
{
	struct pci_attach_args *pa;

	pa = (struct pci_attach_args *)aux;
	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_OPENBSD &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_OPENBSD_CONTROL)
		return 1;
	if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_QUMRANET)
		return 0;
	/* virtio 0.9 */
	if (PCI_PRODUCT(pa->pa_id) >= 0x1000 &&
	    PCI_PRODUCT(pa->pa_id) <= 0x103f &&
	    PCI_REVISION(pa->pa_class) == 0)
		return 1;
	/* virtio 1.0 */
	if (PCI_PRODUCT(pa->pa_id) >= 0x1040 &&
	    PCI_PRODUCT(pa->pa_id) <= 0x107f &&
	    PCI_REVISION(pa->pa_class) == 1)
		return 1;
	return 0;
}

#if VIRTIO_DEBUG
void
virtio_pci_dump_caps(struct virtio_pci_softc *sc)
{
	pci_chipset_tag_t pc = sc->sc_pc;
	pcitag_t tag = sc->sc_ptag;
	int offset;
	union {
		pcireg_t reg[4];
		struct virtio_pci_cap vcap;
	} v;

	if (!pci_get_capability(pc, tag, PCI_CAP_VENDSPEC, &offset, &v.reg[0]))
		return;

	printf("\n");
	do {
		for (int i = 0; i < 4; i++)
			v.reg[i] = pci_conf_read(pc, tag, offset + i * 4);
		printf("%s: cfgoff %#x len %#x type %#x bar %#x: off %#x len %#x\n",
		    __func__, offset, v.vcap.cap_len, v.vcap.cfg_type, v.vcap.bar,
		    v.vcap.offset, v.vcap.length);
		offset = v.vcap.cap_next;
	} while (offset != 0);
}
#endif

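/*
 * Search the vendor specific PCI capability list for an entry of the given
 * cfg_type and copy it into buf.  Capabilities larger than struct
 * virtio_pci_cap (e.g. the notify capability) are copied in full, provided
 * buflen is big enough.  Returns 0 on success, ENOENT if no such capability
 * exists and ERANGE if it does not fit into buf.
 */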
int
virtio_pci_find_cap(struct virtio_pci_softc *sc, int cfg_type, void *buf, int buflen)
{
	pci_chipset_tag_t pc = sc->sc_pc;
	pcitag_t tag = sc->sc_ptag;
	unsigned int offset, i, len;
	union {
		pcireg_t reg[8];
		struct virtio_pci_cap vcap;
	} *v = buf;

	if (buflen < sizeof(struct virtio_pci_cap))
		return ERANGE;

	if (!pci_get_capability(pc, tag, PCI_CAP_VENDSPEC, &offset, &v->reg[0]))
		return ENOENT;

	do {
		for (i = 0; i < 4; i++)
			v->reg[i] = pci_conf_read(pc, tag, offset + i * 4);
		if (v->vcap.cfg_type == cfg_type)
			break;
		offset = v->vcap.cap_next;
	} while (offset != 0);

	if (offset == 0)
		return ENOENT;

	if (v->vcap.cap_len > sizeof(struct virtio_pci_cap)) {
		len = roundup(v->vcap.cap_len, sizeof(pcireg_t));
		if (len > buflen) {
			printf("%s: cap too large\n", __func__);
			return ERANGE;
		}
		for (i = 4; i < len / sizeof(pcireg_t); i++)
			v->reg[i] = pci_conf_read(pc, tag, offset + i * 4);
	}

	return 0;
}


#define NMAPREG		((PCI_MAPREG_END - PCI_MAPREG_START) / \
				sizeof(pcireg_t))

int
virtio_pci_attach_10(struct virtio_pci_softc *sc, struct pci_attach_args *pa)
{
	struct virtio_pci_cap common, isr, device;
	struct virtio_pci_notify_cap notify;
	int have_device_cfg = 0;
	bus_size_t bars[NMAPREG] = { 0 };
	int bars_idx[NMAPREG] = { 0 };
	struct virtio_pci_cap *caps[] = { &common, &isr, &device, &notify.cap };
	int i, j = 0, ret = 0;

	if (virtio_pci_find_cap(sc, VIRTIO_PCI_CAP_COMMON_CFG, &common, sizeof(common)) != 0)
		return ENODEV;

	if (virtio_pci_find_cap(sc, VIRTIO_PCI_CAP_NOTIFY_CFG, &notify, sizeof(notify)) != 0)
		return ENODEV;
	if (virtio_pci_find_cap(sc, VIRTIO_PCI_CAP_ISR_CFG, &isr, sizeof(isr)) != 0)
		return ENODEV;
	if (virtio_pci_find_cap(sc, VIRTIO_PCI_CAP_DEVICE_CFG, &device, sizeof(device)) != 0)
		memset(&device, 0, sizeof(device));
	else
		have_device_cfg = 1;

	/*
	 * XXX Maybe there are devices that offer the pci caps but not the
	 * XXX VERSION_1 feature bit?  Then we should check the feature bit
	 * XXX here and fall back to 0.9 if it is not present.
	 */

	/* Figure out which bars we need to map */
	for (i = 0; i < nitems(caps); i++) {
		int bar = caps[i]->bar;
		bus_size_t len = caps[i]->offset + caps[i]->length;
		if (caps[i]->length == 0)
			continue;
		if (bars[bar] < len)
			bars[bar] = len;
	}

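	/*
	 * Map each BAR that at least one capability lives in, and remember
	 * in bars_idx[] which mapping belongs to which BAR: the notify,
	 * devcfg, isr and common subregions below are carved out of these
	 * mappings.
	 */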
	for (i = 0; i < nitems(bars); i++) {
		int reg;
		pcireg_t type;
		if (bars[i] == 0)
			continue;
		reg = PCI_MAPREG_START + i * 4;
		type = pci_mapreg_type(sc->sc_pc, sc->sc_ptag, reg);
		if (pci_mapreg_map(pa, reg, type, 0, &sc->sc_bars_iot[j],
		    &sc->sc_bars_ioh[j], NULL, &sc->sc_bars_iosize[j],
		    bars[i])) {
			printf("%s: can't map bar %u\n",
			    sc->sc_sc.sc_dev.dv_xname, i);
			ret = EIO;
			goto err;
		}
		bars_idx[i] = j;
		j++;
	}

	i = bars_idx[notify.cap.bar];
	if (bus_space_subregion(sc->sc_bars_iot[i], sc->sc_bars_ioh[i],
	    notify.cap.offset, notify.cap.length, &sc->sc_notify_ioh) != 0) {
		printf("%s: can't map notify i/o space\n",
		    sc->sc_sc.sc_dev.dv_xname);
		ret = EIO;
		goto err;
	}
	sc->sc_notify_iosize = notify.cap.length;
	sc->sc_notify_iot = sc->sc_bars_iot[i];
	sc->sc_notify_off_multiplier = notify.notify_off_multiplier;

	if (have_device_cfg) {
		i = bars_idx[device.bar];
		if (bus_space_subregion(sc->sc_bars_iot[i], sc->sc_bars_ioh[i],
		    device.offset, device.length, &sc->sc_devcfg_ioh) != 0) {
			printf("%s: can't map devcfg i/o space\n",
			    sc->sc_sc.sc_dev.dv_xname);
			ret = EIO;
			goto err;
		}
		sc->sc_devcfg_iosize = device.length;
		sc->sc_devcfg_iot = sc->sc_bars_iot[i];
	}

	i = bars_idx[isr.bar];
	if (bus_space_subregion(sc->sc_bars_iot[i], sc->sc_bars_ioh[i],
	    isr.offset, isr.length, &sc->sc_isr_ioh) != 0) {
		printf("%s: can't map isr i/o space\n",
		    sc->sc_sc.sc_dev.dv_xname);
		ret = EIO;
		goto err;
	}
	sc->sc_isr_iosize = isr.length;
	sc->sc_isr_iot = sc->sc_bars_iot[i];

	i = bars_idx[common.bar];
	if (bus_space_subregion(sc->sc_bars_iot[i], sc->sc_bars_ioh[i],
	    common.offset, common.length, &sc->sc_ioh) != 0) {
		printf("%s: can't map common i/o space\n",
		    sc->sc_sc.sc_dev.dv_xname);
		ret = EIO;
		goto err;
	}
	sc->sc_iosize = common.length;
	sc->sc_iot = sc->sc_bars_iot[i];

	sc->sc_sc.sc_version_1 = 1;
	return 0;

err:
	/* there is no pci_mapreg_unmap() */
	return ret;
}

int
virtio_pci_attach_09(struct virtio_pci_softc *sc, struct pci_attach_args *pa)
{
	struct virtio_softc *vsc = &sc->sc_sc;
	pcireg_t type;

	type = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
	if (pci_mapreg_map(pa, PCI_MAPREG_START, type, 0,
	    &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_iosize, 0)) {
		printf("%s: can't map i/o space\n", vsc->sc_dev.dv_xname);
		return EIO;
	}

	if (bus_space_subregion(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_NOTIFY, 2, &sc->sc_notify_ioh) != 0) {
		printf("%s: can't map notify i/o space\n",
		    vsc->sc_dev.dv_xname);
		return EIO;
	}
	sc->sc_notify_iosize = 2;
	sc->sc_notify_iot = sc->sc_iot;

	if (bus_space_subregion(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_ISR_STATUS, 1, &sc->sc_isr_ioh) != 0) {
		printf("%s: can't map isr i/o space\n",
		    vsc->sc_dev.dv_xname);
		return EIO;
	}
	sc->sc_isr_iosize = 1;
	sc->sc_isr_iot = sc->sc_iot;

	return 0;
}

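/*
 * Attach glue: pick the register layout (1.0 capabilities if the revision
 * and driver flags allow it, otherwise legacy 0.9), reset the device, set
 * the ACK and DRIVER status bits, attach the child driver and then
 * establish interrupts: per-VQ MSI-X if possible, a single shared MSI-X
 * queue vector as the first fallback, and a legacy (MSI or INTx) handler
 * as the last resort.
 */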
void
virtio_pci_attach(struct device *parent, struct device *self, void *aux)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)self;
	struct virtio_softc *vsc = &sc->sc_sc;
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	int revision, ret = ENODEV;
	pcireg_t id;
	char const *intrstr;
	pci_intr_handle_t ih;
	struct virtio_pci_attach_args vpa = { { 0 }, pa };

	revision = PCI_REVISION(pa->pa_class);
	switch (revision) {
	case 0:
		/* subsystem ID shows what I am */
		id = PCI_PRODUCT(pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG));
		break;
	case 1:
		id = PCI_PRODUCT(pa->pa_id) - 0x1040;
		break;
	default:
		printf("unknown revision 0x%02x; giving up\n", revision);
		return;
	}

	sc->sc_pc = pc;
	sc->sc_ptag = pa->pa_tag;
	vsc->sc_dmat = pa->pa_dmat;

#if defined(__i386__) || defined(__amd64__)
	/*
	 * For virtio, ignore normal MSI black/white-listing depending on the
	 * PCI bridge but enable it unconditionally.
	 */
	pa->pa_flags |= PCI_FLAGS_MSI_ENABLED;
#endif

#if VIRTIO_DEBUG
	virtio_pci_dump_caps(sc);
#endif

	sc->sc_nintr = min(MAX_MSIX_VECS, pci_intr_msix_count(pa));
	sc->sc_nintr = max(sc->sc_nintr, 1);
	vpa.vpa_va.va_nintr = sc->sc_nintr;

	sc->sc_intr = mallocarray(sc->sc_nintr, sizeof(*sc->sc_intr),
	    M_DEVBUF, M_WAITOK | M_ZERO);

	vsc->sc_ops = &virtio_pci_ops;
	if ((vsc->sc_dev.dv_cfdata->cf_flags & VIRTIO_CF_NO_VERSION_1) == 0 &&
	    (revision == 1 ||
	     (vsc->sc_dev.dv_cfdata->cf_flags & VIRTIO_CF_PREFER_VERSION_1))) {
		ret = virtio_pci_attach_10(sc, pa);
	}
	if (ret != 0 && revision == 0) {
		/* revision 0 means 0.9 only or both 0.9 and 1.0 */
		ret = virtio_pci_attach_09(sc, pa);
	}
	if (ret != 0) {
		printf(": Cannot attach (%d)\n", ret);
		goto fail_0;
	}

	sc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
	sc->sc_irq_type = IRQ_NO_MSIX;
	if (virtio_pci_adjust_config_region(sc) != 0)
		goto fail_0;

	virtio_device_reset(vsc);
	virtio_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
	virtio_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);

	printf("\n");
	vpa.vpa_va.va_devid = id;
	vsc->sc_child = NULL;
	config_found(self, &vpa, NULL);
	if (vsc->sc_child == NULL) {
		printf("%s: no matching child driver; not configured\n",
		    vsc->sc_dev.dv_xname);
		goto fail_1;
	}
	if (vsc->sc_child == VIRTIO_CHILD_ERROR) {
		printf("%s: virtio configuration failed\n",
		    vsc->sc_dev.dv_xname);
		goto fail_1;
	}

	if (virtio_pci_setup_msix(sc, &vpa, 0) == 0) {
		sc->sc_irq_type = IRQ_MSIX_PER_VQ;
		intrstr = "msix per-VQ";
	} else if (virtio_pci_setup_msix(sc, &vpa, 1) == 0) {
		sc->sc_irq_type = IRQ_MSIX_SHARED;
		intrstr = "msix shared";
	} else {
		int (*ih_func)(void *) = virtio_pci_legacy_intr;
		if (pci_intr_map_msi(pa, &ih) != 0 &&
		    pci_intr_map(pa, &ih) != 0) {
			printf("%s: couldn't map interrupt\n",
			    vsc->sc_dev.dv_xname);
			goto fail_2;
		}
		intrstr = pci_intr_string(pc, ih);
		/*
		 * We always set the IPL_MPSAFE flag in order to do the
		 * relatively expensive ISR read without lock, and then
		 * grab the kernel lock in the interrupt handler.
		 */
		if (vsc->sc_ipl & IPL_MPSAFE)
			ih_func = virtio_pci_legacy_intr_mpsafe;
		sc->sc_intr[0].ih = pci_intr_establish(pc, ih,
		    vsc->sc_ipl | IPL_MPSAFE, ih_func, sc,
		    vsc->sc_child->dv_xname);
		if (sc->sc_intr[0].ih == NULL) {
			printf("%s: couldn't establish interrupt",
			    vsc->sc_dev.dv_xname);
			if (intrstr != NULL)
				printf(" at %s", intrstr);
			printf("\n");
			goto fail_2;
		}
	}
	virtio_pci_setup_intrs(vsc);
	printf("%s: %s\n", vsc->sc_dev.dv_xname, intrstr);

	return;

fail_2:
	config_detach(vsc->sc_child, 0);
fail_1:
	/* no pci_mapreg_unmap() or pci_intr_unmap() */
	virtio_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
fail_0:
	free(sc->sc_intr, M_DEVBUF, sc->sc_nintr * sizeof(*sc->sc_intr));
}

int
virtio_pci_detach(struct device *self, int flags)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)self;
	struct virtio_softc *vsc = &sc->sc_sc;
	int r;

	if (vsc->sc_child != 0 && vsc->sc_child != VIRTIO_CHILD_ERROR) {
		r = config_detach(vsc->sc_child, flags);
		if (r)
			return r;
	}
	KASSERT(vsc->sc_child == 0 || vsc->sc_child == VIRTIO_CHILD_ERROR);
	KASSERT(vsc->sc_vqs == 0);
	virtio_pci_free_irqs(sc);
	if (sc->sc_iosize)
		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_iosize);
	sc->sc_iosize = 0;

	return 0;
}

int
virtio_pci_adjust_config_region(struct virtio_pci_softc *sc)
{
	if (sc->sc_sc.sc_version_1)
		return 0;
	sc->sc_devcfg_iosize = sc->sc_iosize - sc->sc_devcfg_offset;
	sc->sc_devcfg_iot = sc->sc_iot;
	if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, sc->sc_devcfg_offset,
	    sc->sc_devcfg_iosize, &sc->sc_devcfg_ioh) != 0) {
		printf("%s: can't map config i/o space\n",
		    sc->sc_sc.sc_dev.dv_xname);
		return 1;
	}
	return 0;
}

/*
 * Feature negotiation.
 * Prints available / negotiated features if guest_feature_names != NULL
 * and VIRTIO_DEBUG is 1.
 */
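/*
 * Hedged example, not taken from the tree: with the cf_flags bit values
 * described below (indirect descriptors disabled by bit 1), indirect
 * descriptors could be switched off for the first virtio device from the
 * boot-time kernel configuration prompt roughly like this:
 *
 *	boot> boot -c
 *	UKC> change virtio0
 *	  ... answer 'y', set flags to 0x2 ...
 *	UKC> quit
 *
 * See boot_config(8) and config(8).
 */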
int
virtio_pci_negotiate_features(struct virtio_softc *vsc,
    const struct virtio_feature_name *guest_feature_names)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	uint64_t host, negotiated;

	vsc->sc_active_features = 0;

	/*
	 * We enable indirect descriptors by default.  They can be switched
	 * off by setting bit 1 in the driver flags, see config(8).
	 */
	if (!(vsc->sc_dev.dv_cfdata->cf_flags & VIRTIO_CF_NO_INDIRECT) &&
	    !(vsc->sc_child->dv_cfdata->cf_flags & VIRTIO_CF_NO_INDIRECT)) {
		vsc->sc_driver_features |= VIRTIO_F_RING_INDIRECT_DESC;
	} else if (guest_feature_names != NULL) {
		printf(" RingIndirectDesc disabled by UKC");
	}

	/*
	 * The driver must add VIRTIO_F_RING_EVENT_IDX if it supports it.
	 * If it did, check if it is disabled by bit 2 in the driver flags.
	 */
	if ((vsc->sc_driver_features & VIRTIO_F_RING_EVENT_IDX) &&
	    ((vsc->sc_dev.dv_cfdata->cf_flags & VIRTIO_CF_NO_EVENT_IDX) ||
	    (vsc->sc_child->dv_cfdata->cf_flags & VIRTIO_CF_NO_EVENT_IDX))) {
		if (guest_feature_names != NULL)
			printf(" RingEventIdx disabled by UKC");
		vsc->sc_driver_features &= ~VIRTIO_F_RING_EVENT_IDX;
	}

	if (vsc->sc_version_1) {
		return virtio_pci_negotiate_features_10(vsc,
		    guest_feature_names);
	}

	/* virtio 0.9 only */
	host = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_DEVICE_FEATURES);
	negotiated = host & vsc->sc_driver_features;
#if VIRTIO_DEBUG
	if (guest_feature_names)
		virtio_log_features(host, negotiated, guest_feature_names);
#endif
	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_GUEST_FEATURES, negotiated);
	vsc->sc_active_features = negotiated;
	if (negotiated & VIRTIO_F_RING_INDIRECT_DESC)
		vsc->sc_indirect = 1;
	else
		vsc->sc_indirect = 0;
	return 0;
}

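/*
 * 1.0 feature negotiation is a handshake rather than a single write: the
 * driver reads both 32-bit halves of the device features, writes back the
 * intersection with its own features (always including VERSION_1), sets
 * FEATURES_OK and then re-reads the status register to verify that the
 * device accepted the feature set.
 */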
int
virtio_pci_negotiate_features_10(struct virtio_softc *vsc,
    const struct virtio_feature_name *guest_feature_names)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	uint64_t host, negotiated;

	vsc->sc_driver_features |= VIRTIO_F_VERSION_1;
	/*
	 * XXX Without this SEV doesn't work with a KVM/qemu hypervisor on
	 * XXX amd64.
	 */
	vsc->sc_driver_features |= VIRTIO_F_ACCESS_PLATFORM;
	/* notify on empty is 0.9 only */
	vsc->sc_driver_features &= ~VIRTIO_F_NOTIFY_ON_EMPTY;
	CWRITE(sc, device_feature_select, 0);
	host = CREAD(sc, device_feature);
	CWRITE(sc, device_feature_select, 1);
	host |= (uint64_t)CREAD(sc, device_feature) << 32;

	negotiated = host & vsc->sc_driver_features;
#if VIRTIO_DEBUG
	if (guest_feature_names)
		virtio_log_features(host, negotiated, guest_feature_names);
#endif
	CWRITE(sc, driver_feature_select, 0);
	CWRITE(sc, driver_feature, negotiated & 0xffffffff);
	CWRITE(sc, driver_feature_select, 1);
	CWRITE(sc, driver_feature, negotiated >> 32);
	virtio_pci_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_FEATURES_OK);

	if ((CREAD(sc, device_status) &
	    VIRTIO_CONFIG_DEVICE_STATUS_FEATURES_OK) == 0) {
		printf("%s: Feature negotiation failed\n",
		    vsc->sc_dev.dv_xname);
		CWRITE(sc, device_status, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
		return ENXIO;
	}
	vsc->sc_active_features = negotiated;

	if (negotiated & VIRTIO_F_RING_INDIRECT_DESC)
		vsc->sc_indirect = 1;
	else
		vsc->sc_indirect = 0;

	if ((negotiated & VIRTIO_F_VERSION_1) == 0) {
#if VIRTIO_DEBUG
		printf("%s: Host rejected Version_1\n", __func__);
#endif
		CWRITE(sc, device_status, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
		return EINVAL;
	}
	return 0;
}

/*
 * Device configuration registers.
 */
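/*
 * All offsets are relative to sc_devcfg_ioh.  Note that the 64-bit
 * accessors are split into two 32-bit accesses (high word first on read,
 * low word first on write) and are therefore not atomic.
 */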
uint8_t
virtio_pci_read_device_config_1(struct virtio_softc *vsc, int index)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	return bus_space_read_1(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index);
}

uint16_t
virtio_pci_read_device_config_2(struct virtio_softc *vsc, int index)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	return bus_space_read_2(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index);
}

uint32_t
virtio_pci_read_device_config_4(struct virtio_softc *vsc, int index)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	return bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index);
}

uint64_t
virtio_pci_read_device_config_8(struct virtio_softc *vsc, int index)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	uint64_t r;

	r = bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh,
	    index + sizeof(uint32_t));
	r <<= 32;
	r += bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index);
	return r;
}

void
virtio_pci_write_device_config_1(struct virtio_softc *vsc, int index,
    uint8_t value)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	bus_space_write_1(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index, value);
}

void
virtio_pci_write_device_config_2(struct virtio_softc *vsc, int index,
    uint16_t value)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	bus_space_write_2(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index, value);
}

void
virtio_pci_write_device_config_4(struct virtio_softc *vsc,
    int index, uint32_t value)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	bus_space_write_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index, value);
}

void
virtio_pci_write_device_config_8(struct virtio_softc *vsc,
    int index, uint64_t value)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	bus_space_write_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh,
	    index, value & 0xffffffff);
	bus_space_write_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh,
	    index + sizeof(uint32_t), value >> 32);
}

int
virtio_pci_msix_establish(struct virtio_pci_softc *sc,
    struct virtio_pci_attach_args *vpa, int idx,
    int (*handler)(void *), void *ih_arg)
{
	struct virtio_softc *vsc = &sc->sc_sc;
	pci_intr_handle_t ih;

	KASSERT(idx < sc->sc_nintr);

	if (pci_intr_map_msix(vpa->vpa_pa, idx, &ih) != 0) {
#if VIRTIO_DEBUG
		printf("%s[%d]: pci_intr_map_msix failed\n",
		    vsc->sc_dev.dv_xname, idx);
#endif
		return 1;
	}
	snprintf(sc->sc_intr[idx].name, sizeof(sc->sc_intr[idx].name), "%s:%d",
	    vsc->sc_child->dv_xname, idx);
	sc->sc_intr[idx].ih = pci_intr_establish(sc->sc_pc, ih, vsc->sc_ipl,
	    handler, ih_arg, sc->sc_intr[idx].name);
	if (sc->sc_intr[idx].ih == NULL) {
		printf("%s[%d]: couldn't establish msix interrupt\n",
		    vsc->sc_dev.dv_xname, idx);
		return 1;
	}
	return 0;
}

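/*
 * Route the interrupts of a virtqueue (or of config changes) to the given
 * MSI-X vector.  VIRTIO_MSI_NO_VECTOR disables delivery for that source.
 */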
void
virtio_pci_set_msix_queue_vector(struct virtio_pci_softc *sc, uint32_t idx, uint16_t vector)
{
	if (sc->sc_sc.sc_version_1) {
		CWRITE(sc, queue_select, idx);
		CWRITE(sc, queue_msix_vector, vector);
	} else {
		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
		    VIRTIO_CONFIG_QUEUE_SELECT, idx);
		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
		    VIRTIO_MSI_QUEUE_VECTOR, vector);
	}
}

void
virtio_pci_set_msix_config_vector(struct virtio_pci_softc *sc, uint16_t vector)
{
	if (sc->sc_sc.sc_version_1) {
		CWRITE(sc, config_msix_vector, vector);
	} else {
		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
		    VIRTIO_MSI_CONFIG_VECTOR, vector);
	}
}


void
virtio_pci_free_irqs(struct virtio_pci_softc *sc)
{
	struct virtio_softc *vsc = &sc->sc_sc;
	int i;

	if (sc->sc_devcfg_offset == VIRTIO_CONFIG_DEVICE_CONFIG_MSI) {
		for (i = 0; i < vsc->sc_nvqs; i++) {
			virtio_pci_set_msix_queue_vector(sc, i,
			    VIRTIO_MSI_NO_VECTOR);
		}
	}

	for (i = 0; i < sc->sc_nintr; i++) {
		if (sc->sc_intr[i].ih) {
			pci_intr_disestablish(sc->sc_pc, sc->sc_intr[i].ih);
			sc->sc_intr[i].ih = NULL;
		}
	}

	sc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
	virtio_pci_adjust_config_region(sc);
}

int
virtio_pci_setup_msix(struct virtio_pci_softc *sc,
    struct virtio_pci_attach_args *vpa, int shared)
{
	struct virtio_softc *vsc = &sc->sc_sc;
	int i;

	/* Shared needs config + queue */
	if (shared && vpa->vpa_va.va_nintr < 1 + 1)
		return 1;
	/* Per VQ needs config + N * queue */
	if (!shared && vpa->vpa_va.va_nintr < 1 + vsc->sc_nvqs)
		return 1;

	if (virtio_pci_msix_establish(sc, vpa, 0, virtio_pci_config_intr, vsc))
		return 1;
	sc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_MSI;
	virtio_pci_adjust_config_region(sc);

	if (shared) {
		if (virtio_pci_msix_establish(sc, vpa, 1,
		    virtio_pci_shared_queue_intr, vsc)) {
			goto fail;
		}

		for (i = 0; i < vsc->sc_nvqs; i++)
			vsc->sc_vqs[i].vq_intr_vec = 1;
	} else {
		for (i = 0; i < vsc->sc_nvqs; i++) {
			if (virtio_pci_msix_establish(sc, vpa, i + 1,
			    virtio_pci_queue_intr, &vsc->sc_vqs[i])) {
				goto fail;
			}
			vsc->sc_vqs[i].vq_intr_vec = i + 1;
		}
	}

	return 0;
fail:
	virtio_pci_free_irqs(sc);
	return 1;
}

/*
 * Interrupt handler.
 */

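/*
 * In legacy (non MSI-X) mode, reading the ISR register returns the pending
 * interrupt causes and acknowledges the interrupt at the same time; the
 * register is not defined in MSI-X mode, which is why the polling handler
 * below cannot use it.
 */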

/*
 * Only used without MSI-X.
 */
int
virtio_pci_legacy_intr(void *arg)
{
	struct virtio_pci_softc *sc = arg;
	struct virtio_softc *vsc = &sc->sc_sc;
	int isr, r = 0;

	/* check and ack the interrupt */
	isr = bus_space_read_1(sc->sc_isr_iot, sc->sc_isr_ioh, 0);
	if (isr == 0)
		return 0;
	KERNEL_LOCK();
	if ((isr & VIRTIO_CONFIG_ISR_CONFIG_CHANGE) &&
	    (vsc->sc_config_change != NULL)) {
		r = (vsc->sc_config_change)(vsc);
	}
	r |= virtio_check_vqs(vsc);
	KERNEL_UNLOCK();

	return r;
}

int
virtio_pci_legacy_intr_mpsafe(void *arg)
{
	struct virtio_pci_softc *sc = arg;
	struct virtio_softc *vsc = &sc->sc_sc;
	int isr, r = 0;

	/* check and ack the interrupt */
	isr = bus_space_read_1(sc->sc_isr_iot, sc->sc_isr_ioh, 0);
	if (isr == 0)
		return 0;
	if ((isr & VIRTIO_CONFIG_ISR_CONFIG_CHANGE) &&
	    (vsc->sc_config_change != NULL)) {
		r = (vsc->sc_config_change)(vsc);
	}
	r |= virtio_check_vqs(vsc);
	return r;
}

/*
 * Only used with MSI-X.
 */
int
virtio_pci_config_intr(void *arg)
{
	struct virtio_softc *vsc = arg;

	if (vsc->sc_config_change != NULL)
		return vsc->sc_config_change(vsc);
	return 0;
}

/*
 * Only used with MSI-X.
 */
int
virtio_pci_queue_intr(void *arg)
{
	struct virtqueue *vq = arg;
	struct virtio_softc *vsc = vq->vq_owner;

	return virtio_check_vq(vsc, vq);
}

int
virtio_pci_shared_queue_intr(void *arg)
{
	struct virtio_softc *vsc = arg;

	return virtio_check_vqs(vsc);
}

/*
 * Interrupt handler to be used when polling.
 * We cannot use isr here because it is not defined in MSI-X mode.
 */
int
virtio_pci_poll_intr(void *arg)
{
	struct virtio_pci_softc *sc = arg;
	struct virtio_softc *vsc = &sc->sc_sc;
	int r = 0;

	if (vsc->sc_config_change != NULL)
		r = (vsc->sc_config_change)(vsc);

	r |= virtio_check_vqs(vsc);

	return r;
}

/*
 * Notify the device that a virtqueue has new buffers.  With 1.0 the queue
 * index is written at the queue's notify offset within the notify region;
 * with 0.9 the offset is always 0.
 */
void
virtio_pci_kick(struct virtio_softc *vsc, uint16_t idx)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	unsigned offset = 0;
	if (vsc->sc_version_1) {
		offset = vsc->sc_vqs[idx].vq_notify_off *
		    sc->sc_notify_off_multiplier;
	}
	bus_space_write_2(sc->sc_notify_iot, sc->sc_notify_ioh, offset, idx);
}