/*	$NetBSD: virtio_pci.c,v 1.43 2023/07/07 07:19:36 rin Exp $	*/

/*
 * Copyright (c) 2020 The NetBSD Foundation, Inc.
 * Copyright (c) 2012 Stefan Fritsch.
 * Copyright (c) 2010 Minoura Makoto.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: virtio_pci.c,v 1.43 2023/07/07 07:19:36 rin Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kmem.h>
#include <sys/module.h>
#include <sys/endian.h>
#include <sys/interrupt.h>
#include <sys/syslog.h>

#include <sys/device.h>

#include <dev/pci/pcidevs.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/pci/virtioreg.h> /* XXX: move to non-pci */
#include <dev/pci/virtio_pcireg.h>

#define VIRTIO_PRIVATE
#include <dev/pci/virtiovar.h> /* XXX: move to non-pci */

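/*
 * Message helper: with _use_log set (device reinit after attach),
 * diagnostics go to the kernel log via log(); during autoconfiguration
 * they go to the console via aprint_error_dev().
 */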
#define VIRTIO_PCI_LOG(_sc, _use_log, _fmt, _args...)	\
do {							\
	if ((_use_log)) {				\
		log(LOG_DEBUG, "%s: " _fmt,		\
		    device_xname((_sc)->sc_dev),	\
		    ##_args);				\
	} else {					\
		aprint_error_dev((_sc)->sc_dev,		\
		    _fmt, ##_args);			\
	}						\
} while(0)

static int	virtio_pci_match(device_t, cfdata_t, void *);
static void	virtio_pci_attach(device_t, device_t, void *);
static int	virtio_pci_rescan(device_t, const char *, const int *);
static int	virtio_pci_detach(device_t, int);


#define NMAPREG		((PCI_MAPREG_END - PCI_MAPREG_START) / \
				sizeof(pcireg_t))
struct virtio_pci_softc {
	struct virtio_softc	sc_sc;
	bool			sc_intr_pervq;

	/* IO space */
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_iosize;
	bus_size_t		sc_mapped_iosize;

	/* BARs */
	bus_space_tag_t		sc_bars_iot[NMAPREG];
	bus_space_handle_t	sc_bars_ioh[NMAPREG];
	bus_size_t		sc_bars_iosize[NMAPREG];

	/* notify space */
	bus_space_tag_t		sc_notify_iot;
	bus_space_handle_t	sc_notify_ioh;
	bus_size_t		sc_notify_iosize;
	uint32_t		sc_notify_off_multiplier;

	/* isr space */
	bus_space_tag_t		sc_isr_iot;
	bus_space_handle_t	sc_isr_ioh;
	bus_size_t		sc_isr_iosize;

	/* generic */
	struct pci_attach_args	sc_pa;
	pci_intr_handle_t	*sc_ihp;
	void			**sc_ihs;
	int			sc_ihs_num;
	int			sc_devcfg_offset;	/* for 0.9 */
};

static int	virtio_pci_attach_09(device_t, void *);
static void	virtio_pci_kick_09(struct virtio_softc *, uint16_t);
static uint16_t	virtio_pci_read_queue_size_09(struct virtio_softc *, uint16_t);
static void	virtio_pci_setup_queue_09(struct virtio_softc *, uint16_t, uint64_t);
static void	virtio_pci_set_status_09(struct virtio_softc *, int);
static void	virtio_pci_negotiate_features_09(struct virtio_softc *, uint64_t);

static int	virtio_pci_attach_10(device_t, void *);
static void	virtio_pci_kick_10(struct virtio_softc *, uint16_t);
static uint16_t	virtio_pci_read_queue_size_10(struct virtio_softc *, uint16_t);
static void	virtio_pci_setup_queue_10(struct virtio_softc *, uint16_t, uint64_t);
static void	virtio_pci_set_status_10(struct virtio_softc *, int);
static void	virtio_pci_negotiate_features_10(struct virtio_softc *, uint64_t);
static int	virtio_pci_find_cap(struct virtio_pci_softc *psc, int cfg_type, void *buf, int buflen);

static int	virtio_pci_alloc_interrupts(struct virtio_softc *);
static void	virtio_pci_free_interrupts(struct virtio_softc *);
static int	virtio_pci_adjust_config_region(struct virtio_pci_softc *psc);
static int	virtio_pci_intr(void *arg);
static int	virtio_pci_msix_queue_intr(void *);
static int	virtio_pci_msix_config_intr(void *);
static int	virtio_pci_setup_interrupts_09(struct virtio_softc *, int);
static int	virtio_pci_setup_interrupts_10(struct virtio_softc *, int);
static int	virtio_pci_establish_msix_interrupts(struct virtio_softc *,
		    struct pci_attach_args *);
static int	virtio_pci_establish_intx_interrupt(struct virtio_softc *,
		    struct pci_attach_args *);
static bool	virtio_pci_msix_enabled(struct virtio_pci_softc *);

#define VIRTIO_MSIX_CONFIG_VECTOR_INDEX	0
#define VIRTIO_MSIX_QUEUE_VECTOR_INDEX	1
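/*
 * MSI-X vector layout: vector 0 is dedicated to config change
 * interrupts; queue interrupts start at vector 1, either shared by all
 * virtqueues or, when enough vectors were allocated, one per virtqueue.
 */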
/*
 * For big-endian aarch64/armv7 on QEMU (and most real HW), only the CPU
 * cores run in big-endian mode; all peripherals are configured as
 * little-endian.  Their default bus_space(9) functions forcibly swap
 * byte order.  This guarantees that PIO'ed data from pci(4), for
 * example, are handled correctly by bus_space(9), while DMA'ed data
 * must be swapped by hand, in violation of the virtio(4) specification.
 */

#if (defined(__aarch64__) || defined(__arm__)) && BYTE_ORDER == BIG_ENDIAN
#	define READ_ENDIAN_09	BIG_ENDIAN
#	define READ_ENDIAN_10	BIG_ENDIAN
#	define STRUCT_ENDIAN_09	BIG_ENDIAN
#	define STRUCT_ENDIAN_10	LITTLE_ENDIAN
#elif BYTE_ORDER == BIG_ENDIAN
#	define READ_ENDIAN_09	LITTLE_ENDIAN
#	define READ_ENDIAN_10	BIG_ENDIAN
#	define STRUCT_ENDIAN_09	BIG_ENDIAN
#	define STRUCT_ENDIAN_10	LITTLE_ENDIAN
#else /* little endian */
#	define READ_ENDIAN_09	LITTLE_ENDIAN
#	define READ_ENDIAN_10	LITTLE_ENDIAN
#	define STRUCT_ENDIAN_09	LITTLE_ENDIAN
#	define STRUCT_ENDIAN_10	LITTLE_ENDIAN
#endif


CFATTACH_DECL3_NEW(virtio_pci, sizeof(struct virtio_pci_softc),
    virtio_pci_match, virtio_pci_attach, virtio_pci_detach, NULL,
    virtio_pci_rescan, NULL, DVF_DETACH_SHUTDOWN);

static const struct virtio_ops virtio_pci_ops_09 = {
	.kick = virtio_pci_kick_09,
	.read_queue_size = virtio_pci_read_queue_size_09,
	.setup_queue = virtio_pci_setup_queue_09,
	.set_status = virtio_pci_set_status_09,
	.neg_features = virtio_pci_negotiate_features_09,
	.alloc_interrupts = virtio_pci_alloc_interrupts,
	.free_interrupts = virtio_pci_free_interrupts,
	.setup_interrupts = virtio_pci_setup_interrupts_09,
};

static const struct virtio_ops virtio_pci_ops_10 = {
	.kick = virtio_pci_kick_10,
	.read_queue_size = virtio_pci_read_queue_size_10,
	.setup_queue = virtio_pci_setup_queue_10,
	.set_status = virtio_pci_set_status_10,
	.neg_features = virtio_pci_negotiate_features_10,
	.alloc_interrupts = virtio_pci_alloc_interrupts,
	.free_interrupts = virtio_pci_free_interrupts,
	.setup_interrupts = virtio_pci_setup_interrupts_10,
};
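/*
 * Device matching: the QUMRANET product ID ranges below correspond to
 * transitional (0x1000-0x103f, revision 0) and non-transitional
 * (0x1040-0x107f, revision 1) virtio devices; see the comments in
 * virtio_pci_match() for the exact spec wording.
 */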
static int
virtio_pci_match(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa;

	pa = (struct pci_attach_args *)aux;
	switch (PCI_VENDOR(pa->pa_id)) {
	case PCI_VENDOR_QUMRANET:
		/* Transitional devices MUST have a PCI Revision ID of 0. */
		if (((PCI_PRODUCT_QUMRANET_VIRTIO_1000 <=
		      PCI_PRODUCT(pa->pa_id)) &&
		     (PCI_PRODUCT(pa->pa_id) <=
		      PCI_PRODUCT_QUMRANET_VIRTIO_103F)) &&
		    PCI_REVISION(pa->pa_class) == 0)
			return 1;
		/*
		 * Non-transitional devices SHOULD have a PCI Revision
		 * ID of 1 or higher.  Drivers MUST match any PCI
		 * Revision ID value.
		 */
		if (((PCI_PRODUCT_QUMRANET_VIRTIO_1040 <=
		      PCI_PRODUCT(pa->pa_id)) &&
		     (PCI_PRODUCT(pa->pa_id) <=
		      PCI_PRODUCT_QUMRANET_VIRTIO_107F)) &&
		    /* XXX: TODO */
		    PCI_REVISION(pa->pa_class) == 1)
			return 1;
		break;
	}

	return 0;
}

static void
virtio_pci_attach(device_t parent, device_t self, void *aux)
{
	struct virtio_pci_softc * const psc = device_private(self);
	struct virtio_softc * const sc = &psc->sc_sc;
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	int revision;
	int ret;
	pcireg_t id;
	pcireg_t csr;

	revision = PCI_REVISION(pa->pa_class);
	switch (revision) {
	case 0:
		/* subsystem ID shows what I am */
		id = PCI_SUBSYS_ID(pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG));
		break;
	case 1:
		/* pci product number shows what I am */
		id = PCI_PRODUCT(pa->pa_id) - PCI_PRODUCT_QUMRANET_VIRTIO_1040;
		break;
	default:
		aprint_normal(": unknown revision 0x%02x; giving up\n",
		    revision);
		return;
	}

	aprint_normal("\n");
	aprint_naive("\n");
	virtio_print_device_type(self, id, revision);

	csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
	csr |= PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_IO_ENABLE;
	pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);

	sc->sc_dev = self;
	psc->sc_pa = *pa;
	psc->sc_iot = pa->pa_iot;

	sc->sc_dmat = pa->pa_dmat;
	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;

	/* attach is dependent on revision */
	ret = 0;
	if (revision == 1) {
		/* try to attach 1.0 */
		ret = virtio_pci_attach_10(self, aux);
	}
	if (ret == 0 && revision == 0) {
		/* revision 0 means 0.9 only or both 0.9 and 1.0 */
		ret = virtio_pci_attach_09(self, aux);
	}
	if (ret) {
		aprint_error_dev(self, "cannot attach (%d)\n", ret);
		return;
	}
	KASSERT(sc->sc_ops);

	/* preset config region */
	psc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
	if (virtio_pci_adjust_config_region(psc))
		return;

	/* generic */
	virtio_device_reset(sc);
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);

	sc->sc_childdevid = id;
	sc->sc_child = NULL;
	virtio_pci_rescan(self, NULL, NULL);
	return;
}
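/*
 * Attach the (single) child device that matches sc_childdevid; a no-op
 * when a child is already attached.
 */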
/* ARGSUSED */
static int
virtio_pci_rescan(device_t self, const char *ifattr, const int *locs)
{
	struct virtio_pci_softc * const psc = device_private(self);
	struct virtio_softc * const sc = &psc->sc_sc;
	struct virtio_attach_args va;

	if (sc->sc_child)	/* Child already attached? */
		return 0;

	memset(&va, 0, sizeof(va));
	va.sc_childdevid = sc->sc_childdevid;

	config_found(self, &va, NULL, CFARGS_NONE);

	if (virtio_attach_failed(sc))
		return 0;

	return 0;
}


static int
virtio_pci_detach(device_t self, int flags)
{
	struct virtio_pci_softc * const psc = device_private(self);
	struct virtio_softc * const sc = &psc->sc_sc;
	int r;

	r = config_detach_children(self, flags);
	if (r != 0)
		return r;

	/* Check that the child never attached, or detached properly */
	KASSERT(sc->sc_child == NULL);
	KASSERT(sc->sc_vqs == NULL);
	KASSERT(psc->sc_ihs_num == 0);

	if (psc->sc_iosize)
		bus_space_unmap(psc->sc_iot, psc->sc_ioh,
		    psc->sc_mapped_iosize);
	psc->sc_iosize = 0;

	return 0;
}


static int
virtio_pci_attach_09(device_t self, void *aux)
{
	struct virtio_pci_softc * const psc = device_private(self);
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	struct virtio_softc * const sc = &psc->sc_sc;

	/* complete IO region */
	if (pci_mapreg_map(pa, PCI_MAPREG_START, PCI_MAPREG_TYPE_IO, 0,
	    &psc->sc_iot, &psc->sc_ioh, NULL, &psc->sc_iosize)) {
		aprint_error_dev(self, "can't map i/o space\n");
		return EIO;
	}
	psc->sc_mapped_iosize = psc->sc_iosize;

	/* queue space */
	if (bus_space_subregion(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_NOTIFY, 2, &psc->sc_notify_ioh)) {
		aprint_error_dev(self, "can't map notify i/o space\n");
		return EIO;
	}
	psc->sc_notify_iosize = 2;
	psc->sc_notify_iot = psc->sc_iot;

	/* ISR space */
	if (bus_space_subregion(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_ISR_STATUS, 1, &psc->sc_isr_ioh)) {
		aprint_error_dev(self, "can't map isr i/o space\n");
		return EIO;
	}
	psc->sc_isr_iosize = 1;
	psc->sc_isr_iot = psc->sc_iot;

	/* set our version 0.9 ops */
	sc->sc_ops = &virtio_pci_ops_09;
	sc->sc_bus_endian = READ_ENDIAN_09;
	sc->sc_struct_endian = STRUCT_ENDIAN_09;
	return 0;
}
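/*
 * The 1.0 transport describes its register windows with vendor-specific
 * PCI capabilities (common, notify, ISR and optional device config),
 * each naming a BAR, an offset and a length; map every BAR that at
 * least one capability references, then carve out the subregions.
 */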
static int
virtio_pci_attach_10(device_t self, void *aux)
{
	struct virtio_pci_softc * const psc = device_private(self);
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	struct virtio_softc * const sc = &psc->sc_sc;
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;

	struct virtio_pci_cap common, isr, device;
	struct virtio_pci_notify_cap notify;
	int have_device_cfg = 0;
	bus_size_t bars[NMAPREG] = { 0 };
	int bars_idx[NMAPREG] = { 0 };
	struct virtio_pci_cap *caps[] = { &common, &isr, &device, &notify.cap };
	int i, j, ret = 0;

	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_COMMON_CFG,
	    &common, sizeof(common)))
		return ENODEV;
	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_NOTIFY_CFG,
	    &notify, sizeof(notify)))
		return ENODEV;
	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_ISR_CFG,
	    &isr, sizeof(isr)))
		return ENODEV;
	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_DEVICE_CFG,
	    &device, sizeof(device)))
		memset(&device, 0, sizeof(device));
	else
		have_device_cfg = 1;

	/* Figure out which BARs we need to map */
	for (i = 0; i < __arraycount(caps); i++) {
		int bar = caps[i]->bar;
		bus_size_t len = caps[i]->offset + caps[i]->length;
		if (caps[i]->length == 0)
			continue;
		if (bars[bar] < len)
			bars[bar] = len;
	}

	for (i = j = 0; i < __arraycount(bars); i++) {
		int reg;
		pcireg_t type;
		if (bars[i] == 0)
			continue;
		reg = PCI_BAR(i);
		type = pci_mapreg_type(pc, tag, reg);
		if (pci_mapreg_map(pa, reg, type, 0,
		    &psc->sc_bars_iot[j], &psc->sc_bars_ioh[j],
		    NULL, &psc->sc_bars_iosize[j])) {
			aprint_error_dev(self, "can't map bar %d\n", i);
			ret = EIO;
			goto err;
		}
		aprint_debug_dev(self,
		    "bar[%d]: iot %p, size 0x%" PRIxBUSSIZE "\n",
		    j, psc->sc_bars_iot[j], psc->sc_bars_iosize[j]);
		bars_idx[i] = j;
		j++;
	}

	i = bars_idx[notify.cap.bar];
	if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
	    notify.cap.offset, notify.cap.length,
	    &psc->sc_notify_ioh)) {
		aprint_error_dev(self, "can't map notify i/o space\n");
		ret = EIO;
		goto err;
	}
	psc->sc_notify_iosize = notify.cap.length;
	psc->sc_notify_iot = psc->sc_bars_iot[i];
	psc->sc_notify_off_multiplier = le32toh(notify.notify_off_multiplier);

	if (have_device_cfg) {
		i = bars_idx[device.bar];
		if (bus_space_subregion(psc->sc_bars_iot[i],
		    psc->sc_bars_ioh[i], device.offset, device.length,
		    &sc->sc_devcfg_ioh)) {
			aprint_error_dev(self, "can't map devcfg i/o space\n");
			ret = EIO;
			goto err;
		}
		aprint_debug_dev(self,
		    "device.offset = 0x%x, device.length = 0x%x\n",
		    device.offset, device.length);
		sc->sc_devcfg_iosize = device.length;
		sc->sc_devcfg_iot = psc->sc_bars_iot[i];
	}

	i = bars_idx[isr.bar];
	if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
	    isr.offset, isr.length, &psc->sc_isr_ioh)) {
		aprint_error_dev(self, "can't map isr i/o space\n");
		ret = EIO;
		goto err;
	}
	psc->sc_isr_iosize = isr.length;
	psc->sc_isr_iot = psc->sc_bars_iot[i];

	i = bars_idx[common.bar];
	if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
	    common.offset, common.length, &psc->sc_ioh)) {
		aprint_error_dev(self, "can't map common i/o space\n");
		ret = EIO;
		goto err;
	}
	psc->sc_iosize = common.length;
	psc->sc_iot = psc->sc_bars_iot[i];
	psc->sc_mapped_iosize = psc->sc_bars_iosize[i];

	psc->sc_sc.sc_version_1 = 1;

	/* set our version 1.0 ops */
	sc->sc_ops = &virtio_pci_ops_10;
	sc->sc_bus_endian = READ_ENDIAN_10;
	sc->sc_struct_endian = STRUCT_ENDIAN_10;
	return 0;

err:
	/* undo our pci_mapreg_map()s */
	for (i = 0; i < __arraycount(bars); i++) {
		if (psc->sc_bars_iosize[i] == 0)
			continue;
		bus_space_unmap(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
		    psc->sc_bars_iosize[i]);
	}
	return ret;
}
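/*
 * Walk the vendor-specific capability list until a capability of the
 * requested cfg_type is found, then copy it out (plus any trailing,
 * type-specific payload) with the little-endian fields converted to
 * host byte order.
 */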
/* v1.0 attach helper */
static int
virtio_pci_find_cap(struct virtio_pci_softc *psc, int cfg_type, void *buf, int buflen)
{
	device_t self = psc->sc_sc.sc_dev;
	pci_chipset_tag_t pc = psc->sc_pa.pa_pc;
	pcitag_t tag = psc->sc_pa.pa_tag;
	unsigned int offset, i, len;
	union {
		pcireg_t reg[8];
		struct virtio_pci_cap vcap;
	} *v = buf;

	if (buflen < sizeof(struct virtio_pci_cap))
		return ERANGE;

	if (!pci_get_capability(pc, tag, PCI_CAP_VENDSPEC, &offset, &v->reg[0]))
		return ENOENT;

	do {
		for (i = 0; i < 4; i++)
			v->reg[i] =
			    le32toh(pci_conf_read(pc, tag, offset + i * 4));
		if (v->vcap.cfg_type == cfg_type)
			break;
		offset = v->vcap.cap_next;
	} while (offset != 0);

	if (offset == 0)
		return ENOENT;

	if (v->vcap.cap_len > sizeof(struct virtio_pci_cap)) {
		len = roundup(v->vcap.cap_len, sizeof(pcireg_t));
		if (len > buflen) {
			aprint_error_dev(self, "%s cap too large\n", __func__);
			return ERANGE;
		}
		for (i = 4; i < len / sizeof(pcireg_t); i++)
			v->reg[i] =
			    le32toh(pci_conf_read(pc, tag, offset + i * 4));
	}

	/* endian fixup */
	v->vcap.offset = le32toh(v->vcap.offset);
	v->vcap.length = le32toh(v->vcap.length);
	return 0;
}


/* -------------------------------------
 * Version 0.9 support
 * -------------------------------------*/

static void
virtio_pci_kick_09(struct virtio_softc *sc, uint16_t idx)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;

	bus_space_write_2(psc->sc_notify_iot, psc->sc_notify_ioh, 0, idx);
}

/* Only does real work for 0.9; also called for 1.0, where it is a no-op. */
static int
virtio_pci_adjust_config_region(struct virtio_pci_softc *psc)
{
	struct virtio_softc * const sc = &psc->sc_sc;
	device_t self = sc->sc_dev;

	if (psc->sc_sc.sc_version_1)
		return 0;

	sc->sc_devcfg_iosize = psc->sc_iosize - psc->sc_devcfg_offset;
	sc->sc_devcfg_iot = psc->sc_iot;
	if (bus_space_subregion(psc->sc_iot, psc->sc_ioh,
	    psc->sc_devcfg_offset, sc->sc_devcfg_iosize,
	    &sc->sc_devcfg_ioh)) {
		aprint_error_dev(self, "can't map config i/o space\n");
		return EIO;
	}

	return 0;
}

static uint16_t
virtio_pci_read_queue_size_09(struct virtio_softc *sc, uint16_t idx)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;

	bus_space_write_2(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_SELECT, idx);
	return bus_space_read_2(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_SIZE);
}

static void
virtio_pci_setup_queue_09(struct virtio_softc *sc, uint16_t idx, uint64_t addr)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;

	bus_space_write_2(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_SELECT, idx);
	bus_space_write_4(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_ADDRESS, addr / VIRTIO_PAGE_SIZE);

	if (psc->sc_ihs_num > 1) {
		int vec = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
		if (psc->sc_intr_pervq)
			vec += idx;
		bus_space_write_2(psc->sc_iot, psc->sc_ioh,
		    VIRTIO_CONFIG_MSI_QUEUE_VECTOR, vec);
	}
}

static void
virtio_pci_set_status_09(struct virtio_softc *sc, int status)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	int old = 0;

	if (status != 0) {
		old = bus_space_read_1(psc->sc_iot, psc->sc_ioh,
		    VIRTIO_CONFIG_DEVICE_STATUS);
	}
	bus_space_write_1(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_DEVICE_STATUS, status|old);
}

static void
virtio_pci_negotiate_features_09(struct virtio_softc *sc, uint64_t guest_features)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	uint32_t r;

	r = bus_space_read_4(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_DEVICE_FEATURES);

	r &= guest_features;

	bus_space_write_4(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_GUEST_FEATURES, r);

	sc->sc_active_features = r;
}

/* -------------------------------------
 * Version 1.0 support
 * -------------------------------------*/
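/*
 * In 1.0, each virtqueue has its own notify address inside the notify
 * window: queue_notify_off (read back in setup_queue) scaled by
 * notify_off_multiplier from the notify capability.
 */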
static void
virtio_pci_kick_10(struct virtio_softc *sc, uint16_t idx)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	unsigned offset = sc->sc_vqs[idx].vq_notify_off *
	    psc->sc_notify_off_multiplier;

	bus_space_write_2(psc->sc_notify_iot, psc->sc_notify_ioh, offset, idx);
}


static uint16_t
virtio_pci_read_queue_size_10(struct virtio_softc *sc, uint16_t idx)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	bus_space_tag_t iot = psc->sc_iot;
	bus_space_handle_t ioh = psc->sc_ioh;

	bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SELECT, idx);
	return bus_space_read_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SIZE);
}

/*
 * By definition little endian only in v1.0.  NB: "MAY" in the text
 * below refers to "independently" (i.e. the order of accesses), not
 * "32-bit" (which is restricted by the earlier "MUST").
 *
 * 4.1.3.1 Driver Requirements: PCI Device Layout
 *
 * For device configuration access, the driver MUST use ... 32-bit
 * wide and aligned accesses for ... 64-bit wide fields.  For 64-bit
 * fields, the driver MAY access each of the high and low 32-bit parts
 * of the field independently.
 */
static __inline void
virtio_pci_bus_space_write_8(bus_space_tag_t iot, bus_space_handle_t ioh,
    bus_size_t offset, uint64_t value)
{
#if _QUAD_HIGHWORD
	bus_space_write_4(iot, ioh, offset, BUS_ADDR_LO32(value));
	bus_space_write_4(iot, ioh, offset + 4, BUS_ADDR_HI32(value));
#else
	bus_space_write_4(iot, ioh, offset, BUS_ADDR_HI32(value));
	bus_space_write_4(iot, ioh, offset + 4, BUS_ADDR_LO32(value));
#endif
}

static void
virtio_pci_setup_queue_10(struct virtio_softc *sc, uint16_t idx, uint64_t addr)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	struct virtqueue *vq = &sc->sc_vqs[idx];
	bus_space_tag_t iot = psc->sc_iot;
	bus_space_handle_t ioh = psc->sc_ioh;
	KASSERT(vq->vq_index == idx);

	bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SELECT, vq->vq_index);
	if (addr == 0) {
		bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_ENABLE, 0);
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_DESC, 0);
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_AVAIL, 0);
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_USED, 0);
	} else {
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_DESC, addr);
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_AVAIL, addr + vq->vq_availoffset);
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_USED, addr + vq->vq_usedoffset);
		bus_space_write_2(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_ENABLE, 1);
		vq->vq_notify_off = bus_space_read_2(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_NOTIFY_OFF);
	}

	if (psc->sc_ihs_num > 1) {
		int vec = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
		if (psc->sc_intr_pervq)
			vec += idx;
		bus_space_write_2(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_MSIX_VECTOR, vec);
	}
}

static void
virtio_pci_set_status_10(struct virtio_softc *sc, int status)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	bus_space_tag_t iot = psc->sc_iot;
	bus_space_handle_t ioh = psc->sc_ioh;
	int old = 0;

	if (status)
		old = bus_space_read_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS);
	bus_space_write_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS, status | old);
}
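/*
 * The 64-bit feature words are exchanged 32 bits at a time through the
 * DEVICE_FEATURE/DRIVER_FEATURE windows, using the *_FEATURE_SELECT
 * registers to pick the low (0) or high (1) half.
 */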
static void
virtio_pci_negotiate_features_10(struct virtio_softc *sc, uint64_t guest_features)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	device_t self = sc->sc_dev;
	bus_space_tag_t iot = psc->sc_iot;
	bus_space_handle_t ioh = psc->sc_ioh;
	uint64_t host, negotiated, device_status;

	guest_features |= VIRTIO_F_VERSION_1;
	/* notify on empty is 0.9 only */
	guest_features &= ~VIRTIO_F_NOTIFY_ON_EMPTY;
	sc->sc_active_features = 0;

	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE_SELECT, 0);
	host = bus_space_read_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE);
	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE_SELECT, 1);
	host |= (uint64_t)
	    bus_space_read_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE) << 32;

	negotiated = host & guest_features;

	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE_SELECT, 0);
	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE,
	    negotiated & 0xffffffff);
	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE_SELECT, 1);
	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE,
	    negotiated >> 32);
	virtio_pci_set_status_10(sc, VIRTIO_CONFIG_DEVICE_STATUS_FEATURES_OK);

	device_status = bus_space_read_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS);
	if ((device_status & VIRTIO_CONFIG_DEVICE_STATUS_FEATURES_OK) == 0) {
		aprint_error_dev(self, "feature negotiation failed\n");
		bus_space_write_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS,
		    VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
		return;
	}

	if ((negotiated & VIRTIO_F_VERSION_1) == 0) {
		aprint_error_dev(self, "host rejected version 1\n");
		bus_space_write_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS,
		    VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
		return;
	}

	sc->sc_active_features = negotiated;
	return;
}


/* -------------------------------------
 * Generic PCI interrupt code
 * -------------------------------------*/

static int
virtio_pci_setup_interrupts_10(struct virtio_softc *sc, int reinit)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	bus_space_tag_t iot = psc->sc_iot;
	bus_space_handle_t ioh = psc->sc_ioh;
	int vector, ret, qid;

	if (!virtio_pci_msix_enabled(psc))
		return 0;

	vector = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	bus_space_write_2(iot, ioh,
	    VIRTIO_CONFIG1_CONFIG_MSIX_VECTOR, vector);
	ret = bus_space_read_2(iot, ioh, VIRTIO_CONFIG1_CONFIG_MSIX_VECTOR);
	if (ret != vector) {
		VIRTIO_PCI_LOG(sc, reinit,
		    "can't set config msix vector\n");
		return -1;
	}

	for (qid = 0; qid < sc->sc_nvqs; qid++) {
		vector = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;

		if (psc->sc_intr_pervq)
			vector += qid;
		bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SELECT, qid);
		bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_MSIX_VECTOR,
		    vector);
		ret = bus_space_read_2(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_MSIX_VECTOR);
		if (ret != vector) {
			VIRTIO_PCI_LOG(sc, reinit, "can't set queue %d "
			    "msix vector\n", qid);
			return -1;
		}
	}

	return 0;
}
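/*
 * A device signals vector rejection by returning a different value
 * (the spec's VIRTIO_MSI_NO_VECTOR) on read-back, which is why each
 * MSI-X vector register is written and then read again; the 0.9
 * variant below does the same dance through the legacy window.
 */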
static int
virtio_pci_setup_interrupts_09(struct virtio_softc *sc, int reinit)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	int offset, vector, ret, qid;

	if (!virtio_pci_msix_enabled(psc))
		return 0;

	offset = VIRTIO_CONFIG_MSI_CONFIG_VECTOR;
	vector = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;

	bus_space_write_2(psc->sc_iot, psc->sc_ioh, offset, vector);
	ret = bus_space_read_2(psc->sc_iot, psc->sc_ioh, offset);
	if (ret != vector) {
		aprint_debug_dev(sc->sc_dev, "%s: expected=%d, actual=%d\n",
		    __func__, vector, ret);
		VIRTIO_PCI_LOG(sc, reinit,
		    "can't set config msix vector\n");
		return -1;
	}

	for (qid = 0; qid < sc->sc_nvqs; qid++) {
		offset = VIRTIO_CONFIG_QUEUE_SELECT;
		bus_space_write_2(psc->sc_iot, psc->sc_ioh, offset, qid);

		offset = VIRTIO_CONFIG_MSI_QUEUE_VECTOR;
		vector = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;

		if (psc->sc_intr_pervq)
			vector += qid;

		bus_space_write_2(psc->sc_iot, psc->sc_ioh, offset, vector);
		ret = bus_space_read_2(psc->sc_iot, psc->sc_ioh, offset);
		if (ret != vector) {
			aprint_debug_dev(sc->sc_dev, "%s[qid=%d]:"
			    " expected=%d, actual=%d\n",
			    __func__, qid, vector, ret);
			VIRTIO_PCI_LOG(sc, reinit, "can't set queue %d "
			    "msix vector\n", qid);
			return -1;
		}
	}

	return 0;
}
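/*
 * Establish the MSI-X handlers: one for config changes, then either a
 * single shared handler for all virtqueues or one handler per
 * virtqueue, in which case the vectors are also spread across the
 * available CPUs.
 */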
static int
virtio_pci_establish_msix_interrupts(struct virtio_softc *sc,
    struct pci_attach_args *pa)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	device_t self = sc->sc_dev;
	pci_chipset_tag_t pc = pa->pa_pc;
	struct virtqueue *vq;
	char intrbuf[PCI_INTRSTR_LEN];
	char intr_xname[INTRDEVNAMEBUF];
	char const *intrstr;
	int idx, qid, n;

	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE)
		pci_intr_setattr(pc, &psc->sc_ihp[idx], PCI_INTR_MPSAFE, true);

	snprintf(intr_xname, sizeof(intr_xname), "%s config",
	    device_xname(sc->sc_dev));

	psc->sc_ihs[idx] = pci_intr_establish_xname(pc, psc->sc_ihp[idx],
	    sc->sc_ipl, virtio_pci_msix_config_intr, sc, intr_xname);
	if (psc->sc_ihs[idx] == NULL) {
		aprint_error_dev(self, "couldn't establish MSI-X for config\n");
		goto error;
	}

	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
	if (psc->sc_intr_pervq) {
		for (qid = 0; qid < sc->sc_nvqs; qid++) {
			n = idx + qid;
			vq = &sc->sc_vqs[qid];

			snprintf(intr_xname, sizeof(intr_xname), "%s vq#%d",
			    device_xname(sc->sc_dev), qid);

			if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE) {
				pci_intr_setattr(pc, &psc->sc_ihp[n],
				    PCI_INTR_MPSAFE, true);
			}

			psc->sc_ihs[n] = pci_intr_establish_xname(pc, psc->sc_ihp[n],
			    sc->sc_ipl, vq->vq_intrhand, vq->vq_intrhand_arg, intr_xname);
			if (psc->sc_ihs[n] == NULL) {
				aprint_error_dev(self, "couldn't establish MSI-X for a vq\n");
				goto error;
			}
		}
	} else {
		if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE)
			pci_intr_setattr(pc, &psc->sc_ihp[idx], PCI_INTR_MPSAFE, true);

		snprintf(intr_xname, sizeof(intr_xname), "%s queues",
		    device_xname(sc->sc_dev));
		psc->sc_ihs[idx] = pci_intr_establish_xname(pc, psc->sc_ihp[idx],
		    sc->sc_ipl, virtio_pci_msix_queue_intr, sc, intr_xname);
		if (psc->sc_ihs[idx] == NULL) {
			aprint_error_dev(self, "couldn't establish MSI-X for queues\n");
			goto error;
		}
	}

	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	intrstr = pci_intr_string(pc, psc->sc_ihp[idx], intrbuf, sizeof(intrbuf));
	aprint_normal_dev(self, "config interrupting at %s\n", intrstr);
	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
	if (psc->sc_intr_pervq) {
		kcpuset_t *affinity;
		int affinity_to, r;

		kcpuset_create(&affinity, false);

		for (qid = 0; qid < sc->sc_nvqs; qid++) {
			n = idx + qid;
			affinity_to = (qid / 2) % ncpu;

			intrstr = pci_intr_string(pc, psc->sc_ihp[n],
			    intrbuf, sizeof(intrbuf));

			kcpuset_zero(affinity);
			kcpuset_set(affinity, affinity_to);
			r = interrupt_distribute(psc->sc_ihs[n], affinity, NULL);
			if (r == 0) {
				aprint_normal_dev(self,
				    "for vq #%d interrupting at %s affinity to %u\n",
				    qid, intrstr, affinity_to);
			} else {
				aprint_normal_dev(self,
				    "for vq #%d interrupting at %s\n",
				    qid, intrstr);
			}
		}

		kcpuset_destroy(affinity);
	} else {
		intrstr = pci_intr_string(pc, psc->sc_ihp[idx], intrbuf, sizeof(intrbuf));
		aprint_normal_dev(self, "queues interrupting at %s\n", intrstr);
	}

	return 0;

error:
	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	if (psc->sc_ihs[idx] != NULL)
		pci_intr_disestablish(psc->sc_pa.pa_pc, psc->sc_ihs[idx]);
	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
	if (psc->sc_intr_pervq) {
		for (qid = 0; qid < sc->sc_nvqs; qid++) {
			n = idx + qid;
			if (psc->sc_ihs[n] == NULL)
				continue;
			pci_intr_disestablish(psc->sc_pa.pa_pc, psc->sc_ihs[n]);
		}

	} else {
		if (psc->sc_ihs[idx] != NULL)
			pci_intr_disestablish(psc->sc_pa.pa_pc, psc->sc_ihs[idx]);
	}

	return -1;
}

static int
virtio_pci_establish_intx_interrupt(struct virtio_softc *sc,
    struct pci_attach_args *pa)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	device_t self = sc->sc_dev;
	pci_chipset_tag_t pc = pa->pa_pc;
	char intrbuf[PCI_INTRSTR_LEN];
	char const *intrstr;

	if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE)
		pci_intr_setattr(pc, &psc->sc_ihp[0], PCI_INTR_MPSAFE, true);

	psc->sc_ihs[0] = pci_intr_establish_xname(pc, psc->sc_ihp[0],
	    sc->sc_ipl, virtio_pci_intr, sc, device_xname(sc->sc_dev));
	if (psc->sc_ihs[0] == NULL) {
		aprint_error_dev(self, "couldn't establish INTx\n");
		return -1;
	}

	intrstr = pci_intr_string(pc, psc->sc_ihp[0], intrbuf, sizeof(intrbuf));
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	return 0;
}
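/*
 * Allocate interrupts, preferring MSI-X (one vector per virtqueue when
 * VIRTIO_F_INTR_PERVQ is requested and enough vectors exist, otherwise
 * two vectors total) and falling back to INTx if MSI-X establishment
 * fails.
 */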
static int
virtio_pci_alloc_interrupts(struct virtio_softc *sc)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	device_t self = sc->sc_dev;
	pci_chipset_tag_t pc = psc->sc_pa.pa_pc;
	pcitag_t tag = psc->sc_pa.pa_tag;
	int error;
	int nmsix;
	int off;
	int counts[PCI_INTR_TYPE_SIZE];
	pci_intr_type_t max_type;
	pcireg_t ctl;

	nmsix = pci_msix_count(psc->sc_pa.pa_pc, psc->sc_pa.pa_tag);
	aprint_debug_dev(self, "pci_msix_count=%d\n", nmsix);

	/* We need at least two: one for config and the other for queues */
	if ((sc->sc_flags & VIRTIO_F_INTR_MSIX) == 0 || nmsix < 2) {
		/* Try INTx only */
		max_type = PCI_INTR_TYPE_INTX;
		counts[PCI_INTR_TYPE_INTX] = 1;
	} else {
		/* Try MSI-X first and INTx second */
		if (ISSET(sc->sc_flags, VIRTIO_F_INTR_PERVQ) &&
		    sc->sc_nvqs + VIRTIO_MSIX_QUEUE_VECTOR_INDEX <= nmsix) {
			nmsix = sc->sc_nvqs + VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
		} else {
			nmsix = 2;
		}

		max_type = PCI_INTR_TYPE_MSIX;
		counts[PCI_INTR_TYPE_MSIX] = nmsix;
		counts[PCI_INTR_TYPE_MSI] = 0;
		counts[PCI_INTR_TYPE_INTX] = 1;
	}

retry:
	error = pci_intr_alloc(&psc->sc_pa, &psc->sc_ihp, counts, max_type);
	if (error != 0) {
		aprint_error_dev(self, "couldn't map interrupt\n");
		return -1;
	}

	if (pci_intr_type(pc, psc->sc_ihp[0]) == PCI_INTR_TYPE_MSIX) {
		psc->sc_intr_pervq = nmsix > 2 ? true : false;
		psc->sc_ihs = kmem_zalloc(sizeof(*psc->sc_ihs) * nmsix,
		    KM_SLEEP);

		error = virtio_pci_establish_msix_interrupts(sc, &psc->sc_pa);
		if (error != 0) {
			kmem_free(psc->sc_ihs, sizeof(*psc->sc_ihs) * nmsix);
			pci_intr_release(pc, psc->sc_ihp, nmsix);

			/* Retry INTx */
			max_type = PCI_INTR_TYPE_INTX;
			counts[PCI_INTR_TYPE_INTX] = 1;
			goto retry;
		}

		psc->sc_ihs_num = nmsix;
		psc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_MSI;
		virtio_pci_adjust_config_region(psc);
	} else if (pci_intr_type(pc, psc->sc_ihp[0]) == PCI_INTR_TYPE_INTX) {
		psc->sc_intr_pervq = false;
		psc->sc_ihs = kmem_zalloc(sizeof(*psc->sc_ihs) * 1,
		    KM_SLEEP);

		error = virtio_pci_establish_intx_interrupt(sc, &psc->sc_pa);
		if (error != 0) {
			kmem_free(psc->sc_ihs, sizeof(*psc->sc_ihs) * 1);
			pci_intr_release(pc, psc->sc_ihp, 1);
			return -1;
		}

		psc->sc_ihs_num = 1;
		psc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
		virtio_pci_adjust_config_region(psc);

		/*
		 * pci_get_capability() returns non-zero when the
		 * capability exists; if the device also has MSI-X,
		 * make sure it stays disabled while we use INTx.
		 */
		error = pci_get_capability(pc, tag, PCI_CAP_MSIX, &off, NULL);
		if (error != 0) {
			ctl = pci_conf_read(pc, tag, off + PCI_MSIX_CTL);
			ctl &= ~PCI_MSIX_CTL_ENABLE;
			pci_conf_write(pc, tag, off + PCI_MSIX_CTL, ctl);
		}
	}

	if (!psc->sc_intr_pervq)
		CLR(sc->sc_flags, VIRTIO_F_INTR_PERVQ);
	return 0;
}

static void
virtio_pci_free_interrupts(struct virtio_softc *sc)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;

	for (int i = 0; i < psc->sc_ihs_num; i++) {
		if (psc->sc_ihs[i] == NULL)
			continue;
		pci_intr_disestablish(psc->sc_pa.pa_pc, psc->sc_ihs[i]);
		psc->sc_ihs[i] = NULL;
	}

	if (psc->sc_ihs_num > 0)
		pci_intr_release(psc->sc_pa.pa_pc, psc->sc_ihp, psc->sc_ihs_num);

	if (psc->sc_ihs != NULL) {
		kmem_free(psc->sc_ihs, sizeof(*psc->sc_ihs) * psc->sc_ihs_num);
		psc->sc_ihs = NULL;
	}
	psc->sc_ihs_num = 0;
}

static bool
virtio_pci_msix_enabled(struct virtio_pci_softc *psc)
{
	pci_chipset_tag_t pc = psc->sc_pa.pa_pc;

	if (pci_intr_type(pc, psc->sc_ihp[0]) == PCI_INTR_TYPE_MSIX)
		return true;

	return false;
}
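/*
 * For INTx, a read of the ISR status byte both fetches the pending
 * bits and acknowledges (deasserts) the interrupt; the
 * VIRTIO_CONFIG_ISR_CONFIG_CHANGE bit distinguishes config-change
 * events from virtqueue activity.
 */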
/*
 * Interrupt handler.
 */
static int
virtio_pci_intr(void *arg)
{
	struct virtio_softc *sc = arg;
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	int isr, r = 0;

	/* check and ack the interrupt */
	isr = bus_space_read_1(psc->sc_isr_iot, psc->sc_isr_ioh, 0);
	if (isr == 0)
		return 0;
	if ((isr & VIRTIO_CONFIG_ISR_CONFIG_CHANGE) &&
	    (sc->sc_config_change != NULL))
		r = (sc->sc_config_change)(sc);
	if (sc->sc_intrhand != NULL) {
		if (sc->sc_soft_ih != NULL)
			softint_schedule(sc->sc_soft_ih);
		else
			r |= (sc->sc_intrhand)(sc);
	}

	return r;
}

static int
virtio_pci_msix_queue_intr(void *arg)
{
	struct virtio_softc *sc = arg;
	int r = 0;

	if (sc->sc_intrhand != NULL) {
		if (sc->sc_soft_ih != NULL)
			softint_schedule(sc->sc_soft_ih);
		else
			r |= (sc->sc_intrhand)(sc);
	}

	return r;
}

static int
virtio_pci_msix_config_intr(void *arg)
{
	struct virtio_softc *sc = arg;
	int r = 0;

	if (sc->sc_config_change != NULL)
		r = (sc->sc_config_change)(sc);
	return r;
}

MODULE(MODULE_CLASS_DRIVER, virtio_pci, "pci,virtio");

#ifdef _MODULE
#include "ioconf.c"
#endif

static int
virtio_pci_modcmd(modcmd_t cmd, void *opaque)
{
	int error = 0;

#ifdef _MODULE
	switch (cmd) {
	case MODULE_CMD_INIT:
		error = config_init_component(cfdriver_ioconf_virtio_pci,
		    cfattach_ioconf_virtio_pci, cfdata_ioconf_virtio_pci);
		break;
	case MODULE_CMD_FINI:
		error = config_fini_component(cfdriver_ioconf_virtio_pci,
		    cfattach_ioconf_virtio_pci, cfdata_ioconf_virtio_pci);
		break;
	default:
		error = ENOTTY;
		break;
	}
#endif

	return error;
}