/*	$NetBSD: virtio_pci.c,v 1.44 2023/11/19 19:49:44 thorpej Exp $	*/

/*
 * Copyright (c) 2020 The NetBSD Foundation, Inc.
 * Copyright (c) 2012 Stefan Fritsch.
 * Copyright (c) 2010 Minoura Makoto.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: virtio_pci.c,v 1.44 2023/11/19 19:49:44 thorpej Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kmem.h>
#include <sys/module.h>
#include <sys/endian.h>
#include <sys/interrupt.h>
#include <sys/syslog.h>

#include <sys/device.h>

#include <dev/pci/pcidevs.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/pci/virtioreg.h>	/* XXX: move to non-pci */
#include <dev/pci/virtio_pcireg.h>

#define VIRTIO_PRIVATE
#include <dev/pci/virtiovar.h>	/* XXX: move to non-pci */

#if defined(__alpha__) || defined(__sparc64__)
/*
 * XXX VIRTIO_F_ACCESS_PLATFORM is required for standard PCI DMA
 * XXX to work on these platforms, at least by Qemu.
 * XXX
 * XXX Generalize this later.
 */
#define	__NEED_VIRTIO_F_ACCESS_PLATFORM
#endif /* __alpha__ || __sparc64__ */
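
/*
 * Error-reporting helper.  During the initial attach (_use_log == 0)
 * messages go to the autoconf output via aprint_error_dev(9); once the
 * device is live and being re-initialized (_use_log != 0, the "reinit"
 * argument of the interrupt setup routines below) they go to the
 * kernel message buffer via log(9) instead.
 */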
#define VIRTIO_PCI_LOG(_sc, _use_log, _fmt, _args...)	\
do {							\
	if ((_use_log)) {				\
		log(LOG_DEBUG, "%s: " _fmt,		\
		    device_xname((_sc)->sc_dev),	\
		    ##_args);				\
	} else {					\
		aprint_error_dev((_sc)->sc_dev,		\
		    _fmt, ##_args);			\
	}						\
} while (0)

static int	virtio_pci_match(device_t, cfdata_t, void *);
static void	virtio_pci_attach(device_t, device_t, void *);
static int	virtio_pci_rescan(device_t, const char *, const int *);
static int	virtio_pci_detach(device_t, int);

#define NMAPREG		((PCI_MAPREG_END - PCI_MAPREG_START) / \
			    sizeof(pcireg_t))

struct virtio_pci_softc {
	struct virtio_softc	sc_sc;
	bool			sc_intr_pervq;

	/* IO space */
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_iosize;
	bus_size_t		sc_mapped_iosize;

	/* BARs */
	bus_space_tag_t		sc_bars_iot[NMAPREG];
	bus_space_handle_t	sc_bars_ioh[NMAPREG];
	bus_size_t		sc_bars_iosize[NMAPREG];

	/* notify space */
	bus_space_tag_t		sc_notify_iot;
	bus_space_handle_t	sc_notify_ioh;
	bus_size_t		sc_notify_iosize;
	uint32_t		sc_notify_off_multiplier;

	/* isr space */
	bus_space_tag_t		sc_isr_iot;
	bus_space_handle_t	sc_isr_ioh;
	bus_size_t		sc_isr_iosize;

	/* generic */
	struct pci_attach_args	sc_pa;
	pci_intr_handle_t	*sc_ihp;
	void			**sc_ihs;
	int			sc_ihs_num;
	int			sc_devcfg_offset;	/* for 0.9 */
};

static int	virtio_pci_attach_09(device_t, void *);
static void	virtio_pci_kick_09(struct virtio_softc *, uint16_t);
static uint16_t	virtio_pci_read_queue_size_09(struct virtio_softc *, uint16_t);
static void	virtio_pci_setup_queue_09(struct virtio_softc *, uint16_t,
		    uint64_t);
static void	virtio_pci_set_status_09(struct virtio_softc *, int);
static void	virtio_pci_negotiate_features_09(struct virtio_softc *,
		    uint64_t);

static int	virtio_pci_attach_10(device_t, void *);
static void	virtio_pci_kick_10(struct virtio_softc *, uint16_t);
static uint16_t	virtio_pci_read_queue_size_10(struct virtio_softc *, uint16_t);
static void	virtio_pci_setup_queue_10(struct virtio_softc *, uint16_t,
		    uint64_t);
static void	virtio_pci_set_status_10(struct virtio_softc *, int);
static void	virtio_pci_negotiate_features_10(struct virtio_softc *,
		    uint64_t);
static int	virtio_pci_find_cap(struct virtio_pci_softc *psc, int cfg_type,
		    void *buf, int buflen);

static int	virtio_pci_alloc_interrupts(struct virtio_softc *);
static void	virtio_pci_free_interrupts(struct virtio_softc *);
static int	virtio_pci_adjust_config_region(struct virtio_pci_softc *psc);
static int	virtio_pci_intr(void *arg);
static int	virtio_pci_msix_queue_intr(void *);
static int	virtio_pci_msix_config_intr(void *);
static int	virtio_pci_setup_interrupts_09(struct virtio_softc *, int);
static int	virtio_pci_setup_interrupts_10(struct virtio_softc *, int);
static int	virtio_pci_establish_msix_interrupts(struct virtio_softc *,
		    struct pci_attach_args *);
static int	virtio_pci_establish_intx_interrupt(struct virtio_softc *,
		    struct pci_attach_args *);
static bool	virtio_pci_msix_enabled(struct virtio_pci_softc *);

#define VIRTIO_MSIX_CONFIG_VECTOR_INDEX	0
#define VIRTIO_MSIX_QUEUE_VECTOR_INDEX	1
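
/*
 * The two indices above fix the MSI-X vector layout: vector 0 carries
 * configuration-change interrupts and vector 1 carries all virtqueue
 * interrupts.  When VIRTIO_F_INTR_PERVQ is in effect, each virtqueue
 * instead gets its own vector at VIRTIO_MSIX_QUEUE_VECTOR_INDEX + qid
 * (see the setup_interrupts routines below).
 */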

/*
 * For big-endian aarch64/armv7 on QEMU (and most real HW), only the CPU
 * cores run in big-endian mode; all peripherals are configured for
 * little-endian mode.  Their default bus_space(9) functions forcibly
 * swap byte order.  This guarantees that data PIO'ed from pci(4), for
 * example, is handled correctly by bus_space(9), while DMA'ed data has
 * to be swapped by hand, in violation of the virtio(4) specifications.
 */

#if (defined(__aarch64__) || defined(__arm__)) && BYTE_ORDER == BIG_ENDIAN
#	define READ_ENDIAN_09	BIG_ENDIAN
#	define READ_ENDIAN_10	BIG_ENDIAN
#	define STRUCT_ENDIAN_09	BIG_ENDIAN
#	define STRUCT_ENDIAN_10	LITTLE_ENDIAN
#elif BYTE_ORDER == BIG_ENDIAN
#	define READ_ENDIAN_09	LITTLE_ENDIAN
#	define READ_ENDIAN_10	BIG_ENDIAN
#	define STRUCT_ENDIAN_09	BIG_ENDIAN
#	define STRUCT_ENDIAN_10	LITTLE_ENDIAN
#else /* little endian */
#	define READ_ENDIAN_09	LITTLE_ENDIAN
#	define READ_ENDIAN_10	LITTLE_ENDIAN
#	define STRUCT_ENDIAN_09	LITTLE_ENDIAN
#	define STRUCT_ENDIAN_10	LITTLE_ENDIAN
#endif
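
/*
 * READ_ENDIAN_* names the byte order in which device register reads
 * arrive through bus_space(9) on this platform, and STRUCT_ENDIAN_*
 * the byte order required of the in-memory virtqueue structures.  The
 * attach routines publish them as sc_bus_endian and sc_struct_endian,
 * and the virtio(4) core is expected to swap based on them.
 */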

CFATTACH_DECL3_NEW(virtio_pci, sizeof(struct virtio_pci_softc),
    virtio_pci_match, virtio_pci_attach, virtio_pci_detach, NULL,
    virtio_pci_rescan, NULL, DVF_DETACH_SHUTDOWN);

static const struct virtio_ops virtio_pci_ops_09 = {
	.kick = virtio_pci_kick_09,
	.read_queue_size = virtio_pci_read_queue_size_09,
	.setup_queue = virtio_pci_setup_queue_09,
	.set_status = virtio_pci_set_status_09,
	.neg_features = virtio_pci_negotiate_features_09,
	.alloc_interrupts = virtio_pci_alloc_interrupts,
	.free_interrupts = virtio_pci_free_interrupts,
	.setup_interrupts = virtio_pci_setup_interrupts_09,
};

static const struct virtio_ops virtio_pci_ops_10 = {
	.kick = virtio_pci_kick_10,
	.read_queue_size = virtio_pci_read_queue_size_10,
	.setup_queue = virtio_pci_setup_queue_10,
	.set_status = virtio_pci_set_status_10,
	.neg_features = virtio_pci_negotiate_features_10,
	.alloc_interrupts = virtio_pci_alloc_interrupts,
	.free_interrupts = virtio_pci_free_interrupts,
	.setup_interrupts = virtio_pci_setup_interrupts_10,
};

static int
virtio_pci_match(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa;

	pa = (struct pci_attach_args *)aux;
	switch (PCI_VENDOR(pa->pa_id)) {
	case PCI_VENDOR_QUMRANET:
		/* Transitional devices MUST have a PCI Revision ID of 0. */
		if (((PCI_PRODUCT_QUMRANET_VIRTIO_1000 <=
		    PCI_PRODUCT(pa->pa_id)) &&
		    (PCI_PRODUCT(pa->pa_id) <=
		    PCI_PRODUCT_QUMRANET_VIRTIO_103F)) &&
		    PCI_REVISION(pa->pa_class) == 0)
			return 1;
		/*
		 * Non-transitional devices SHOULD have a PCI Revision
		 * ID of 1 or higher.  Drivers MUST match any PCI
		 * Revision ID value.
		 */
		if (((PCI_PRODUCT_QUMRANET_VIRTIO_1040 <=
		    PCI_PRODUCT(pa->pa_id)) &&
		    (PCI_PRODUCT(pa->pa_id) <=
		    PCI_PRODUCT_QUMRANET_VIRTIO_107F)) &&
		    /* XXX: TODO */
		    PCI_REVISION(pa->pa_class) == 1)
			return 1;
		break;
	}

	return 0;
}

static void
virtio_pci_attach(device_t parent, device_t self, void *aux)
{
	struct virtio_pci_softc * const psc = device_private(self);
	struct virtio_softc * const sc = &psc->sc_sc;
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	int revision;
	int ret;
	pcireg_t id;
	pcireg_t csr;

	revision = PCI_REVISION(pa->pa_class);
	switch (revision) {
	case 0:
		/* subsystem ID shows what I am */
		id = PCI_SUBSYS_ID(pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG));
		break;
	case 1:
		/* pci product number shows what I am */
		id = PCI_PRODUCT(pa->pa_id) - PCI_PRODUCT_QUMRANET_VIRTIO_1040;
		break;
	default:
		aprint_normal(": unknown revision 0x%02x; giving up\n",
		    revision);
		return;
	}

	aprint_normal("\n");
	aprint_naive("\n");
	virtio_print_device_type(self, id, revision);

	csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
	csr |= PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_IO_ENABLE;
	pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);

	sc->sc_dev = self;
	psc->sc_pa = *pa;
	psc->sc_iot = pa->pa_iot;

	sc->sc_dmat = pa->pa_dmat;
	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;

	/* attach is dependent on revision */
	ret = 0;
	if (revision == 1) {
		/* try to attach 1.0 */
		ret = virtio_pci_attach_10(self, aux);
	}
	if (ret == 0 && revision == 0) {
		/*
		 * Revision 0 means 0.9 only, or both 0.9 and 1.0.  The
		 * latter are so-called "Transitional Devices".  For
		 * those devices, we want to use the 1.0 interface if
		 * possible.
		 *
		 * XXX Currently only on platforms that require 1.0
		 * XXX features, such as VIRTIO_F_ACCESS_PLATFORM.
		 */
#ifdef __NEED_VIRTIO_F_ACCESS_PLATFORM
		/* First, try to attach 1.0 */
		ret = virtio_pci_attach_10(self, aux);
		if (ret != 0) {
			aprint_error_dev(self,
			    "VirtIO 1.0 error = %d, falling back to 0.9\n",
			    ret);
			/* Fall back to 0.9. */
			ret = virtio_pci_attach_09(self, aux);
		}
#else
		ret = virtio_pci_attach_09(self, aux);
#endif /* __NEED_VIRTIO_F_ACCESS_PLATFORM */
	}
	if (ret) {
		aprint_error_dev(self, "cannot attach (%d)\n", ret);
		return;
	}
	KASSERT(sc->sc_ops);

	/* preset config region */
	psc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
	if (virtio_pci_adjust_config_region(psc))
		return;
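
	/*
	 * Start the initialization handshake prescribed by the virtio
	 * spec: reset, then ACK (we saw the device), then DRIVER (we
	 * know how to drive it).  FEATURES_OK follows during feature
	 * negotiation and DRIVER_OK once the child driver has finished
	 * attaching.
	 */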
	/* generic */
	virtio_device_reset(sc);
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);

	sc->sc_childdevid = id;
	sc->sc_child = NULL;
	virtio_pci_rescan(self, NULL, NULL);
	return;
}

/* ARGSUSED */
static int
virtio_pci_rescan(device_t self, const char *ifattr, const int *locs)
{
	struct virtio_pci_softc * const psc = device_private(self);
	struct virtio_softc * const sc = &psc->sc_sc;
	struct virtio_attach_args va;

	if (sc->sc_child)	/* Child already attached? */
		return 0;

	memset(&va, 0, sizeof(va));
	va.sc_childdevid = sc->sc_childdevid;

	config_found(self, &va, NULL, CFARGS_NONE);

	if (virtio_attach_failed(sc))
		return 0;

	return 0;
}

static int
virtio_pci_detach(device_t self, int flags)
{
	struct virtio_pci_softc * const psc = device_private(self);
	struct virtio_softc * const sc = &psc->sc_sc;
	int r;

	r = config_detach_children(self, flags);
	if (r != 0)
		return r;

	/* Check that the child either never attached or detached properly. */
	KASSERT(sc->sc_child == NULL);
	KASSERT(sc->sc_vqs == NULL);
	KASSERT(psc->sc_ihs_num == 0);

	if (psc->sc_iosize)
		bus_space_unmap(psc->sc_iot, psc->sc_ioh,
		    psc->sc_mapped_iosize);
	psc->sc_iosize = 0;

	return 0;
}

static int
virtio_pci_attach_09(device_t self, void *aux)
{
	struct virtio_pci_softc * const psc = device_private(self);
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	struct virtio_softc * const sc = &psc->sc_sc;

	/* complete IO region */
	if (pci_mapreg_map(pa, PCI_MAPREG_START, PCI_MAPREG_TYPE_IO, 0,
	    &psc->sc_iot, &psc->sc_ioh, NULL, &psc->sc_iosize)) {
		aprint_error_dev(self, "can't map i/o space\n");
		return EIO;
	}
	psc->sc_mapped_iosize = psc->sc_iosize;

	/* queue space */
	if (bus_space_subregion(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_NOTIFY, 2, &psc->sc_notify_ioh)) {
		aprint_error_dev(self, "can't map notify i/o space\n");
		return EIO;
	}
	psc->sc_notify_iosize = 2;
	psc->sc_notify_iot = psc->sc_iot;

	/* ISR space */
	if (bus_space_subregion(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_ISR_STATUS, 1, &psc->sc_isr_ioh)) {
		aprint_error_dev(self, "can't map isr i/o space\n");
		return EIO;
	}
	psc->sc_isr_iosize = 1;
	psc->sc_isr_iot = psc->sc_iot;

	/* set our version 0.9 ops */
	sc->sc_ops = &virtio_pci_ops_09;
	sc->sc_bus_endian = READ_ENDIAN_09;
	sc->sc_struct_endian = STRUCT_ENDIAN_09;
	return 0;
}

static int
virtio_pci_attach_10(device_t self, void *aux)
{
	struct virtio_pci_softc * const psc = device_private(self);
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	struct virtio_softc * const sc = &psc->sc_sc;
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;

	struct virtio_pci_cap common, isr, device;
	struct virtio_pci_notify_cap notify;
	int have_device_cfg = 0;
	bus_size_t bars[NMAPREG] = { 0 };
	int bars_idx[NMAPREG] = { 0 };
	struct virtio_pci_cap *caps[] = { &common, &isr, &device, &notify.cap };
	int i, j, ret = 0;

	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_COMMON_CFG,
	    &common, sizeof(common)))
		return ENODEV;
	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_NOTIFY_CFG,
	    &notify, sizeof(notify)))
		return ENODEV;
	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_ISR_CFG,
	    &isr, sizeof(isr)))
		return ENODEV;
	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_DEVICE_CFG,
	    &device, sizeof(device)))
		memset(&device, 0, sizeof(device));
	else
		have_device_cfg = 1;
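
	/*
	 * All four capability structures are now in host byte order.
	 * Note the device config window is optional: when it is
	 * absent, have_device_cfg stays 0 and sc_devcfg_ioh is never
	 * set up here.
	 */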

	/* Figure out which bars we need to map */
	for (i = 0; i < __arraycount(caps); i++) {
		int bar = caps[i]->bar;
		bus_size_t len = caps[i]->offset + caps[i]->length;
		if (caps[i]->length == 0)
			continue;
		if (bars[bar] < len)
			bars[bar] = len;
	}

	for (i = j = 0; i < __arraycount(bars); i++) {
		int reg;
		pcireg_t type;
		if (bars[i] == 0)
			continue;
		reg = PCI_BAR(i);
		type = pci_mapreg_type(pc, tag, reg);
		if (pci_mapreg_map(pa, reg, type, 0,
		    &psc->sc_bars_iot[j], &psc->sc_bars_ioh[j],
		    NULL, &psc->sc_bars_iosize[j])) {
			aprint_error_dev(self, "can't map bar %d\n", i);
			ret = EIO;
			goto err;
		}
		aprint_debug_dev(self,
		    "bar[%d]: iot %p, size 0x%" PRIxBUSSIZE "\n",
		    j, psc->sc_bars_iot[j], psc->sc_bars_iosize[j]);
		bars_idx[i] = j;
		j++;
	}

	i = bars_idx[notify.cap.bar];
	if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
	    notify.cap.offset, notify.cap.length,
	    &psc->sc_notify_ioh)) {
		aprint_error_dev(self, "can't map notify i/o space\n");
		ret = EIO;
		goto err;
	}
	psc->sc_notify_iosize = notify.cap.length;
	psc->sc_notify_iot = psc->sc_bars_iot[i];
	psc->sc_notify_off_multiplier = le32toh(notify.notify_off_multiplier);

	if (have_device_cfg) {
		i = bars_idx[device.bar];
		if (bus_space_subregion(psc->sc_bars_iot[i],
		    psc->sc_bars_ioh[i], device.offset, device.length,
		    &sc->sc_devcfg_ioh)) {
			aprint_error_dev(self, "can't map devcfg i/o space\n");
			ret = EIO;
			goto err;
		}
		aprint_debug_dev(self,
		    "device.offset = 0x%x, device.length = 0x%x\n",
		    device.offset, device.length);
		sc->sc_devcfg_iosize = device.length;
		sc->sc_devcfg_iot = psc->sc_bars_iot[i];
	}

	i = bars_idx[isr.bar];
	if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
	    isr.offset, isr.length, &psc->sc_isr_ioh)) {
		aprint_error_dev(self, "can't map isr i/o space\n");
		ret = EIO;
		goto err;
	}
	psc->sc_isr_iosize = isr.length;
	psc->sc_isr_iot = psc->sc_bars_iot[i];

	i = bars_idx[common.bar];
	if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
	    common.offset, common.length, &psc->sc_ioh)) {
		aprint_error_dev(self, "can't map common i/o space\n");
		ret = EIO;
		goto err;
	}
	psc->sc_iosize = common.length;
	psc->sc_iot = psc->sc_bars_iot[i];
	psc->sc_mapped_iosize = psc->sc_bars_iosize[i];

	psc->sc_sc.sc_version_1 = 1;

	/* set our version 1.0 ops */
	sc->sc_ops = &virtio_pci_ops_10;
	sc->sc_bus_endian = READ_ENDIAN_10;
	sc->sc_struct_endian = STRUCT_ENDIAN_10;
	return 0;

err:
	/* undo our pci_mapreg_map()s */
	for (i = 0; i < __arraycount(bars); i++) {
		if (psc->sc_bars_iosize[i] == 0)
			continue;
		bus_space_unmap(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
		    psc->sc_bars_iosize[i]);
	}
	return ret;
}

/* v1.0 attach helper */
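/*
 * Walk the PCI capability list for a vendor-specific (0x09) capability
 * with the requested virtio cfg_type, reading it into *buf.  The fixed
 * struct virtio_pci_cap header is converted to host byte order; any
 * tail beyond it (e.g. struct virtio_pci_notify_cap) is copied in
 * 32-bit units and left for the caller to post-process, as done with
 * notify_off_multiplier above.
 */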
static int
virtio_pci_find_cap(struct virtio_pci_softc *psc, int cfg_type, void *buf,
    int buflen)
{
	device_t self = psc->sc_sc.sc_dev;
	pci_chipset_tag_t pc = psc->sc_pa.pa_pc;
	pcitag_t tag = psc->sc_pa.pa_tag;
	unsigned int offset, i, len;
	union {
		pcireg_t reg[8];
		struct virtio_pci_cap vcap;
	} *v = buf;

	if (buflen < sizeof(struct virtio_pci_cap))
		return ERANGE;

	if (!pci_get_capability(pc, tag, PCI_CAP_VENDSPEC, &offset,
	    &v->reg[0]))
		return ENOENT;

	do {
		for (i = 0; i < 4; i++)
			v->reg[i] =
			    le32toh(pci_conf_read(pc, tag, offset + i * 4));
		if (v->vcap.cfg_type == cfg_type)
			break;
		offset = v->vcap.cap_next;
	} while (offset != 0);

	if (offset == 0)
		return ENOENT;

	if (v->vcap.cap_len > sizeof(struct virtio_pci_cap)) {
		len = roundup(v->vcap.cap_len, sizeof(pcireg_t));
		if (len > buflen) {
			aprint_error_dev(self, "%s cap too large\n", __func__);
			return ERANGE;
		}
		for (i = 4; i < len / sizeof(pcireg_t); i++)
			v->reg[i] =
			    le32toh(pci_conf_read(pc, tag, offset + i * 4));
	}

	/* endian fixup */
	v->vcap.offset = le32toh(v->vcap.offset);
	v->vcap.length = le32toh(v->vcap.length);
	return 0;
}

/* -------------------------------------
 * Version 0.9 support
 * -------------------------------------*/

static void
virtio_pci_kick_09(struct virtio_softc *sc, uint16_t idx)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;

	bus_space_write_2(psc->sc_notify_iot, psc->sc_notify_ioh, 0, idx);
}

/* only applicable to v0.9; also called for v1.0, where it is a no-op */
static int
virtio_pci_adjust_config_region(struct virtio_pci_softc *psc)
{
	struct virtio_softc * const sc = &psc->sc_sc;
	device_t self = sc->sc_dev;

	if (psc->sc_sc.sc_version_1)
		return 0;

	sc->sc_devcfg_iosize = psc->sc_iosize - psc->sc_devcfg_offset;
	sc->sc_devcfg_iot = psc->sc_iot;
	if (bus_space_subregion(psc->sc_iot, psc->sc_ioh,
	    psc->sc_devcfg_offset, sc->sc_devcfg_iosize,
	    &sc->sc_devcfg_ioh)) {
		aprint_error_dev(self, "can't map config i/o space\n");
		return EIO;
	}

	return 0;
}

static uint16_t
virtio_pci_read_queue_size_09(struct virtio_softc *sc, uint16_t idx)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;

	bus_space_write_2(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_SELECT, idx);
	return bus_space_read_2(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_SIZE);
}

static void
virtio_pci_setup_queue_09(struct virtio_softc *sc, uint16_t idx, uint64_t addr)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;

	bus_space_write_2(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_SELECT, idx);
	bus_space_write_4(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_ADDRESS, addr / VIRTIO_PAGE_SIZE);

	if (psc->sc_ihs_num > 1) {
		int vec = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
		if (psc->sc_intr_pervq)
			vec += idx;
		bus_space_write_2(psc->sc_iot, psc->sc_ioh,
		    VIRTIO_CONFIG_MSI_QUEUE_VECTOR, vec);
	}
}

static void
virtio_pci_set_status_09(struct virtio_softc *sc, int status)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	int old = 0;

	if (status != 0) {
		old = bus_space_read_1(psc->sc_iot, psc->sc_ioh,
		    VIRTIO_CONFIG_DEVICE_STATUS);
	}
	bus_space_write_1(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_DEVICE_STATUS, status|old);
}
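
/*
 * The legacy interface carries only 32 feature bits, so the upper half
 * of guest_features never reaches the device here (and
 * VIRTIO_F_VERSION_1, bit 32, cannot be negotiated through it).
 */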
static void
virtio_pci_negotiate_features_09(struct virtio_softc *sc,
    uint64_t guest_features)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	uint32_t r;

	r = bus_space_read_4(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_DEVICE_FEATURES);

	r &= guest_features;

	bus_space_write_4(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_GUEST_FEATURES, r);

	sc->sc_active_features = r;
}

/* -------------------------------------
 * Version 1.0 support
 * -------------------------------------*/

static void
virtio_pci_kick_10(struct virtio_softc *sc, uint16_t idx)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	unsigned offset = sc->sc_vqs[idx].vq_notify_off *
	    psc->sc_notify_off_multiplier;

	bus_space_write_2(psc->sc_notify_iot, psc->sc_notify_ioh, offset, idx);
}

static uint16_t
virtio_pci_read_queue_size_10(struct virtio_softc *sc, uint16_t idx)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	bus_space_tag_t iot = psc->sc_iot;
	bus_space_handle_t ioh = psc->sc_ioh;

	bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SELECT, idx);
	return bus_space_read_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SIZE);
}

/*
 * By definition little endian only in v1.0.  NB: "MAY" in the text
 * below refers to "independently" (i.e. the order of accesses) not
 * "32-bit" (which is restricted by the earlier "MUST").
 *
 * 4.1.3.1 Driver Requirements: PCI Device Layout
 *
 * For device configuration access, the driver MUST use ... 32-bit
 * wide and aligned accesses for ... 64-bit wide fields.  For 64-bit
 * fields, the driver MAY access each of the high and low 32-bit parts
 * of the field independently.
 */
static __inline void
virtio_pci_bus_space_write_8(bus_space_tag_t iot, bus_space_handle_t ioh,
    bus_size_t offset, uint64_t value)
{
#if _QUAD_HIGHWORD
	bus_space_write_4(iot, ioh, offset, BUS_ADDR_LO32(value));
	bus_space_write_4(iot, ioh, offset + 4, BUS_ADDR_HI32(value));
#else
	bus_space_write_4(iot, ioh, offset, BUS_ADDR_HI32(value));
	bus_space_write_4(iot, ioh, offset + 4, BUS_ADDR_LO32(value));
#endif
}

static void
virtio_pci_setup_queue_10(struct virtio_softc *sc, uint16_t idx, uint64_t addr)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	struct virtqueue *vq = &sc->sc_vqs[idx];
	bus_space_tag_t iot = psc->sc_iot;
	bus_space_handle_t ioh = psc->sc_ioh;

	KASSERT(vq->vq_index == idx);

	bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SELECT, vq->vq_index);
	if (addr == 0) {
		bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_ENABLE, 0);
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_DESC, 0);
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_AVAIL, 0);
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_USED, 0);
	} else {
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_DESC, addr);
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_AVAIL, addr + vq->vq_availoffset);
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_USED, addr + vq->vq_usedoffset);
		bus_space_write_2(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_ENABLE, 1);
		vq->vq_notify_off = bus_space_read_2(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_NOTIFY_OFF);
	}

	if (psc->sc_ihs_num > 1) {
		int vec = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
		if (psc->sc_intr_pervq)
			vec += idx;
		bus_space_write_2(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_MSIX_VECTOR, vec);
	}
}

static void
virtio_pci_set_status_10(struct virtio_softc *sc, int status)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	bus_space_tag_t iot = psc->sc_iot;
	bus_space_handle_t ioh = psc->sc_ioh;
	int old = 0;

	if (status)
		old = bus_space_read_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS);
	bus_space_write_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS,
	    status | old);
}
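
/*
 * Feature registers in the 1.0 interface are banked: writing 0 to the
 * FEATURE_SELECT register exposes feature bits 31:0 and writing 1
 * exposes bits 63:32, for both the device- and driver-feature windows
 * used below.
 */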
static void
virtio_pci_negotiate_features_10(struct virtio_softc *sc,
    uint64_t guest_features)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	device_t self = sc->sc_dev;
	bus_space_tag_t iot = psc->sc_iot;
	bus_space_handle_t ioh = psc->sc_ioh;
	uint64_t host, negotiated, device_status;

	guest_features |= VIRTIO_F_VERSION_1;
#ifdef __NEED_VIRTIO_F_ACCESS_PLATFORM
	/* XXX This could use some work. */
	guest_features |= VIRTIO_F_ACCESS_PLATFORM;
#endif /* __NEED_VIRTIO_F_ACCESS_PLATFORM */
	/* notify on empty is 0.9 only */
	guest_features &= ~VIRTIO_F_NOTIFY_ON_EMPTY;
	sc->sc_active_features = 0;

	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE_SELECT, 0);
	host = bus_space_read_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE);
	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE_SELECT, 1);
	host |= (uint64_t)
	    bus_space_read_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE) << 32;

	negotiated = host & guest_features;

	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE_SELECT, 0);
	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE,
	    negotiated & 0xffffffff);
	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE_SELECT, 1);
	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE,
	    negotiated >> 32);
	virtio_pci_set_status_10(sc, VIRTIO_CONFIG_DEVICE_STATUS_FEATURES_OK);

	device_status = bus_space_read_1(iot, ioh,
	    VIRTIO_CONFIG1_DEVICE_STATUS);
	if ((device_status & VIRTIO_CONFIG_DEVICE_STATUS_FEATURES_OK) == 0) {
		aprint_error_dev(self, "feature negotiation failed\n");
		bus_space_write_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS,
		    VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
		return;
	}

	if ((negotiated & VIRTIO_F_VERSION_1) == 0) {
		aprint_error_dev(self, "host rejected version 1\n");
		bus_space_write_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS,
		    VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
		return;
	}

	sc->sc_active_features = negotiated;
	return;
}

/* -------------------------------------
 * Generic PCI interrupt code
 * -------------------------------------*/

static int
virtio_pci_setup_interrupts_10(struct virtio_softc *sc, int reinit)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	bus_space_tag_t iot = psc->sc_iot;
	bus_space_handle_t ioh = psc->sc_ioh;
	int vector, ret, qid;

	if (!virtio_pci_msix_enabled(psc))
		return 0;

	vector = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	bus_space_write_2(iot, ioh,
	    VIRTIO_CONFIG1_CONFIG_MSIX_VECTOR, vector);
	ret = bus_space_read_2(iot, ioh, VIRTIO_CONFIG1_CONFIG_MSIX_VECTOR);
	if (ret != vector) {
		VIRTIO_PCI_LOG(sc, reinit,
		    "can't set config msix vector\n");
		return -1;
	}

	for (qid = 0; qid < sc->sc_nvqs; qid++) {
		vector = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;

		if (psc->sc_intr_pervq)
			vector += qid;
		bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SELECT, qid);
		bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_MSIX_VECTOR,
		    vector);
		ret = bus_space_read_2(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_MSIX_VECTOR);
		if (ret != vector) {
			VIRTIO_PCI_LOG(sc, reinit, "can't set queue %d "
			    "msix vector\n", qid);
			return -1;
		}
	}

	return 0;
}
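
/*
 * Both the 1.0 path above and the 0.9 path below read each vector
 * register back after writing it: the device may refuse a mapping by
 * returning the NO_VECTOR value instead (VIRTIO_MSI_NO_VECTOR, 0xffff,
 * in the spec), in which case the readback mismatches and we report
 * failure.
 */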

static int
virtio_pci_setup_interrupts_09(struct virtio_softc *sc, int reinit)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	int offset, vector, ret, qid;

	if (!virtio_pci_msix_enabled(psc))
		return 0;

	offset = VIRTIO_CONFIG_MSI_CONFIG_VECTOR;
	vector = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;

	bus_space_write_2(psc->sc_iot, psc->sc_ioh, offset, vector);
	ret = bus_space_read_2(psc->sc_iot, psc->sc_ioh, offset);
	if (ret != vector) {
		aprint_debug_dev(sc->sc_dev, "%s: expected=%d, actual=%d\n",
		    __func__, vector, ret);
		VIRTIO_PCI_LOG(sc, reinit,
		    "can't set config msix vector\n");
		return -1;
	}

	for (qid = 0; qid < sc->sc_nvqs; qid++) {
		offset = VIRTIO_CONFIG_QUEUE_SELECT;
		bus_space_write_2(psc->sc_iot, psc->sc_ioh, offset, qid);

		offset = VIRTIO_CONFIG_MSI_QUEUE_VECTOR;
		vector = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;

		if (psc->sc_intr_pervq)
			vector += qid;

		bus_space_write_2(psc->sc_iot, psc->sc_ioh, offset, vector);
		ret = bus_space_read_2(psc->sc_iot, psc->sc_ioh, offset);
		if (ret != vector) {
			aprint_debug_dev(sc->sc_dev, "%s[qid=%d]:"
			    " expected=%d, actual=%d\n",
			    __func__, qid, vector, ret);
			VIRTIO_PCI_LOG(sc, reinit, "can't set queue %d "
			    "msix vector\n", qid);
			return -1;
		}
	}

	return 0;
}

static int
virtio_pci_establish_msix_interrupts(struct virtio_softc *sc,
    struct pci_attach_args *pa)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	device_t self = sc->sc_dev;
	pci_chipset_tag_t pc = pa->pa_pc;
	struct virtqueue *vq;
	char intrbuf[PCI_INTRSTR_LEN];
	char intr_xname[INTRDEVNAMEBUF];
	char const *intrstr;
	int idx, qid, n;

	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE)
		pci_intr_setattr(pc, &psc->sc_ihp[idx], PCI_INTR_MPSAFE, true);

	snprintf(intr_xname, sizeof(intr_xname), "%s config",
	    device_xname(sc->sc_dev));

	psc->sc_ihs[idx] = pci_intr_establish_xname(pc, psc->sc_ihp[idx],
	    sc->sc_ipl, virtio_pci_msix_config_intr, sc, intr_xname);
	if (psc->sc_ihs[idx] == NULL) {
		aprint_error_dev(self,
		    "couldn't establish MSI-X for config\n");
		goto error;
	}

	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
	if (psc->sc_intr_pervq) {
		for (qid = 0; qid < sc->sc_nvqs; qid++) {
			n = idx + qid;
			vq = &sc->sc_vqs[qid];

			snprintf(intr_xname, sizeof(intr_xname), "%s vq#%d",
			    device_xname(sc->sc_dev), qid);

			if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE) {
				pci_intr_setattr(pc, &psc->sc_ihp[n],
				    PCI_INTR_MPSAFE, true);
			}

			psc->sc_ihs[n] = pci_intr_establish_xname(pc,
			    psc->sc_ihp[n], sc->sc_ipl, vq->vq_intrhand,
			    vq->vq_intrhand_arg, intr_xname);
			if (psc->sc_ihs[n] == NULL) {
				aprint_error_dev(self,
				    "couldn't establish MSI-X for a vq\n");
				goto error;
			}
		}
	} else {
		if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE)
			pci_intr_setattr(pc, &psc->sc_ihp[idx],
			    PCI_INTR_MPSAFE, true);

		snprintf(intr_xname, sizeof(intr_xname), "%s queues",
		    device_xname(sc->sc_dev));
		psc->sc_ihs[idx] = pci_intr_establish_xname(pc,
		    psc->sc_ihp[idx], sc->sc_ipl,
		    virtio_pci_msix_queue_intr, sc, intr_xname);
		if (psc->sc_ihs[idx] == NULL) {
			aprint_error_dev(self,
			    "couldn't establish MSI-X for queues\n");
			goto error;
		}
	}

	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	intrstr = pci_intr_string(pc, psc->sc_ihp[idx], intrbuf,
	    sizeof(intrbuf));
	aprint_normal_dev(self, "config interrupting at %s\n", intrstr);
	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
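	/*
	 * With per-VQ vectors, pin the queue interrupts to CPUs in
	 * pairs: (qid / 2) % ncpu maps queues 0/1 to CPU 0, 2/3 to
	 * CPU 1, and so on, wrapping at ncpu.  If
	 * interrupt_distribute(9) declines, the vector simply stays on
	 * whatever CPU the platform chose.
	 */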
	if (psc->sc_intr_pervq) {
		kcpuset_t *affinity;
		int affinity_to, r;

		kcpuset_create(&affinity, false);

		for (qid = 0; qid < sc->sc_nvqs; qid++) {
			n = idx + qid;
			affinity_to = (qid / 2) % ncpu;

			intrstr = pci_intr_string(pc, psc->sc_ihp[n],
			    intrbuf, sizeof(intrbuf));

			kcpuset_zero(affinity);
			kcpuset_set(affinity, affinity_to);
			r = interrupt_distribute(psc->sc_ihs[n], affinity,
			    NULL);
			if (r == 0) {
				aprint_normal_dev(self,
				    "for vq #%d interrupting at %s"
				    " affinity to %d\n",
				    qid, intrstr, affinity_to);
			} else {
				aprint_normal_dev(self,
				    "for vq #%d interrupting at %s\n",
				    qid, intrstr);
			}
		}

		kcpuset_destroy(affinity);
	} else {
		intrstr = pci_intr_string(pc, psc->sc_ihp[idx], intrbuf,
		    sizeof(intrbuf));
		aprint_normal_dev(self, "queues interrupting at %s\n",
		    intrstr);
	}

	return 0;

error:
	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	if (psc->sc_ihs[idx] != NULL)
		pci_intr_disestablish(psc->sc_pa.pa_pc, psc->sc_ihs[idx]);
	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
	if (psc->sc_intr_pervq) {
		for (qid = 0; qid < sc->sc_nvqs; qid++) {
			n = idx + qid;
			if (psc->sc_ihs[n] == NULL)
				continue;
			pci_intr_disestablish(psc->sc_pa.pa_pc,
			    psc->sc_ihs[n]);
		}
	} else {
		if (psc->sc_ihs[idx] != NULL)
			pci_intr_disestablish(psc->sc_pa.pa_pc,
			    psc->sc_ihs[idx]);
	}

	return -1;
}

static int
virtio_pci_establish_intx_interrupt(struct virtio_softc *sc,
    struct pci_attach_args *pa)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	device_t self = sc->sc_dev;
	pci_chipset_tag_t pc = pa->pa_pc;
	char intrbuf[PCI_INTRSTR_LEN];
	char const *intrstr;

	if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE)
		pci_intr_setattr(pc, &psc->sc_ihp[0], PCI_INTR_MPSAFE, true);

	psc->sc_ihs[0] = pci_intr_establish_xname(pc, psc->sc_ihp[0],
	    sc->sc_ipl, virtio_pci_intr, sc, device_xname(sc->sc_dev));
	if (psc->sc_ihs[0] == NULL) {
		aprint_error_dev(self, "couldn't establish INTx\n");
		return -1;
	}

	intrstr = pci_intr_string(pc, psc->sc_ihp[0], intrbuf,
	    sizeof(intrbuf));
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	return 0;
}

static int
virtio_pci_alloc_interrupts(struct virtio_softc *sc)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	device_t self = sc->sc_dev;
	pci_chipset_tag_t pc = psc->sc_pa.pa_pc;
	pcitag_t tag = psc->sc_pa.pa_tag;
	int error;
	int nmsix;
	int off;
	int counts[PCI_INTR_TYPE_SIZE];
	pci_intr_type_t max_type;
	pcireg_t ctl;

	nmsix = pci_msix_count(psc->sc_pa.pa_pc, psc->sc_pa.pa_tag);
	aprint_debug_dev(self, "pci_msix_count=%d\n", nmsix);

	/* We need at least two: one for config and the other for queues */
	if ((sc->sc_flags & VIRTIO_F_INTR_MSIX) == 0 || nmsix < 2) {
		/* Try INTx only */
		max_type = PCI_INTR_TYPE_INTX;
		counts[PCI_INTR_TYPE_INTX] = 1;
	} else {
		/* Try MSI-X first and INTx second */
		if (ISSET(sc->sc_flags, VIRTIO_F_INTR_PERVQ) &&
		    sc->sc_nvqs + VIRTIO_MSIX_QUEUE_VECTOR_INDEX <= nmsix) {
			nmsix = sc->sc_nvqs + VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
		} else {
			nmsix = 2;
		}

		max_type = PCI_INTR_TYPE_MSIX;
		counts[PCI_INTR_TYPE_MSIX] = nmsix;
		counts[PCI_INTR_TYPE_MSI] = 0;
		counts[PCI_INTR_TYPE_INTX] = 1;
	}
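
	/*
	 * If the MSI-X handlers can't be established below, we release
	 * the vectors, downgrade max_type to INTx, and come back here
	 * to allocate a single classic interrupt line instead.
	 */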
retry:
	error = pci_intr_alloc(&psc->sc_pa, &psc->sc_ihp, counts, max_type);
	if (error != 0) {
		aprint_error_dev(self, "couldn't map interrupt\n");
		return -1;
	}

	if (pci_intr_type(pc, psc->sc_ihp[0]) == PCI_INTR_TYPE_MSIX) {
		psc->sc_intr_pervq = nmsix > 2;
		psc->sc_ihs = kmem_zalloc(sizeof(*psc->sc_ihs) * nmsix,
		    KM_SLEEP);

		error = virtio_pci_establish_msix_interrupts(sc, &psc->sc_pa);
		if (error != 0) {
			kmem_free(psc->sc_ihs, sizeof(*psc->sc_ihs) * nmsix);
			pci_intr_release(pc, psc->sc_ihp, nmsix);

			/* Retry INTx */
			max_type = PCI_INTR_TYPE_INTX;
			counts[PCI_INTR_TYPE_INTX] = 1;
			goto retry;
		}

		psc->sc_ihs_num = nmsix;
		psc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_MSI;
		virtio_pci_adjust_config_region(psc);
	} else if (pci_intr_type(pc, psc->sc_ihp[0]) == PCI_INTR_TYPE_INTX) {
		psc->sc_intr_pervq = false;
		psc->sc_ihs = kmem_zalloc(sizeof(*psc->sc_ihs) * 1,
		    KM_SLEEP);

		error = virtio_pci_establish_intx_interrupt(sc, &psc->sc_pa);
		if (error != 0) {
			kmem_free(psc->sc_ihs, sizeof(*psc->sc_ihs) * 1);
			pci_intr_release(pc, psc->sc_ihp, 1);
			return -1;
		}

		psc->sc_ihs_num = 1;
		psc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
		virtio_pci_adjust_config_region(psc);

		/*
		 * pci_get_capability(9) returns non-zero when the
		 * capability is present: if the device also has MSI-X,
		 * make sure it is disabled while we run on INTx.
		 */
		error = pci_get_capability(pc, tag, PCI_CAP_MSIX, &off, NULL);
		if (error != 0) {
			ctl = pci_conf_read(pc, tag, off + PCI_MSIX_CTL);
			ctl &= ~PCI_MSIX_CTL_ENABLE;
			pci_conf_write(pc, tag, off + PCI_MSIX_CTL, ctl);
		}
	}

	if (!psc->sc_intr_pervq)
		CLR(sc->sc_flags, VIRTIO_F_INTR_PERVQ);
	return 0;
}

static void
virtio_pci_free_interrupts(struct virtio_softc *sc)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;

	for (int i = 0; i < psc->sc_ihs_num; i++) {
		if (psc->sc_ihs[i] == NULL)
			continue;
		pci_intr_disestablish(psc->sc_pa.pa_pc, psc->sc_ihs[i]);
		psc->sc_ihs[i] = NULL;
	}

	if (psc->sc_ihs_num > 0)
		pci_intr_release(psc->sc_pa.pa_pc, psc->sc_ihp,
		    psc->sc_ihs_num);

	if (psc->sc_ihs != NULL) {
		kmem_free(psc->sc_ihs, sizeof(*psc->sc_ihs) * psc->sc_ihs_num);
		psc->sc_ihs = NULL;
	}
	psc->sc_ihs_num = 0;
}

static bool
virtio_pci_msix_enabled(struct virtio_pci_softc *psc)
{
	pci_chipset_tag_t pc = psc->sc_pa.pa_pc;

	if (pci_intr_type(pc, psc->sc_ihp[0]) == PCI_INTR_TYPE_MSIX)
		return true;

	return false;
}
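
/*
 * Reading the ISR status register acknowledges (deasserts) the INTx
 * interrupt, so the handler below reads it exactly once and acts on
 * every bit found; VIRTIO_CONFIG_ISR_CONFIG_CHANGE flags a device
 * configuration change.
 */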

/*
 * Interrupt handler.
 */
static int
virtio_pci_intr(void *arg)
{
	struct virtio_softc *sc = arg;
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	int isr, r = 0;

	/* check and ack the interrupt */
	isr = bus_space_read_1(psc->sc_isr_iot, psc->sc_isr_ioh, 0);
	if (isr == 0)
		return 0;
	if ((isr & VIRTIO_CONFIG_ISR_CONFIG_CHANGE) &&
	    (sc->sc_config_change != NULL))
		r = (sc->sc_config_change)(sc);
	if (sc->sc_intrhand != NULL) {
		if (sc->sc_soft_ih != NULL)
			softint_schedule(sc->sc_soft_ih);
		else
			r |= (sc->sc_intrhand)(sc);
	}

	return r;
}

static int
virtio_pci_msix_queue_intr(void *arg)
{
	struct virtio_softc *sc = arg;
	int r = 0;

	if (sc->sc_intrhand != NULL) {
		if (sc->sc_soft_ih != NULL)
			softint_schedule(sc->sc_soft_ih);
		else
			r |= (sc->sc_intrhand)(sc);
	}

	return r;
}

static int
virtio_pci_msix_config_intr(void *arg)
{
	struct virtio_softc *sc = arg;
	int r = 0;

	if (sc->sc_config_change != NULL)
		r = (sc->sc_config_change)(sc);
	return r;
}

MODULE(MODULE_CLASS_DRIVER, virtio_pci, "pci,virtio");

#ifdef _MODULE
#include "ioconf.c"
#endif

static int
virtio_pci_modcmd(modcmd_t cmd, void *opaque)
{
	int error = 0;

#ifdef _MODULE
	switch (cmd) {
	case MODULE_CMD_INIT:
		error = config_init_component(cfdriver_ioconf_virtio_pci,
		    cfattach_ioconf_virtio_pci, cfdata_ioconf_virtio_pci);
		break;
	case MODULE_CMD_FINI:
		error = config_fini_component(cfdriver_ioconf_virtio_pci,
		    cfattach_ioconf_virtio_pci, cfdata_ioconf_virtio_pci);
		break;
	default:
		error = ENOTTY;
		break;
	}
#endif

	return error;
}