/*	$NetBSD: virtio_pci.c,v 1.38 2022/05/30 20:28:18 riastradh Exp $	*/

/*
 * Copyright (c) 2020 The NetBSD Foundation, Inc.
 * Copyright (c) 2012 Stefan Fritsch.
 * Copyright (c) 2010 Minoura Makoto.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: virtio_pci.c,v 1.38 2022/05/30 20:28:18 riastradh Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kmem.h>
#include <sys/module.h>
#include <sys/endian.h>
#include <sys/interrupt.h>
#include <sys/syslog.h>

#include <sys/device.h>

#include <dev/pci/pcidevs.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/pci/virtioreg.h> /* XXX: move to non-pci */
#include <dev/pci/virtio_pcireg.h>

#define VIRTIO_PRIVATE
#include <dev/pci/virtiovar.h> /* XXX: move to non-pci */

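/*
 * Note on the log-vs-aprint switch below: _use_log is the "reinit" flag
 * passed to the setup_interrupts callbacks, so errors during a
 * re-initialization after attach go to the system log instead of the
 * autoconfiguration output.
 */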
#define VIRTIO_PCI_LOG(_sc, _use_log, _fmt, _args...)	\
do {							\
	if ((_use_log)) {				\
		log(LOG_DEBUG, "%s: " _fmt,		\
		    device_xname((_sc)->sc_dev),	\
		    ##_args);				\
	} else {					\
		aprint_error_dev((_sc)->sc_dev,		\
		    _fmt, ##_args);			\
	}						\
} while(0)

static int	virtio_pci_match(device_t, cfdata_t, void *);
static void	virtio_pci_attach(device_t, device_t, void *);
static int	virtio_pci_rescan(device_t, const char *, const int *);
static int	virtio_pci_detach(device_t, int);


#define NMAPREG		((PCI_MAPREG_END - PCI_MAPREG_START) / \
				sizeof(pcireg_t))
struct virtio_pci_softc {
	struct virtio_softc	sc_sc;

	/* IO space */
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_iosize;
	bus_size_t		sc_mapped_iosize;

	/* BARs */
	bus_space_tag_t		sc_bars_iot[NMAPREG];
	bus_space_handle_t	sc_bars_ioh[NMAPREG];
	bus_size_t		sc_bars_iosize[NMAPREG];

	/* notify space */
	bus_space_tag_t		sc_notify_iot;
	bus_space_handle_t	sc_notify_ioh;
	bus_size_t		sc_notify_iosize;
	uint32_t		sc_notify_off_multiplier;

	/* isr space */
	bus_space_tag_t		sc_isr_iot;
	bus_space_handle_t	sc_isr_ioh;
	bus_size_t		sc_isr_iosize;

	/* generic */
	struct pci_attach_args	sc_pa;
	pci_intr_handle_t	*sc_ihp;
	void			**sc_ihs;
	int			sc_ihs_num;
	int			sc_devcfg_offset;	/* for 0.9 */
};

static int	virtio_pci_attach_09(device_t, void *);
static void	virtio_pci_kick_09(struct virtio_softc *, uint16_t);
static uint16_t	virtio_pci_read_queue_size_09(struct virtio_softc *, uint16_t);
static void	virtio_pci_setup_queue_09(struct virtio_softc *, uint16_t, uint64_t);
static void	virtio_pci_set_status_09(struct virtio_softc *, int);
static void	virtio_pci_negotiate_features_09(struct virtio_softc *, uint64_t);

static int	virtio_pci_attach_10(device_t, void *);
static void	virtio_pci_kick_10(struct virtio_softc *, uint16_t);
static uint16_t	virtio_pci_read_queue_size_10(struct virtio_softc *, uint16_t);
static void	virtio_pci_setup_queue_10(struct virtio_softc *, uint16_t, uint64_t);
static void	virtio_pci_set_status_10(struct virtio_softc *, int);
static void	virtio_pci_negotiate_features_10(struct virtio_softc *, uint64_t);
static int	virtio_pci_find_cap(struct virtio_pci_softc *psc, int cfg_type, void *buf, int buflen);

static int	virtio_pci_alloc_interrupts(struct virtio_softc *);
static void	virtio_pci_free_interrupts(struct virtio_softc *);
static int	virtio_pci_adjust_config_region(struct virtio_pci_softc *psc);
static int	virtio_pci_intr(void *arg);
static int	virtio_pci_msix_queue_intr(void *);
static int	virtio_pci_msix_config_intr(void *);
static int	virtio_pci_setup_interrupts_09(struct virtio_softc *, int);
static int	virtio_pci_setup_interrupts_10(struct virtio_softc *, int);
static int	virtio_pci_establish_msix_interrupts(struct virtio_softc *,
		    struct pci_attach_args *);
static int	virtio_pci_establish_intx_interrupt(struct virtio_softc *,
		    struct pci_attach_args *);
static bool	virtio_pci_msix_enabled(struct virtio_pci_softc *);

#define VIRTIO_MSIX_CONFIG_VECTOR_INDEX	0
#define VIRTIO_MSIX_QUEUE_VECTOR_INDEX	1
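/*
 * MSI-X vector layout: vector 0 handles config changes and vector 1 is
 * shared by all virtqueues.  When the child driver requests per-queue
 * vectors (sc_child_mq), virtqueue N uses vector 1 + N instead.
 */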
/*
 * When using PCI attached virtio on aarch64-eb under Qemu, the IO space
 * suddenly reads BIG_ENDIAN where it should stay LITTLE_ENDIAN.  Data read
 * one byte at a time seems OK, but reads of larger widths come back with
 * swapped endianness.  This is most notable when reading 8-byte fields,
 * since we can't use bus_space_{read,write}_8().
 */

#if defined(__aarch64__) && BYTE_ORDER == BIG_ENDIAN
#	define READ_ENDIAN_09	BIG_ENDIAN	/* should be LITTLE_ENDIAN */
#	define READ_ENDIAN_10	BIG_ENDIAN
#	define STRUCT_ENDIAN_09	BIG_ENDIAN
#	define STRUCT_ENDIAN_10	LITTLE_ENDIAN
#elif BYTE_ORDER == BIG_ENDIAN
#	define READ_ENDIAN_09	LITTLE_ENDIAN
#	define READ_ENDIAN_10	BIG_ENDIAN
#	define STRUCT_ENDIAN_09	BIG_ENDIAN
#	define STRUCT_ENDIAN_10	LITTLE_ENDIAN
#else /* little endian */
#	define READ_ENDIAN_09	LITTLE_ENDIAN
#	define READ_ENDIAN_10	LITTLE_ENDIAN
#	define STRUCT_ENDIAN_09	LITTLE_ENDIAN
#	define STRUCT_ENDIAN_10	LITTLE_ENDIAN
#endif


CFATTACH_DECL3_NEW(virtio_pci, sizeof(struct virtio_pci_softc),
    virtio_pci_match, virtio_pci_attach, virtio_pci_detach, NULL,
    virtio_pci_rescan, NULL, DVF_DETACH_SHUTDOWN);

static const struct virtio_ops virtio_pci_ops_09 = {
	.kick = virtio_pci_kick_09,
	.read_queue_size = virtio_pci_read_queue_size_09,
	.setup_queue = virtio_pci_setup_queue_09,
	.set_status = virtio_pci_set_status_09,
	.neg_features = virtio_pci_negotiate_features_09,
	.alloc_interrupts = virtio_pci_alloc_interrupts,
	.free_interrupts = virtio_pci_free_interrupts,
	.setup_interrupts = virtio_pci_setup_interrupts_09,
};

static const struct virtio_ops virtio_pci_ops_10 = {
	.kick = virtio_pci_kick_10,
	.read_queue_size = virtio_pci_read_queue_size_10,
	.setup_queue = virtio_pci_setup_queue_10,
	.set_status = virtio_pci_set_status_10,
	.neg_features = virtio_pci_negotiate_features_10,
	.alloc_interrupts = virtio_pci_alloc_interrupts,
	.free_interrupts = virtio_pci_free_interrupts,
	.setup_interrupts = virtio_pci_setup_interrupts_10,
};
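/*
 * Transitional devices use product IDs 0x1000..0x103f with PCI revision 0;
 * modern (1.0+) devices use 0x1040..0x107f with revision 1 or higher (see
 * the XXX below about matching any revision for modern devices).
 */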
static int
virtio_pci_match(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa;

	pa = (struct pci_attach_args *)aux;
	switch (PCI_VENDOR(pa->pa_id)) {
	case PCI_VENDOR_QUMRANET:
		/* Transitional devices MUST have a PCI Revision ID of 0. */
		if (((PCI_PRODUCT_QUMRANET_VIRTIO_1000 <=
		    PCI_PRODUCT(pa->pa_id)) &&
		    (PCI_PRODUCT(pa->pa_id) <=
		    PCI_PRODUCT_QUMRANET_VIRTIO_103F)) &&
		    PCI_REVISION(pa->pa_class) == 0)
			return 1;
		/*
		 * Non-transitional devices SHOULD have a PCI Revision
		 * ID of 1 or higher.  Drivers MUST match any PCI
		 * Revision ID value.
		 */
		if (((PCI_PRODUCT_QUMRANET_VIRTIO_1040 <=
		    PCI_PRODUCT(pa->pa_id)) &&
		    (PCI_PRODUCT(pa->pa_id) <=
		    PCI_PRODUCT_QUMRANET_VIRTIO_107F)) &&
		    /* XXX: TODO */
		    PCI_REVISION(pa->pa_class) == 1)
			return 1;
		break;
	}

	return 0;
}

static void
virtio_pci_attach(device_t parent, device_t self, void *aux)
{
	struct virtio_pci_softc * const psc = device_private(self);
	struct virtio_softc * const sc = &psc->sc_sc;
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	int revision;
	int ret;
	pcireg_t id;
	pcireg_t csr;

	revision = PCI_REVISION(pa->pa_class);
	switch (revision) {
	case 0:
		/* subsystem ID shows what I am */
		id = PCI_SUBSYS_ID(pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG));
		break;
	case 1:
		/* pci product number shows what I am */
		id = PCI_PRODUCT(pa->pa_id) - PCI_PRODUCT_QUMRANET_VIRTIO_1040;
		break;
	default:
		aprint_normal(": unknown revision 0x%02x; giving up\n",
		    revision);
		return;
	}

	aprint_normal("\n");
	aprint_naive("\n");
	virtio_print_device_type(self, id, revision);

	csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
	csr |= PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_IO_ENABLE;
	pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);

	sc->sc_dev = self;
	psc->sc_pa = *pa;
	psc->sc_iot = pa->pa_iot;

	sc->sc_dmat = pa->pa_dmat;
	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;

	/* attach is dependent on revision */
	ret = 0;
	if (revision == 1) {
		/* try to attach 1.0 */
		ret = virtio_pci_attach_10(self, aux);
	}
	if (ret == 0 && revision == 0) {
		/* revision 0 means 0.9 only or both 0.9 and 1.0 */
		ret = virtio_pci_attach_09(self, aux);
	}
	if (ret) {
		aprint_error_dev(self, "cannot attach (%d)\n", ret);
		return;
	}
	KASSERT(sc->sc_ops);

	/* preset config region */
	psc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
	if (virtio_pci_adjust_config_region(psc))
		return;

	/* generic */
	virtio_device_reset(sc);
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);

	sc->sc_childdevid = id;
	sc->sc_child = NULL;
	virtio_pci_rescan(self, NULL, NULL);
	return;
}

/* ARGSUSED */
static int
virtio_pci_rescan(device_t self, const char *ifattr, const int *locs)
{
	struct virtio_pci_softc * const psc = device_private(self);
	struct virtio_softc * const sc = &psc->sc_sc;
	struct virtio_attach_args va;

	if (sc->sc_child)	/* Child already attached? */
		return 0;

	memset(&va, 0, sizeof(va));
	va.sc_childdevid = sc->sc_childdevid;

	config_found(self, &va, NULL, CFARGS_NONE);

	if (virtio_attach_failed(sc))
		return 0;

	return 0;
}


static int
virtio_pci_detach(device_t self, int flags)
{
	struct virtio_pci_softc * const psc = device_private(self);
	struct virtio_softc * const sc = &psc->sc_sc;
	int r;

	if (sc->sc_child != NULL) {
		r = config_detach(sc->sc_child, flags);
		if (r)
			return r;
	}

	/* Check that child detached properly */
	KASSERT(sc->sc_child == NULL);
	KASSERT(sc->sc_vqs == NULL);
	KASSERT(psc->sc_ihs_num == 0);

	if (psc->sc_iosize)
		bus_space_unmap(psc->sc_iot, psc->sc_ioh,
		    psc->sc_mapped_iosize);
	psc->sc_iosize = 0;

	return 0;
}
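/*
 * 0.9 ("legacy") layout: every register lives at a fixed offset inside a
 * single I/O BAR, so mapping the first BAR once and carving subregions
 * for the notify and ISR registers is all the setup we need.
 */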
static int
virtio_pci_attach_09(device_t self, void *aux)
    //struct virtio_pci_softc *psc, struct pci_attach_args *pa)
{
	struct virtio_pci_softc * const psc = device_private(self);
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	struct virtio_softc * const sc = &psc->sc_sc;
//	pci_chipset_tag_t pc = pa->pa_pc;
//	pcitag_t tag = pa->pa_tag;

	/* complete IO region */
	if (pci_mapreg_map(pa, PCI_MAPREG_START, PCI_MAPREG_TYPE_IO, 0,
	    &psc->sc_iot, &psc->sc_ioh, NULL, &psc->sc_iosize)) {
		aprint_error_dev(self, "can't map i/o space\n");
		return EIO;
	}
	psc->sc_mapped_iosize = psc->sc_iosize;

	/* queue space */
	if (bus_space_subregion(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_NOTIFY, 2, &psc->sc_notify_ioh)) {
		aprint_error_dev(self, "can't map notify i/o space\n");
		return EIO;
	}
	psc->sc_notify_iosize = 2;
	psc->sc_notify_iot = psc->sc_iot;

	/* ISR space */
	if (bus_space_subregion(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_ISR_STATUS, 1, &psc->sc_isr_ioh)) {
		aprint_error_dev(self, "can't map isr i/o space\n");
		return EIO;
	}
	psc->sc_isr_iosize = 1;
	psc->sc_isr_iot = psc->sc_iot;

	/* set our version 0.9 ops */
	sc->sc_ops = &virtio_pci_ops_09;
	sc->sc_bus_endian = READ_ENDIAN_09;
	sc->sc_struct_endian = STRUCT_ENDIAN_09;
	return 0;
}
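/*
 * 1.0 ("modern") layout: the common, notify, ISR and device-config
 * register windows are advertised through vendor-specific PCI
 * capabilities, each naming a BAR, an offset and a length; we map only
 * the BARs that at least one capability points into.
 */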
static int
virtio_pci_attach_10(device_t self, void *aux)
{
	struct virtio_pci_softc * const psc = device_private(self);
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	struct virtio_softc * const sc = &psc->sc_sc;
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;

	struct virtio_pci_cap common, isr, device;
	struct virtio_pci_notify_cap notify;
	int have_device_cfg = 0;
	bus_size_t bars[NMAPREG] = { 0 };
	int bars_idx[NMAPREG] = { 0 };
	struct virtio_pci_cap *caps[] = { &common, &isr, &device, &notify.cap };
	int i, j, ret = 0;

	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_COMMON_CFG,
	    &common, sizeof(common)))
		return ENODEV;
	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_NOTIFY_CFG,
	    &notify, sizeof(notify)))
		return ENODEV;
	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_ISR_CFG,
	    &isr, sizeof(isr)))
		return ENODEV;
	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_DEVICE_CFG,
	    &device, sizeof(device)))
		memset(&device, 0, sizeof(device));
	else
		have_device_cfg = 1;

	/* Figure out which bars we need to map */
	for (i = 0; i < __arraycount(caps); i++) {
		int bar = caps[i]->bar;
		bus_size_t len = caps[i]->offset + caps[i]->length;
		if (caps[i]->length == 0)
			continue;
		if (bars[bar] < len)
			bars[bar] = len;
	}

	for (i = j = 0; i < __arraycount(bars); i++) {
		int reg;
		pcireg_t type;
		if (bars[i] == 0)
			continue;
		reg = PCI_BAR(i);
		type = pci_mapreg_type(pc, tag, reg);
		if (pci_mapreg_map(pa, reg, type, 0,
		    &psc->sc_bars_iot[j], &psc->sc_bars_ioh[j],
		    NULL, &psc->sc_bars_iosize[j])) {
			aprint_error_dev(self, "can't map bar %d\n", i);
			ret = EIO;
			goto err;
		}
		aprint_debug_dev(self,
		    "bar[%d]: iot %p, size 0x%" PRIxBUSSIZE "\n",
		    j, psc->sc_bars_iot[j], psc->sc_bars_iosize[j]);
		bars_idx[i] = j;
		j++;
	}

	i = bars_idx[notify.cap.bar];
	if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
	    notify.cap.offset, notify.cap.length,
	    &psc->sc_notify_ioh)) {
		aprint_error_dev(self, "can't map notify i/o space\n");
		ret = EIO;
		goto err;
	}
	psc->sc_notify_iosize = notify.cap.length;
	psc->sc_notify_iot = psc->sc_bars_iot[i];
	psc->sc_notify_off_multiplier = le32toh(notify.notify_off_multiplier);

	if (have_device_cfg) {
		i = bars_idx[device.bar];
		if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
		    device.offset, device.length,
		    &sc->sc_devcfg_ioh)) {
			aprint_error_dev(self, "can't map devcfg i/o space\n");
			ret = EIO;
			goto err;
		}
		aprint_debug_dev(self,
		    "device.offset = 0x%x, device.length = 0x%x\n",
		    device.offset, device.length);
		sc->sc_devcfg_iosize = device.length;
		sc->sc_devcfg_iot = psc->sc_bars_iot[i];
	}

	i = bars_idx[isr.bar];
	if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
	    isr.offset, isr.length, &psc->sc_isr_ioh)) {
		aprint_error_dev(self, "can't map isr i/o space\n");
		ret = EIO;
		goto err;
	}
	psc->sc_isr_iosize = isr.length;
	psc->sc_isr_iot = psc->sc_bars_iot[i];

	i = bars_idx[common.bar];
	if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
	    common.offset, common.length, &psc->sc_ioh)) {
		aprint_error_dev(self, "can't map common i/o space\n");
		ret = EIO;
		goto err;
	}
	psc->sc_iosize = common.length;
	psc->sc_iot = psc->sc_bars_iot[i];
	psc->sc_mapped_iosize = psc->sc_bars_iosize[i];

	psc->sc_sc.sc_version_1 = 1;

	/* set our version 1.0 ops */
	sc->sc_ops = &virtio_pci_ops_10;
	sc->sc_bus_endian = READ_ENDIAN_10;
	sc->sc_struct_endian = STRUCT_ENDIAN_10;
	return 0;

err:
	/* undo our pci_mapreg_map()s */
	for (i = 0; i < __arraycount(bars); i++) {
		if (psc->sc_bars_iosize[i] == 0)
			continue;
		bus_space_unmap(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
		    psc->sc_bars_iosize[i]);
	}
	return ret;
}
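/*
 * Walk the PCI capability list for a vendor-specific capability of the
 * given cfg_type.  The generic virtio_pci_cap header is always copied
 * out; if the capability is longer (e.g. the notify capability's extra
 * notify_off_multiplier word), the remainder is copied as well.
 */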
/* v1.0 attach helper */
static int
virtio_pci_find_cap(struct virtio_pci_softc *psc, int cfg_type, void *buf, int buflen)
{
	device_t self = psc->sc_sc.sc_dev;
	pci_chipset_tag_t pc = psc->sc_pa.pa_pc;
	pcitag_t tag = psc->sc_pa.pa_tag;
	unsigned int offset, i, len;
	union {
		pcireg_t reg[8];
		struct virtio_pci_cap vcap;
	} *v = buf;

	if (buflen < sizeof(struct virtio_pci_cap))
		return ERANGE;

	if (!pci_get_capability(pc, tag, PCI_CAP_VENDSPEC, &offset, &v->reg[0]))
		return ENOENT;

	do {
		for (i = 0; i < 4; i++)
			v->reg[i] =
			    le32toh(pci_conf_read(pc, tag, offset + i * 4));
		if (v->vcap.cfg_type == cfg_type)
			break;
		offset = v->vcap.cap_next;
	} while (offset != 0);

	if (offset == 0)
		return ENOENT;

	if (v->vcap.cap_len > sizeof(struct virtio_pci_cap)) {
		len = roundup(v->vcap.cap_len, sizeof(pcireg_t));
		if (len > buflen) {
			aprint_error_dev(self, "%s cap too large\n", __func__);
			return ERANGE;
		}
		for (i = 4; i < len / sizeof(pcireg_t); i++)
			v->reg[i] =
			    le32toh(pci_conf_read(pc, tag, offset + i * 4));
	}

	/* endian fixup */
	v->vcap.offset = le32toh(v->vcap.offset);
	v->vcap.length = le32toh(v->vcap.length);
	return 0;
}


/* -------------------------------------
 * Version 0.9 support
 * -------------------------------------*/

static void
virtio_pci_kick_09(struct virtio_softc *sc, uint16_t idx)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;

	bus_space_write_2(psc->sc_notify_iot, psc->sc_notify_ioh, 0, idx);
}

/* Only required for 0.9; a harmless no-op when called for 1.0. */
static int
virtio_pci_adjust_config_region(struct virtio_pci_softc *psc)
{
	struct virtio_softc * const sc = &psc->sc_sc;
	device_t self = sc->sc_dev;

	if (psc->sc_sc.sc_version_1)
		return 0;

	sc->sc_devcfg_iosize = psc->sc_iosize - psc->sc_devcfg_offset;
	sc->sc_devcfg_iot = psc->sc_iot;
	if (bus_space_subregion(psc->sc_iot, psc->sc_ioh,
	    psc->sc_devcfg_offset, sc->sc_devcfg_iosize,
	    &sc->sc_devcfg_ioh)) {
		aprint_error_dev(self, "can't map config i/o space\n");
		return EIO;
	}

	return 0;
}

static uint16_t
virtio_pci_read_queue_size_09(struct virtio_softc *sc, uint16_t idx)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;

	bus_space_write_2(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_SELECT, idx);
	return bus_space_read_2(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_SIZE);
}
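/*
 * The legacy interface identifies the ring by a single 32-bit page frame
 * number (addr / VIRTIO_PAGE_SIZE); writing 0 detaches the ring from the
 * queue.
 */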
static void
virtio_pci_setup_queue_09(struct virtio_softc *sc, uint16_t idx, uint64_t addr)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;

	bus_space_write_2(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_SELECT, idx);
	bus_space_write_4(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_ADDRESS, addr / VIRTIO_PAGE_SIZE);

	if (psc->sc_ihs_num > 1) {
		int vec = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
		if (sc->sc_child_mq)
			vec += idx;
		bus_space_write_2(psc->sc_iot, psc->sc_ioh,
		    VIRTIO_CONFIG_MSI_QUEUE_VECTOR, vec);
	}
}
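/*
 * The status register accumulates: any non-zero write ORs in the bits
 * that are already set, so status bits are only ever added.  Writing 0
 * resets the device.
 */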
static void
virtio_pci_set_status_09(struct virtio_softc *sc, int status)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	int old = 0;

	if (status != 0) {
		old = bus_space_read_1(psc->sc_iot, psc->sc_ioh,
		    VIRTIO_CONFIG_DEVICE_STATUS);
	}
	bus_space_write_1(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_DEVICE_STATUS, status|old);
}

static void
virtio_pci_negotiate_features_09(struct virtio_softc *sc, uint64_t guest_features)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	uint32_t r;

	r = bus_space_read_4(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_DEVICE_FEATURES);

	r &= guest_features;

	bus_space_write_4(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_GUEST_FEATURES, r);

	sc->sc_active_features = r;
}

/* -------------------------------------
 * Version 1.0 support
 * -------------------------------------*/

static void
virtio_pci_kick_10(struct virtio_softc *sc, uint16_t idx)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	unsigned offset = sc->sc_vqs[idx].vq_notify_off *
	    psc->sc_notify_off_multiplier;

	bus_space_write_2(psc->sc_notify_iot, psc->sc_notify_ioh, offset, idx);
}


static uint16_t
virtio_pci_read_queue_size_10(struct virtio_softc *sc, uint16_t idx)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	bus_space_tag_t iot = psc->sc_iot;
	bus_space_handle_t ioh = psc->sc_ioh;

	bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SELECT, idx);
	return bus_space_read_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SIZE);
}

/*
 * By definition little endian only in v1.0.  NB: "MAY" in the text
 * below refers to "independently" (i.e. the order of accesses) not
 * "32-bit" (which is restricted by the earlier "MUST").
 *
 * 4.1.3.1 Driver Requirements: PCI Device Layout
 *
 * For device configuration access, the driver MUST use ... 32-bit
 * wide and aligned accesses for ... 64-bit wide fields.  For 64-bit
 * fields, the driver MAY access each of the high and low 32-bit parts
 * of the field independently.
 */
static __inline void
virtio_pci_bus_space_write_8(bus_space_tag_t iot, bus_space_handle_t ioh,
    bus_size_t offset, uint64_t value)
{
#if _QUAD_HIGHWORD
	/* _QUAD_HIGHWORD != 0: little-endian host, low word goes first */
	bus_space_write_4(iot, ioh, offset, BUS_ADDR_LO32(value));
	bus_space_write_4(iot, ioh, offset + 4, BUS_ADDR_HI32(value));
#else
	bus_space_write_4(iot, ioh, offset, BUS_ADDR_HI32(value));
	bus_space_write_4(iot, ioh, offset + 4, BUS_ADDR_LO32(value));
#endif
}
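/*
 * Programming a queue in 1.0: select the queue, write the 64-bit ring
 * addresses as two 32-bit halves, enable it and fetch its notify offset.
 * A zero address instead disables the queue and clears its ring
 * addresses (used when tearing down).
 */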
static void
virtio_pci_setup_queue_10(struct virtio_softc *sc, uint16_t idx, uint64_t addr)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	struct virtqueue *vq = &sc->sc_vqs[idx];
	bus_space_tag_t iot = psc->sc_iot;
	bus_space_handle_t ioh = psc->sc_ioh;
	KASSERT(vq->vq_index == idx);

	bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SELECT, vq->vq_index);
	if (addr == 0) {
		bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_ENABLE, 0);
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_DESC, 0);
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_AVAIL, 0);
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_USED, 0);
	} else {
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_DESC, addr);
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_AVAIL, addr + vq->vq_availoffset);
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_USED, addr + vq->vq_usedoffset);
		bus_space_write_2(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_ENABLE, 1);
		vq->vq_notify_off = bus_space_read_2(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_NOTIFY_OFF);
	}

	if (psc->sc_ihs_num > 1) {
		int vec = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
		if (sc->sc_child_mq)
			vec += idx;
		bus_space_write_2(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_MSIX_VECTOR, vec);
	}
}

static void
virtio_pci_set_status_10(struct virtio_softc *sc, int status)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	bus_space_tag_t iot = psc->sc_iot;
	bus_space_handle_t ioh = psc->sc_ioh;
	int old = 0;

	if (status)
		old = bus_space_read_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS);
	bus_space_write_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS, status | old);
}

static void
virtio_pci_negotiate_features_10(struct virtio_softc *sc, uint64_t guest_features)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	device_t self = sc->sc_dev;
	bus_space_tag_t iot = psc->sc_iot;
	bus_space_handle_t ioh = psc->sc_ioh;
	uint64_t host, negotiated, device_status;

	guest_features |= VIRTIO_F_VERSION_1;
	/* notify on empty is 0.9 only */
	guest_features &= ~VIRTIO_F_NOTIFY_ON_EMPTY;
	sc->sc_active_features = 0;

	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE_SELECT, 0);
	host = bus_space_read_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE);
	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE_SELECT, 1);
	host |= (uint64_t)
	    bus_space_read_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE) << 32;

	negotiated = host & guest_features;

	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE_SELECT, 0);
	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE,
	    negotiated & 0xffffffff);
	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE_SELECT, 1);
	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE,
	    negotiated >> 32);
	virtio_pci_set_status_10(sc, VIRTIO_CONFIG_DEVICE_STATUS_FEATURES_OK);

	device_status = bus_space_read_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS);
	if ((device_status & VIRTIO_CONFIG_DEVICE_STATUS_FEATURES_OK) == 0) {
		aprint_error_dev(self, "feature negotiation failed\n");
		bus_space_write_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS,
		    VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
		return;
	}

	if ((negotiated & VIRTIO_F_VERSION_1) == 0) {
		aprint_error_dev(self, "host rejected version 1\n");
		bus_space_write_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS,
		    VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
		return;
	}

	sc->sc_active_features = negotiated;
	return;
}


/* -------------------------------------
 * Generic PCI interrupt code
 * -------------------------------------*/
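/*
 * A device that cannot map an event to the requested vector is expected
 * to read back differently (the spec reserves 0xffff, "NO_VECTOR", for
 * unmapped events), so every vector write below is verified by reading
 * the register back and comparing.
 */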
static int
virtio_pci_setup_interrupts_10(struct virtio_softc *sc, int reinit)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	bus_space_tag_t iot = psc->sc_iot;
	bus_space_handle_t ioh = psc->sc_ioh;
	int vector, ret, qid;

	if (!virtio_pci_msix_enabled(psc))
		return 0;

	vector = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	bus_space_write_2(iot, ioh,
	    VIRTIO_CONFIG1_CONFIG_MSIX_VECTOR, vector);
	ret = bus_space_read_2(iot, ioh, VIRTIO_CONFIG1_CONFIG_MSIX_VECTOR);
	if (ret != vector) {
		VIRTIO_PCI_LOG(sc, reinit,
		    "can't set config msix vector\n");
		return -1;
	}

	for (qid = 0; qid < sc->sc_nvqs; qid++) {
		vector = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;

		if (sc->sc_child_mq)
			vector += qid;
		bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SELECT, qid);
		bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_MSIX_VECTOR,
		    vector);
		ret = bus_space_read_2(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_MSIX_VECTOR);
		if (ret != vector) {
			VIRTIO_PCI_LOG(sc, reinit, "can't set queue %d "
			    "msix vector\n", qid);
			return -1;
		}
	}

	return 0;
}

static int
virtio_pci_setup_interrupts_09(struct virtio_softc *sc, int reinit)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	int offset, vector, ret, qid;

	if (!virtio_pci_msix_enabled(psc))
		return 0;

	offset = VIRTIO_CONFIG_MSI_CONFIG_VECTOR;
	vector = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;

	bus_space_write_2(psc->sc_iot, psc->sc_ioh, offset, vector);
	ret = bus_space_read_2(psc->sc_iot, psc->sc_ioh, offset);
	if (ret != vector) {
		aprint_debug_dev(sc->sc_dev, "%s: expected=%d, actual=%d\n",
		    __func__, vector, ret);
		VIRTIO_PCI_LOG(sc, reinit,
		    "can't set config msix vector\n");
		return -1;
	}

	for (qid = 0; qid < sc->sc_nvqs; qid++) {
		offset = VIRTIO_CONFIG_QUEUE_SELECT;
		bus_space_write_2(psc->sc_iot, psc->sc_ioh, offset, qid);

		offset = VIRTIO_CONFIG_MSI_QUEUE_VECTOR;
		vector = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;

		if (sc->sc_child_mq)
			vector += qid;

		bus_space_write_2(psc->sc_iot, psc->sc_ioh, offset, vector);
		ret = bus_space_read_2(psc->sc_iot, psc->sc_ioh, offset);
		if (ret != vector) {
			aprint_debug_dev(sc->sc_dev, "%s[qid=%d]:"
			    " expected=%d, actual=%d\n",
			    __func__, qid, vector, ret);
			VIRTIO_PCI_LOG(sc, reinit, "can't set queue %d "
			    "msix vector\n", qid);
			return -1;
		}
	}

	return 0;
}
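/*
 * Establish the MSI-X handlers: always one for config changes, then
 * either a single shared handler for all virtqueues or, when the child
 * asked for multiqueue (sc_child_mq), one handler per virtqueue spread
 * across CPUs via interrupt_distribute().
 */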
static int
virtio_pci_establish_msix_interrupts(struct virtio_softc *sc,
    struct pci_attach_args *pa)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	device_t self = sc->sc_dev;
	pci_chipset_tag_t pc = pa->pa_pc;
	struct virtqueue *vq;
	char intrbuf[PCI_INTRSTR_LEN];
	char intr_xname[INTRDEVNAMEBUF];
	char const *intrstr;
	int idx, qid, n;

	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE)
		pci_intr_setattr(pc, &psc->sc_ihp[idx], PCI_INTR_MPSAFE, true);

	snprintf(intr_xname, sizeof(intr_xname), "%s config",
	    device_xname(sc->sc_dev));

	psc->sc_ihs[idx] = pci_intr_establish_xname(pc, psc->sc_ihp[idx],
	    sc->sc_ipl, virtio_pci_msix_config_intr, sc, intr_xname);
	if (psc->sc_ihs[idx] == NULL) {
		aprint_error_dev(self, "couldn't establish MSI-X for config\n");
		goto error;
	}

	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
	if (sc->sc_child_mq) {
		for (qid = 0; qid < sc->sc_nvqs; qid++) {
			n = idx + qid;
			vq = &sc->sc_vqs[qid];

			snprintf(intr_xname, sizeof(intr_xname), "%s vq#%d",
			    device_xname(sc->sc_dev), qid);

			if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE) {
				pci_intr_setattr(pc, &psc->sc_ihp[n],
				    PCI_INTR_MPSAFE, true);
			}

			psc->sc_ihs[n] = pci_intr_establish_xname(pc, psc->sc_ihp[n],
			    sc->sc_ipl, vq->vq_intrhand, vq->vq_intrhand_arg, intr_xname);
			if (psc->sc_ihs[n] == NULL) {
				aprint_error_dev(self, "couldn't establish MSI-X for a vq\n");
				goto error;
			}
		}
	} else {
		if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE)
			pci_intr_setattr(pc, &psc->sc_ihp[idx], PCI_INTR_MPSAFE, true);

		snprintf(intr_xname, sizeof(intr_xname), "%s queues",
		    device_xname(sc->sc_dev));
		psc->sc_ihs[idx] = pci_intr_establish_xname(pc, psc->sc_ihp[idx],
		    sc->sc_ipl, virtio_pci_msix_queue_intr, sc, intr_xname);
		if (psc->sc_ihs[idx] == NULL) {
			aprint_error_dev(self, "couldn't establish MSI-X for queues\n");
			goto error;
		}
	}

	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	intrstr = pci_intr_string(pc, psc->sc_ihp[idx], intrbuf, sizeof(intrbuf));
	aprint_normal_dev(self, "config interrupting at %s\n", intrstr);
	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
	if (sc->sc_child_mq) {
		kcpuset_t *affinity;
		int affinity_to, r;

		kcpuset_create(&affinity, false);

		for (qid = 0; qid < sc->sc_nvqs; qid++) {
			n = idx + qid;
			affinity_to = (qid / 2) % ncpu;

			intrstr = pci_intr_string(pc, psc->sc_ihp[n],
			    intrbuf, sizeof(intrbuf));

			kcpuset_zero(affinity);
			kcpuset_set(affinity, affinity_to);
			r = interrupt_distribute(psc->sc_ihs[n], affinity, NULL);
			if (r == 0) {
				aprint_normal_dev(self,
				    "for vq #%d interrupting at %s affinity to %u\n",
				    qid, intrstr, affinity_to);
			} else {
				aprint_normal_dev(self,
				    "for vq #%d interrupting at %s\n",
				    qid, intrstr);
			}
		}

		kcpuset_destroy(affinity);
	} else {
		intrstr = pci_intr_string(pc, psc->sc_ihp[idx], intrbuf, sizeof(intrbuf));
		aprint_normal_dev(self, "queues interrupting at %s\n", intrstr);
	}

	return 0;

error:
	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	if (psc->sc_ihs[idx] != NULL)
		pci_intr_disestablish(psc->sc_pa.pa_pc, psc->sc_ihs[idx]);
	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
	if (sc->sc_child_mq) {
		for (qid = 0; qid < sc->sc_nvqs; qid++) {
			n = idx + qid;
			if (psc->sc_ihs[n] == NULL)
				continue;
			pci_intr_disestablish(psc->sc_pa.pa_pc, psc->sc_ihs[n]);
		}

	} else {
		if (psc->sc_ihs[idx] != NULL)
			pci_intr_disestablish(psc->sc_pa.pa_pc, psc->sc_ihs[idx]);
	}

	return -1;
}

static int
virtio_pci_establish_intx_interrupt(struct virtio_softc *sc,
    struct pci_attach_args *pa)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	device_t self = sc->sc_dev;
	pci_chipset_tag_t pc = pa->pa_pc;
	char intrbuf[PCI_INTRSTR_LEN];
	char const *intrstr;

	if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE)
		pci_intr_setattr(pc, &psc->sc_ihp[0], PCI_INTR_MPSAFE, true);

	psc->sc_ihs[0] = pci_intr_establish_xname(pc, psc->sc_ihp[0],
	    sc->sc_ipl, virtio_pci_intr, sc, device_xname(sc->sc_dev));
	if (psc->sc_ihs[0] == NULL) {
		aprint_error_dev(self, "couldn't establish INTx\n");
		return -1;
	}

	intrstr = pci_intr_string(pc, psc->sc_ihp[0], intrbuf, sizeof(intrbuf));
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	return 0;
}
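/*
 * Try MSI-X first when the child allows it: one config vector plus
 * either one vector per virtqueue (sc_child_mq) or a single shared
 * queue vector.  If the vectors can't be established, release them and
 * retry with INTx.
 */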
static int
virtio_pci_alloc_interrupts(struct virtio_softc *sc)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	device_t self = sc->sc_dev;
	pci_chipset_tag_t pc = psc->sc_pa.pa_pc;
	pcitag_t tag = psc->sc_pa.pa_tag;
	int error;
	int nmsix;
	int off;
	int counts[PCI_INTR_TYPE_SIZE];
	pci_intr_type_t max_type;
	pcireg_t ctl;

	nmsix = pci_msix_count(psc->sc_pa.pa_pc, psc->sc_pa.pa_tag);
	aprint_debug_dev(self, "pci_msix_count=%d\n", nmsix);

	/* We need at least two: one for config and the other for queues */
	if ((sc->sc_flags & VIRTIO_F_INTR_MSIX) == 0 || nmsix < 2) {
		/* Try INTx only */
		max_type = PCI_INTR_TYPE_INTX;
		counts[PCI_INTR_TYPE_INTX] = 1;
	} else {
		/* Try MSI-X first and INTx second */
		if (sc->sc_nvqs + VIRTIO_MSIX_QUEUE_VECTOR_INDEX <= nmsix) {
			nmsix = sc->sc_nvqs + VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
		} else {
			sc->sc_child_mq = false;
		}

		if (sc->sc_child_mq == false) {
			nmsix = 2;
		}

		max_type = PCI_INTR_TYPE_MSIX;
		counts[PCI_INTR_TYPE_MSIX] = nmsix;
		counts[PCI_INTR_TYPE_MSI] = 0;
		counts[PCI_INTR_TYPE_INTX] = 1;
	}

retry:
	error = pci_intr_alloc(&psc->sc_pa, &psc->sc_ihp, counts, max_type);
	if (error != 0) {
		aprint_error_dev(self, "couldn't map interrupt\n");
		return -1;
	}

	if (pci_intr_type(pc, psc->sc_ihp[0]) == PCI_INTR_TYPE_MSIX) {
		psc->sc_ihs = kmem_zalloc(sizeof(*psc->sc_ihs) * nmsix,
		    KM_SLEEP);

		error = virtio_pci_establish_msix_interrupts(sc, &psc->sc_pa);
		if (error != 0) {
			kmem_free(psc->sc_ihs, sizeof(*psc->sc_ihs) * nmsix);
			pci_intr_release(pc, psc->sc_ihp, nmsix);

			/* Retry INTx */
			max_type = PCI_INTR_TYPE_INTX;
			counts[PCI_INTR_TYPE_INTX] = 1;
			goto retry;
		}

		psc->sc_ihs_num = nmsix;
		psc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_MSI;
		virtio_pci_adjust_config_region(psc);
	} else if (pci_intr_type(pc, psc->sc_ihp[0]) == PCI_INTR_TYPE_INTX) {
		psc->sc_ihs = kmem_zalloc(sizeof(*psc->sc_ihs) * 1,
		    KM_SLEEP);

		error = virtio_pci_establish_intx_interrupt(sc, &psc->sc_pa);
		if (error != 0) {
			kmem_free(psc->sc_ihs, sizeof(*psc->sc_ihs) * 1);
			pci_intr_release(pc, psc->sc_ihp, 1);
			return -1;
		}

		psc->sc_ihs_num = 1;
		psc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
		virtio_pci_adjust_config_region(psc);

		/*
		 * pci_get_capability() returns non-zero iff the capability
		 * exists: if the device has MSI-X at all, make sure it stays
		 * disabled while we run on INTx.
		 */
		error = pci_get_capability(pc, tag, PCI_CAP_MSIX, &off, NULL);
		if (error != 0) {
			ctl = pci_conf_read(pc, tag, off + PCI_MSIX_CTL);
			ctl &= ~PCI_MSIX_CTL_ENABLE;
			pci_conf_write(pc, tag, off + PCI_MSIX_CTL, ctl);
		}
	}

	return 0;
}

static void
virtio_pci_free_interrupts(struct virtio_softc *sc)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;

	for (int i = 0; i < psc->sc_ihs_num; i++) {
		if (psc->sc_ihs[i] == NULL)
			continue;
		pci_intr_disestablish(psc->sc_pa.pa_pc, psc->sc_ihs[i]);
		psc->sc_ihs[i] = NULL;
	}

	if (psc->sc_ihs_num > 0)
		pci_intr_release(psc->sc_pa.pa_pc, psc->sc_ihp, psc->sc_ihs_num);

	if (psc->sc_ihs != NULL) {
		kmem_free(psc->sc_ihs, sizeof(*psc->sc_ihs) * psc->sc_ihs_num);
		psc->sc_ihs = NULL;
	}
	psc->sc_ihs_num = 0;
}

static bool
virtio_pci_msix_enabled(struct virtio_pci_softc *psc)
{
	pci_chipset_tag_t pc = psc->sc_pa.pa_pc;

	if (pci_intr_type(pc, psc->sc_ihp[0]) == PCI_INTR_TYPE_MSIX)
		return true;

	return false;
}

/*
 * Interrupt handler.
 */
static int
virtio_pci_intr(void *arg)
{
	struct virtio_softc *sc = arg;
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	int isr, r = 0;

	/* check and ack the interrupt */
	isr = bus_space_read_1(psc->sc_isr_iot, psc->sc_isr_ioh, 0);
	if (isr == 0)
		return 0;
	if ((isr & VIRTIO_CONFIG_ISR_CONFIG_CHANGE) &&
	    (sc->sc_config_change != NULL))
		r = (sc->sc_config_change)(sc);
	if (sc->sc_intrhand != NULL) {
		if (sc->sc_soft_ih != NULL)
			softint_schedule(sc->sc_soft_ih);
		else
			r |= (sc->sc_intrhand)(sc);
	}

	return r;
}

static int
virtio_pci_msix_queue_intr(void *arg)
{
	struct virtio_softc *sc = arg;
	int r = 0;

	if (sc->sc_intrhand != NULL) {
		if (sc->sc_soft_ih != NULL)
			softint_schedule(sc->sc_soft_ih);
		else
			r |= (sc->sc_intrhand)(sc);
	}

	return r;
}

static int
virtio_pci_msix_config_intr(void *arg)
{
	struct virtio_softc *sc = arg;
	int r = 0;

	if (sc->sc_config_change != NULL)
		r = (sc->sc_config_change)(sc);
	return r;
}

MODULE(MODULE_CLASS_DRIVER, virtio_pci, "pci,virtio");

#ifdef _MODULE
#include "ioconf.c"
#endif

static int
virtio_pci_modcmd(modcmd_t cmd, void *opaque)
{
	int error = 0;

#ifdef _MODULE
	switch (cmd) {
	case MODULE_CMD_INIT:
		error = config_init_component(cfdriver_ioconf_virtio_pci,
		    cfattach_ioconf_virtio_pci, cfdata_ioconf_virtio_pci);
		break;
	case MODULE_CMD_FINI:
		error = config_fini_component(cfdriver_ioconf_virtio_pci,
		    cfattach_ioconf_virtio_pci, cfdata_ioconf_virtio_pci);
		break;
	default:
		error = ENOTTY;
		break;
	}
#endif

	return error;
}