/*	$NetBSD: virtio_pci.c,v 1.42 2023/04/19 00:23:45 yamaguchi Exp $	*/

/*
 * Copyright (c) 2020 The NetBSD Foundation, Inc.
 * Copyright (c) 2012 Stefan Fritsch.
 * Copyright (c) 2010 Minoura Makoto.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: virtio_pci.c,v 1.42 2023/04/19 00:23:45 yamaguchi Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kmem.h>
#include <sys/module.h>
#include <sys/endian.h>
#include <sys/interrupt.h>
#include <sys/syslog.h>

#include <sys/device.h>

#include <dev/pci/pcidevs.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/pci/virtioreg.h>	/* XXX: move to non-pci */
#include <dev/pci/virtio_pcireg.h>

#define VIRTIO_PRIVATE
#include <dev/pci/virtiovar.h>	/* XXX: move to non-pci */
/*
 * Log a message either via log(9) (when reinitializing from interrupt
 * context, _use_log != 0) or via aprint_error_dev(9) (during attach).
 */
#define VIRTIO_PCI_LOG(_sc, _use_log, _fmt, _args...)	\
do {							\
	if ((_use_log)) {				\
		log(LOG_DEBUG, "%s: " _fmt,		\
		    device_xname((_sc)->sc_dev),	\
		    ##_args);				\
	} else {					\
		aprint_error_dev((_sc)->sc_dev,		\
		    _fmt, ##_args);			\
	}						\
} while (0)

static int	virtio_pci_match(device_t, cfdata_t, void *);
static void	virtio_pci_attach(device_t, device_t, void *);
static int	virtio_pci_rescan(device_t, const char *, const int *);
static int	virtio_pci_detach(device_t, int);

#define NMAPREG	((PCI_MAPREG_END - PCI_MAPREG_START) / \
		    sizeof(pcireg_t))

struct virtio_pci_softc {
	struct virtio_softc	sc_sc;
	bool			sc_intr_pervq;

	/* IO space */
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_iosize;
	bus_size_t		sc_mapped_iosize;

	/* BARs */
	bus_space_tag_t		sc_bars_iot[NMAPREG];
	bus_space_handle_t	sc_bars_ioh[NMAPREG];
	bus_size_t		sc_bars_iosize[NMAPREG];

	/* notify space */
	bus_space_tag_t		sc_notify_iot;
	bus_space_handle_t	sc_notify_ioh;
	bus_size_t		sc_notify_iosize;
	uint32_t		sc_notify_off_multiplier;

	/* isr space */
	bus_space_tag_t		sc_isr_iot;
	bus_space_handle_t	sc_isr_ioh;
	bus_size_t		sc_isr_iosize;

	/* generic */
	struct pci_attach_args	sc_pa;
	pci_intr_handle_t	*sc_ihp;
	void			**sc_ihs;
	int			sc_ihs_num;
	int			sc_devcfg_offset;	/* for 0.9 */
};

static int	virtio_pci_attach_09(device_t, void *);
static void	virtio_pci_kick_09(struct virtio_softc *, uint16_t);
static uint16_t	virtio_pci_read_queue_size_09(struct virtio_softc *, uint16_t);
static void	virtio_pci_setup_queue_09(struct virtio_softc *, uint16_t,
		    uint64_t);
static void	virtio_pci_set_status_09(struct virtio_softc *, int);
static void	virtio_pci_negotiate_features_09(struct virtio_softc *,
		    uint64_t);

static int	virtio_pci_attach_10(device_t, void *);
static void	virtio_pci_kick_10(struct virtio_softc *, uint16_t);
static uint16_t	virtio_pci_read_queue_size_10(struct virtio_softc *, uint16_t);
static void	virtio_pci_setup_queue_10(struct virtio_softc *, uint16_t,
		    uint64_t);
static void	virtio_pci_set_status_10(struct virtio_softc *, int);
static void	virtio_pci_negotiate_features_10(struct virtio_softc *,
		    uint64_t);
static int	virtio_pci_find_cap(struct virtio_pci_softc *psc, int cfg_type,
		    void *buf, int buflen);

static int	virtio_pci_alloc_interrupts(struct virtio_softc *);
static void	virtio_pci_free_interrupts(struct virtio_softc *);
static int	virtio_pci_adjust_config_region(struct virtio_pci_softc *psc);
static int	virtio_pci_intr(void *arg);
static int	virtio_pci_msix_queue_intr(void *);
static int	virtio_pci_msix_config_intr(void *);
static int	virtio_pci_setup_interrupts_09(struct virtio_softc *, int);
static int	virtio_pci_setup_interrupts_10(struct virtio_softc *, int);
static int	virtio_pci_establish_msix_interrupts(struct virtio_softc *,
		    struct pci_attach_args *);
static int	virtio_pci_establish_intx_interrupt(struct virtio_softc *,
		    struct pci_attach_args *);
static bool	virtio_pci_msix_enabled(struct virtio_pci_softc *);

#define VIRTIO_MSIX_CONFIG_VECTOR_INDEX	0
#define VIRTIO_MSIX_QUEUE_VECTOR_INDEX	1

/*
 * When using PCI attached virtio on aarch64-eb under QEMU, the IO space
 * suddenly reads BIG_ENDIAN where it should stay LITTLE_ENDIAN.  Data read
 * one byte at a time seems OK, but reading larger widths results in swapped
 * endianness.  This is most notable when reading 8-byte fields, since we
 * can't use bus_space_{read,write}_8().
 */

#if defined(__aarch64__) && BYTE_ORDER == BIG_ENDIAN
#	define READ_ENDIAN_09	BIG_ENDIAN	/* should be LITTLE_ENDIAN */
#	define READ_ENDIAN_10	BIG_ENDIAN
#	define STRUCT_ENDIAN_09	BIG_ENDIAN
#	define STRUCT_ENDIAN_10	LITTLE_ENDIAN
#elif BYTE_ORDER == BIG_ENDIAN
#	define READ_ENDIAN_09	LITTLE_ENDIAN
#	define READ_ENDIAN_10	BIG_ENDIAN
#	define STRUCT_ENDIAN_09	BIG_ENDIAN
#	define STRUCT_ENDIAN_10	LITTLE_ENDIAN
#else /* little endian */
#	define READ_ENDIAN_09	LITTLE_ENDIAN
#	define READ_ENDIAN_10	LITTLE_ENDIAN
#	define STRUCT_ENDIAN_09	LITTLE_ENDIAN
#	define STRUCT_ENDIAN_10	LITTLE_ENDIAN
#endif
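/*
 * Illustrative sketch (comment only, not compiled): because of the quirk
 * described above, a 64-bit register is always accessed as two 32-bit
 * halves, e.g.:
 *
 *	uint32_t lo = bus_space_read_4(iot, ioh, off);
 *	uint32_t hi = bus_space_read_4(iot, ioh, off + 4);
 *	uint64_t val = ((uint64_t)hi << 32) | lo;
 *
 * Any byte swapping on top of that is left to the generic virtio(4)
 * config accessors, which consult the sc_bus_endian and sc_struct_endian
 * values set from the READ_ENDIAN_* and STRUCT_ENDIAN_* macros above.
 */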
CFATTACH_DECL3_NEW(virtio_pci, sizeof(struct virtio_pci_softc),
    virtio_pci_match, virtio_pci_attach, virtio_pci_detach, NULL,
    virtio_pci_rescan, NULL, DVF_DETACH_SHUTDOWN);

static const struct virtio_ops virtio_pci_ops_09 = {
	.kick = virtio_pci_kick_09,
	.read_queue_size = virtio_pci_read_queue_size_09,
	.setup_queue = virtio_pci_setup_queue_09,
	.set_status = virtio_pci_set_status_09,
	.neg_features = virtio_pci_negotiate_features_09,
	.alloc_interrupts = virtio_pci_alloc_interrupts,
	.free_interrupts = virtio_pci_free_interrupts,
	.setup_interrupts = virtio_pci_setup_interrupts_09,
};

static const struct virtio_ops virtio_pci_ops_10 = {
	.kick = virtio_pci_kick_10,
	.read_queue_size = virtio_pci_read_queue_size_10,
	.setup_queue = virtio_pci_setup_queue_10,
	.set_status = virtio_pci_set_status_10,
	.neg_features = virtio_pci_negotiate_features_10,
	.alloc_interrupts = virtio_pci_alloc_interrupts,
	.free_interrupts = virtio_pci_free_interrupts,
	.setup_interrupts = virtio_pci_setup_interrupts_10,
};

static int
virtio_pci_match(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa;

	pa = (struct pci_attach_args *)aux;
	switch (PCI_VENDOR(pa->pa_id)) {
	case PCI_VENDOR_QUMRANET:
		/* Transitional devices MUST have a PCI Revision ID of 0. */
		if (((PCI_PRODUCT_QUMRANET_VIRTIO_1000 <=
		    PCI_PRODUCT(pa->pa_id)) &&
		    (PCI_PRODUCT(pa->pa_id) <=
		    PCI_PRODUCT_QUMRANET_VIRTIO_103F)) &&
		    PCI_REVISION(pa->pa_class) == 0)
			return 1;
		/*
		 * Non-transitional devices SHOULD have a PCI Revision
		 * ID of 1 or higher.  Drivers MUST match any PCI
		 * Revision ID value.
		 */
		if (((PCI_PRODUCT_QUMRANET_VIRTIO_1040 <=
		    PCI_PRODUCT(pa->pa_id)) &&
		    (PCI_PRODUCT(pa->pa_id) <=
		    PCI_PRODUCT_QUMRANET_VIRTIO_107F)) &&
		    /* XXX: TODO */
		    PCI_REVISION(pa->pa_class) == 1)
			return 1;
		break;
	}

	return 0;
}

static void
virtio_pci_attach(device_t parent, device_t self, void *aux)
{
	struct virtio_pci_softc * const psc = device_private(self);
	struct virtio_softc * const sc = &psc->sc_sc;
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	int revision;
	int ret;
	pcireg_t id;
	pcireg_t csr;

	revision = PCI_REVISION(pa->pa_class);
	switch (revision) {
	case 0:
		/* the subsystem ID identifies the device type */
		id = PCI_SUBSYS_ID(pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG));
		break;
	case 1:
		/* the PCI product ID encodes the device type */
		id = PCI_PRODUCT(pa->pa_id) - PCI_PRODUCT_QUMRANET_VIRTIO_1040;
		break;
	default:
		aprint_normal(": unknown revision 0x%02x; giving up\n",
		    revision);
		return;
	}

	aprint_normal("\n");
	aprint_naive("\n");
	virtio_print_device_type(self, id, revision);

	csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
	csr |= PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_IO_ENABLE;
	pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);

	sc->sc_dev = self;
	psc->sc_pa = *pa;
	psc->sc_iot = pa->pa_iot;

	sc->sc_dmat = pa->pa_dmat;
	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;

	/* attach is dependent on revision */
	ret = 0;
	if (revision == 1) {
		/* try to attach 1.0 */
		ret = virtio_pci_attach_10(self, aux);
	}
	if (ret == 0 && revision == 0) {
		/* revision 0 means 0.9 only or both 0.9 and 1.0 */
		ret = virtio_pci_attach_09(self, aux);
	}
	if (ret) {
		aprint_error_dev(self, "cannot attach (%d)\n", ret);
		return;
	}
	KASSERT(sc->sc_ops);

	/* preset config region */
	psc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
	if (virtio_pci_adjust_config_region(psc))
		return;

	/* generic */
	virtio_device_reset(sc);
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);

	sc->sc_childdevid = id;
	sc->sc_child = NULL;
	virtio_pci_rescan(self, NULL, NULL);
	return;
}
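/*
 * For reference (cf. the driver initialization sequence in the virtio
 * spec, 3.1.1): attach above performs the reset and the ACKNOWLEDGE and
 * DRIVER status steps; feature negotiation, FEATURES_OK (1.0 only),
 * virtqueue setup and the final DRIVER_OK happen later, when the child
 * device driver calls back into virtio(4).
 */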
/* ARGSUSED */
static int
virtio_pci_rescan(device_t self, const char *ifattr, const int *locs)
{
	struct virtio_pci_softc * const psc = device_private(self);
	struct virtio_softc * const sc = &psc->sc_sc;
	struct virtio_attach_args va;

	if (sc->sc_child)	/* Child already attached? */
		return 0;

	memset(&va, 0, sizeof(va));
	va.sc_childdevid = sc->sc_childdevid;

	config_found(self, &va, NULL, CFARGS_NONE);

	if (virtio_attach_failed(sc))
		return 0;

	return 0;
}

static int
virtio_pci_detach(device_t self, int flags)
{
	struct virtio_pci_softc * const psc = device_private(self);
	struct virtio_softc * const sc = &psc->sc_sc;
	int r;

	r = config_detach_children(self, flags);
	if (r != 0)
		return r;

	/* Check that child never attached, or detached properly */
	KASSERT(sc->sc_child == NULL);
	KASSERT(sc->sc_vqs == NULL);
	KASSERT(psc->sc_ihs_num == 0);

	if (psc->sc_iosize)
		bus_space_unmap(psc->sc_iot, psc->sc_ioh,
		    psc->sc_mapped_iosize);
	psc->sc_iosize = 0;

	return 0;
}
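/*
 * For reference (not authoritative; see the VIRTIO_CONFIG_* constants in
 * virtioreg.h): the legacy (0.9) I/O header mapped by
 * virtio_pci_attach_09() below is laid out as follows, offsets relative
 * to the start of the BAR:
 *
 *	0x00	device features	(32 bit)
 *	0x04	guest features	(32 bit)
 *	0x08	queue address	(32 bit, in units of VIRTIO_PAGE_SIZE)
 *	0x0c	queue size	(16 bit)
 *	0x0e	queue select	(16 bit)
 *	0x10	queue notify	(16 bit)
 *	0x12	device status	(8 bit)
 *	0x13	ISR status	(8 bit)
 *
 * The device-specific config follows at VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI
 * or VIRTIO_CONFIG_DEVICE_CONFIG_MSI, depending on whether MSI-X is in
 * use; see virtio_pci_adjust_config_region().
 */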
static int
virtio_pci_attach_09(device_t self, void *aux)
{
	struct virtio_pci_softc * const psc = device_private(self);
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	struct virtio_softc * const sc = &psc->sc_sc;

	/* complete IO region */
	if (pci_mapreg_map(pa, PCI_MAPREG_START, PCI_MAPREG_TYPE_IO, 0,
	    &psc->sc_iot, &psc->sc_ioh, NULL, &psc->sc_iosize)) {
		aprint_error_dev(self, "can't map i/o space\n");
		return EIO;
	}
	psc->sc_mapped_iosize = psc->sc_iosize;

	/* queue space */
	if (bus_space_subregion(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_NOTIFY, 2, &psc->sc_notify_ioh)) {
		aprint_error_dev(self, "can't map notify i/o space\n");
		return EIO;
	}
	psc->sc_notify_iosize = 2;
	psc->sc_notify_iot = psc->sc_iot;

	/* ISR space */
	if (bus_space_subregion(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_ISR_STATUS, 1, &psc->sc_isr_ioh)) {
		aprint_error_dev(self, "can't map isr i/o space\n");
		return EIO;
	}
	psc->sc_isr_iosize = 1;
	psc->sc_isr_iot = psc->sc_iot;

	/* set our version 0.9 ops */
	sc->sc_ops = &virtio_pci_ops_09;
	sc->sc_bus_endian = READ_ENDIAN_09;
	sc->sc_struct_endian = STRUCT_ENDIAN_09;
	return 0;
}

static int
virtio_pci_attach_10(device_t self, void *aux)
{
	struct virtio_pci_softc * const psc = device_private(self);
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	struct virtio_softc * const sc = &psc->sc_sc;
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;

	struct virtio_pci_cap common, isr, device;
	struct virtio_pci_notify_cap notify;
	int have_device_cfg = 0;
	bus_size_t bars[NMAPREG] = { 0 };
	int bars_idx[NMAPREG] = { 0 };
	struct virtio_pci_cap *caps[] =
	    { &common, &isr, &device, &notify.cap };
	int i, j, ret = 0;

	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_COMMON_CFG,
	    &common, sizeof(common)))
		return ENODEV;
	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_NOTIFY_CFG,
	    &notify, sizeof(notify)))
		return ENODEV;
	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_ISR_CFG,
	    &isr, sizeof(isr)))
		return ENODEV;
	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_DEVICE_CFG,
	    &device, sizeof(device)))
		memset(&device, 0, sizeof(device));
	else
		have_device_cfg = 1;

	/* Figure out which bars we need to map */
	for (i = 0; i < __arraycount(caps); i++) {
		int bar = caps[i]->bar;
		bus_size_t len = caps[i]->offset + caps[i]->length;
		if (caps[i]->length == 0)
			continue;
		if (bars[bar] < len)
			bars[bar] = len;
	}

	for (i = j = 0; i < __arraycount(bars); i++) {
		int reg;
		pcireg_t type;
		if (bars[i] == 0)
			continue;
		reg = PCI_BAR(i);
		type = pci_mapreg_type(pc, tag, reg);
		if (pci_mapreg_map(pa, reg, type, 0,
		    &psc->sc_bars_iot[j], &psc->sc_bars_ioh[j],
		    NULL, &psc->sc_bars_iosize[j])) {
			aprint_error_dev(self, "can't map bar %u\n", i);
			ret = EIO;
			goto err;
		}
		aprint_debug_dev(self,
		    "bar[%d]: iot %p, size 0x%" PRIxBUSSIZE "\n",
		    j, psc->sc_bars_iot[j], psc->sc_bars_iosize[j]);
		bars_idx[i] = j;
		j++;
	}

	i = bars_idx[notify.cap.bar];
	if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
	    notify.cap.offset, notify.cap.length,
	    &psc->sc_notify_ioh)) {
		aprint_error_dev(self, "can't map notify i/o space\n");
		ret = EIO;
		goto err;
	}
	psc->sc_notify_iosize = notify.cap.length;
	psc->sc_notify_iot = psc->sc_bars_iot[i];
	psc->sc_notify_off_multiplier = le32toh(notify.notify_off_multiplier);

	if (have_device_cfg) {
		i = bars_idx[device.bar];
		if (bus_space_subregion(psc->sc_bars_iot[i],
		    psc->sc_bars_ioh[i], device.offset, device.length,
		    &sc->sc_devcfg_ioh)) {
			aprint_error_dev(self, "can't map devcfg i/o space\n");
			ret = EIO;
			goto err;
		}
		aprint_debug_dev(self,
		    "device.offset = 0x%x, device.length = 0x%x\n",
		    device.offset, device.length);
		sc->sc_devcfg_iosize = device.length;
		sc->sc_devcfg_iot = psc->sc_bars_iot[i];
	}

	i = bars_idx[isr.bar];
	if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
	    isr.offset, isr.length, &psc->sc_isr_ioh)) {
		aprint_error_dev(self, "can't map isr i/o space\n");
		ret = EIO;
		goto err;
	}
	psc->sc_isr_iosize = isr.length;
	psc->sc_isr_iot = psc->sc_bars_iot[i];

	i = bars_idx[common.bar];
	if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
	    common.offset, common.length, &psc->sc_ioh)) {
		aprint_error_dev(self, "can't map common i/o space\n");
		ret = EIO;
		goto err;
	}
	psc->sc_iosize = common.length;
	psc->sc_iot = psc->sc_bars_iot[i];
	psc->sc_mapped_iosize = psc->sc_bars_iosize[i];

	psc->sc_sc.sc_version_1 = 1;

	/* set our version 1.0 ops */
	sc->sc_ops = &virtio_pci_ops_10;
	sc->sc_bus_endian = READ_ENDIAN_10;
	sc->sc_struct_endian = STRUCT_ENDIAN_10;
	return 0;

err:
	/* undo our pci_mapreg_map()s */
	for (i = 0; i < __arraycount(bars); i++) {
		if (psc->sc_bars_iosize[i] == 0)
			continue;
		bus_space_unmap(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
		    psc->sc_bars_iosize[i]);
	}
	return ret;
}

/* v1.0 attach helper */
static int
virtio_pci_find_cap(struct virtio_pci_softc *psc, int cfg_type, void *buf,
    int buflen)
{
	device_t self = psc->sc_sc.sc_dev;
	pci_chipset_tag_t pc = psc->sc_pa.pa_pc;
	pcitag_t tag = psc->sc_pa.pa_tag;
	unsigned int offset, i, len;
	union {
		pcireg_t reg[8];
		struct virtio_pci_cap vcap;
	} *v = buf;

	if (buflen < sizeof(struct virtio_pci_cap))
		return ERANGE;

	if (!pci_get_capability(pc, tag, PCI_CAP_VENDSPEC, &offset,
	    &v->reg[0]))
		return ENOENT;

	do {
		for (i = 0; i < 4; i++)
			v->reg[i] =
			    le32toh(pci_conf_read(pc, tag, offset + i * 4));
		if (v->vcap.cfg_type == cfg_type)
			break;
		offset = v->vcap.cap_next;
	} while (offset != 0);

	if (offset == 0)
		return ENOENT;

	if (v->vcap.cap_len > sizeof(struct virtio_pci_cap)) {
		len = roundup(v->vcap.cap_len, sizeof(pcireg_t));
		if (len > buflen) {
			aprint_error_dev(self, "%s cap too large\n", __func__);
			return ERANGE;
		}
		for (i = 4; i < len / sizeof(pcireg_t); i++)
			v->reg[i] =
			    le32toh(pci_conf_read(pc, tag, offset + i * 4));
	}

	/* endian fixup */
	v->vcap.offset = le32toh(v->vcap.offset);
	v->vcap.length = le32toh(v->vcap.length);
	return 0;
}
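/*
 * For reference, the vendor capability parsed by virtio_pci_find_cap()
 * has this layout (virtio spec 4.1.4, mirrored by struct virtio_pci_cap
 * in virtio_pcireg.h); multi-byte fields are little endian:
 *
 *	struct virtio_pci_cap {
 *		uint8_t  cap_vndr;	// PCI_CAP_VENDSPEC
 *		uint8_t  cap_next;	// offset of the next capability
 *		uint8_t  cap_len;	// length of this capability
 *		uint8_t  cfg_type;	// VIRTIO_PCI_CAP_*_CFG
 *		uint8_t  bar;		// which BAR holds the region
 *		uint8_t  padding[3];
 *		uint32_t offset;	// offset of the region in the BAR
 *		uint32_t length;	// length of the region
 *	};
 */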
/* -------------------------------------
 * Version 0.9 support
 * -------------------------------------*/

static void
virtio_pci_kick_09(struct virtio_softc *sc, uint16_t idx)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;

	bus_space_write_2(psc->sc_notify_iot, psc->sc_notify_ioh, 0, idx);
}

/* only applicable for v 0.9 but also called for 1.0 */
static int
virtio_pci_adjust_config_region(struct virtio_pci_softc *psc)
{
	struct virtio_softc * const sc = &psc->sc_sc;
	device_t self = sc->sc_dev;

	if (psc->sc_sc.sc_version_1)
		return 0;

	sc->sc_devcfg_iosize = psc->sc_iosize - psc->sc_devcfg_offset;
	sc->sc_devcfg_iot = psc->sc_iot;
	if (bus_space_subregion(psc->sc_iot, psc->sc_ioh,
	    psc->sc_devcfg_offset, sc->sc_devcfg_iosize,
	    &sc->sc_devcfg_ioh)) {
		aprint_error_dev(self, "can't map config i/o space\n");
		return EIO;
	}

	return 0;
}

static uint16_t
virtio_pci_read_queue_size_09(struct virtio_softc *sc, uint16_t idx)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;

	bus_space_write_2(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_SELECT, idx);
	return bus_space_read_2(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_SIZE);
}

static void
virtio_pci_setup_queue_09(struct virtio_softc *sc, uint16_t idx, uint64_t addr)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;

	bus_space_write_2(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_SELECT, idx);
	/* the legacy queue address register takes a page frame number */
	bus_space_write_4(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_ADDRESS, addr / VIRTIO_PAGE_SIZE);

	if (psc->sc_ihs_num > 1) {
		int vec = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
		if (psc->sc_intr_pervq)
			vec += idx;
		bus_space_write_2(psc->sc_iot, psc->sc_ioh,
		    VIRTIO_CONFIG_MSI_QUEUE_VECTOR, vec);
	}
}

static void
virtio_pci_set_status_09(struct virtio_softc *sc, int status)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	int old = 0;

	if (status != 0) {
		old = bus_space_read_1(psc->sc_iot, psc->sc_ioh,
		    VIRTIO_CONFIG_DEVICE_STATUS);
	}
	bus_space_write_1(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_DEVICE_STATUS, status|old);
}

static void
virtio_pci_negotiate_features_09(struct virtio_softc *sc,
    uint64_t guest_features)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	uint32_t r;

	r = bus_space_read_4(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_DEVICE_FEATURES);

	r &= guest_features;

	bus_space_write_4(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_GUEST_FEATURES, r);

	sc->sc_active_features = r;
}
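/*
 * Note that the legacy interface only exposes feature bits 0-31:
 * virtio_pci_negotiate_features_09() above reads and writes single
 * 32-bit registers, so features above bit 31 (including
 * VIRTIO_F_VERSION_1) can never be negotiated through this path.
 */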
/* -------------------------------------
 * Version 1.0 support
 * -------------------------------------*/

static void
virtio_pci_kick_10(struct virtio_softc *sc, uint16_t idx)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	unsigned offset = sc->sc_vqs[idx].vq_notify_off *
	    psc->sc_notify_off_multiplier;

	bus_space_write_2(psc->sc_notify_iot, psc->sc_notify_ioh, offset, idx);
}

static uint16_t
virtio_pci_read_queue_size_10(struct virtio_softc *sc, uint16_t idx)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	bus_space_tag_t iot = psc->sc_iot;
	bus_space_handle_t ioh = psc->sc_ioh;

	bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SELECT, idx);
	return bus_space_read_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SIZE);
}

/*
 * By definition little endian only in v1.0.  NB: "MAY" in the text
 * below refers to "independently" (i.e. the order of accesses) not
 * "32-bit" (which is restricted by the earlier "MUST").
 *
 * 4.1.3.1 Driver Requirements: PCI Device Layout
 *
 * For device configuration access, the driver MUST use ... 32-bit
 * wide and aligned accesses for ... 64-bit wide fields.  For 64-bit
 * fields, the driver MAY access each of the high and low 32-bit parts
 * of the field independently.
 */
static __inline void
virtio_pci_bus_space_write_8(bus_space_tag_t iot, bus_space_handle_t ioh,
    bus_size_t offset, uint64_t value)
{
#if _QUAD_HIGHWORD
	bus_space_write_4(iot, ioh, offset, BUS_ADDR_LO32(value));
	bus_space_write_4(iot, ioh, offset + 4, BUS_ADDR_HI32(value));
#else
	bus_space_write_4(iot, ioh, offset, BUS_ADDR_HI32(value));
	bus_space_write_4(iot, ioh, offset + 4, BUS_ADDR_LO32(value));
#endif
}

static void
virtio_pci_setup_queue_10(struct virtio_softc *sc, uint16_t idx, uint64_t addr)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	struct virtqueue *vq = &sc->sc_vqs[idx];
	bus_space_tag_t iot = psc->sc_iot;
	bus_space_handle_t ioh = psc->sc_ioh;

	KASSERT(vq->vq_index == idx);

	bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SELECT, vq->vq_index);
	if (addr == 0) {
		bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_ENABLE, 0);
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_DESC, 0);
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_AVAIL, 0);
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_USED, 0);
	} else {
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_DESC, addr);
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_AVAIL, addr + vq->vq_availoffset);
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_USED, addr + vq->vq_usedoffset);
		bus_space_write_2(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_ENABLE, 1);
		vq->vq_notify_off = bus_space_read_2(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_NOTIFY_OFF);
	}

	if (psc->sc_ihs_num > 1) {
		int vec = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
		if (psc->sc_intr_pervq)
			vec += idx;
		bus_space_write_2(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_MSIX_VECTOR, vec);
	}
}

static void
virtio_pci_set_status_10(struct virtio_softc *sc, int status)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	bus_space_tag_t iot = psc->sc_iot;
	bus_space_handle_t ioh = psc->sc_ioh;
	int old = 0;

	if (status)
		old = bus_space_read_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS);
	bus_space_write_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS,
	    status | old);
}
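/*
 * Sketch of the 1.0 feature negotiation implemented below: the 64-bit
 * feature sets are accessed as two 32-bit words through a select/window
 * register pair, roughly:
 *
 *	write DEVICE_FEATURE_SELECT = 0;  lo = read DEVICE_FEATURE;
 *	write DEVICE_FEATURE_SELECT = 1;  hi = read DEVICE_FEATURE;
 *	negotiated = (((uint64_t)hi << 32) | lo) & guest_features;
 *	write DRIVER_FEATURE_SELECT = 0;  write DRIVER_FEATURE = lo32;
 *	write DRIVER_FEATURE_SELECT = 1;  write DRIVER_FEATURE = hi32;
 *
 * followed by setting FEATURES_OK and reading the status back to verify
 * that the device accepted the selection.
 */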
static void
virtio_pci_negotiate_features_10(struct virtio_softc *sc,
    uint64_t guest_features)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	device_t self = sc->sc_dev;
	bus_space_tag_t iot = psc->sc_iot;
	bus_space_handle_t ioh = psc->sc_ioh;
	uint64_t host, negotiated, device_status;

	guest_features |= VIRTIO_F_VERSION_1;
	/* notify on empty is 0.9 only */
	guest_features &= ~VIRTIO_F_NOTIFY_ON_EMPTY;
	sc->sc_active_features = 0;

	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE_SELECT, 0);
	host = bus_space_read_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE);
	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE_SELECT, 1);
	host |= (uint64_t)
	    bus_space_read_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE) << 32;

	negotiated = host & guest_features;

	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE_SELECT, 0);
	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE,
	    negotiated & 0xffffffff);
	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE_SELECT, 1);
	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE,
	    negotiated >> 32);
	virtio_pci_set_status_10(sc, VIRTIO_CONFIG_DEVICE_STATUS_FEATURES_OK);

	device_status = bus_space_read_1(iot, ioh,
	    VIRTIO_CONFIG1_DEVICE_STATUS);
	if ((device_status & VIRTIO_CONFIG_DEVICE_STATUS_FEATURES_OK) == 0) {
		aprint_error_dev(self, "feature negotiation failed\n");
		bus_space_write_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS,
		    VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
		return;
	}

	if ((negotiated & VIRTIO_F_VERSION_1) == 0) {
		aprint_error_dev(self, "host rejected version 1\n");
		bus_space_write_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS,
		    VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
		return;
	}

	sc->sc_active_features = negotiated;
	return;
}

/* -------------------------------------
 * Generic PCI interrupt code
 * -------------------------------------*/
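/*
 * Both setup_interrupts variants below use the same write-then-read-back
 * pattern: after writing a vector index, the driver re-reads the register
 * because a device that cannot allocate the vector reports the all-ones
 * value 0xffff (NO_VECTOR in the virtio spec) instead of echoing the
 * value back.
 */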
static int
virtio_pci_setup_interrupts_10(struct virtio_softc *sc, int reinit)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	bus_space_tag_t iot = psc->sc_iot;
	bus_space_handle_t ioh = psc->sc_ioh;
	int vector, ret, qid;

	if (!virtio_pci_msix_enabled(psc))
		return 0;

	vector = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	bus_space_write_2(iot, ioh,
	    VIRTIO_CONFIG1_CONFIG_MSIX_VECTOR, vector);
	ret = bus_space_read_2(iot, ioh, VIRTIO_CONFIG1_CONFIG_MSIX_VECTOR);
	if (ret != vector) {
		VIRTIO_PCI_LOG(sc, reinit,
		    "can't set config msix vector\n");
		return -1;
	}

	for (qid = 0; qid < sc->sc_nvqs; qid++) {
		vector = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;

		if (psc->sc_intr_pervq)
			vector += qid;
		bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SELECT, qid);
		bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_MSIX_VECTOR,
		    vector);
		ret = bus_space_read_2(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_MSIX_VECTOR);
		if (ret != vector) {
			VIRTIO_PCI_LOG(sc, reinit, "can't set queue %d "
			    "msix vector\n", qid);
			return -1;
		}
	}

	return 0;
}

static int
virtio_pci_setup_interrupts_09(struct virtio_softc *sc, int reinit)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	int offset, vector, ret, qid;

	if (!virtio_pci_msix_enabled(psc))
		return 0;

	offset = VIRTIO_CONFIG_MSI_CONFIG_VECTOR;
	vector = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;

	bus_space_write_2(psc->sc_iot, psc->sc_ioh, offset, vector);
	ret = bus_space_read_2(psc->sc_iot, psc->sc_ioh, offset);
	if (ret != vector) {
		aprint_debug_dev(sc->sc_dev, "%s: expected=%d, actual=%d\n",
		    __func__, vector, ret);
		VIRTIO_PCI_LOG(sc, reinit,
		    "can't set config msix vector\n");
		return -1;
	}

	for (qid = 0; qid < sc->sc_nvqs; qid++) {
		offset = VIRTIO_CONFIG_QUEUE_SELECT;
		bus_space_write_2(psc->sc_iot, psc->sc_ioh, offset, qid);

		offset = VIRTIO_CONFIG_MSI_QUEUE_VECTOR;
		vector = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;

		if (psc->sc_intr_pervq)
			vector += qid;

		bus_space_write_2(psc->sc_iot, psc->sc_ioh, offset, vector);
		ret = bus_space_read_2(psc->sc_iot, psc->sc_ioh, offset);
		if (ret != vector) {
			aprint_debug_dev(sc->sc_dev, "%s[qid=%d]:"
			    " expected=%d, actual=%d\n",
			    __func__, qid, vector, ret);
			VIRTIO_PCI_LOG(sc, reinit, "can't set queue %d "
			    "msix vector\n", qid);
			return -1;
		}
	}

	return 0;
}
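/*
 * MSI-X vector layout used below: vector 0
 * (VIRTIO_MSIX_CONFIG_VECTOR_INDEX) handles config changes, and vector 1
 * (VIRTIO_MSIX_QUEUE_VECTOR_INDEX) handles all virtqueues, unless
 * sc_intr_pervq is set, in which case queue N gets its own vector 1 + N.
 */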
static int
virtio_pci_establish_msix_interrupts(struct virtio_softc *sc,
    struct pci_attach_args *pa)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	device_t self = sc->sc_dev;
	pci_chipset_tag_t pc = pa->pa_pc;
	struct virtqueue *vq;
	char intrbuf[PCI_INTRSTR_LEN];
	char intr_xname[INTRDEVNAMEBUF];
	char const *intrstr;
	int idx, qid, n;

	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE)
		pci_intr_setattr(pc, &psc->sc_ihp[idx], PCI_INTR_MPSAFE, true);

	snprintf(intr_xname, sizeof(intr_xname), "%s config",
	    device_xname(sc->sc_dev));

	psc->sc_ihs[idx] = pci_intr_establish_xname(pc, psc->sc_ihp[idx],
	    sc->sc_ipl, virtio_pci_msix_config_intr, sc, intr_xname);
	if (psc->sc_ihs[idx] == NULL) {
		aprint_error_dev(self,
		    "couldn't establish MSI-X for config\n");
		goto error;
	}

	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
	if (psc->sc_intr_pervq) {
		for (qid = 0; qid < sc->sc_nvqs; qid++) {
			n = idx + qid;
			vq = &sc->sc_vqs[qid];

			snprintf(intr_xname, sizeof(intr_xname), "%s vq#%d",
			    device_xname(sc->sc_dev), qid);

			if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE) {
				pci_intr_setattr(pc, &psc->sc_ihp[n],
				    PCI_INTR_MPSAFE, true);
			}

			psc->sc_ihs[n] = pci_intr_establish_xname(pc,
			    psc->sc_ihp[n], sc->sc_ipl,
			    vq->vq_intrhand, vq->vq_intrhand_arg, intr_xname);
			if (psc->sc_ihs[n] == NULL) {
				aprint_error_dev(self,
				    "couldn't establish MSI-X for a vq\n");
				goto error;
			}
		}
	} else {
		if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE)
			pci_intr_setattr(pc, &psc->sc_ihp[idx],
			    PCI_INTR_MPSAFE, true);

		snprintf(intr_xname, sizeof(intr_xname), "%s queues",
		    device_xname(sc->sc_dev));
		psc->sc_ihs[idx] = pci_intr_establish_xname(pc,
		    psc->sc_ihp[idx], sc->sc_ipl,
		    virtio_pci_msix_queue_intr, sc, intr_xname);
		if (psc->sc_ihs[idx] == NULL) {
			aprint_error_dev(self,
			    "couldn't establish MSI-X for queues\n");
			goto error;
		}
	}

	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	intrstr = pci_intr_string(pc, psc->sc_ihp[idx], intrbuf,
	    sizeof(intrbuf));
	aprint_normal_dev(self, "config interrupting at %s\n", intrstr);
	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
	if (psc->sc_intr_pervq) {
		kcpuset_t *affinity;
		int affinity_to, r;

		kcpuset_create(&affinity, false);

		for (qid = 0; qid < sc->sc_nvqs; qid++) {
			n = idx + qid;
			affinity_to = (qid / 2) % ncpu;

			intrstr = pci_intr_string(pc, psc->sc_ihp[n],
			    intrbuf, sizeof(intrbuf));

			kcpuset_zero(affinity);
			kcpuset_set(affinity, affinity_to);
			r = interrupt_distribute(psc->sc_ihs[n], affinity,
			    NULL);
			if (r == 0) {
				aprint_normal_dev(self,
				    "for vq #%d interrupting at %s"
				    " affinity to %u\n",
				    qid, intrstr, affinity_to);
			} else {
				aprint_normal_dev(self,
				    "for vq #%d interrupting at %s\n",
				    qid, intrstr);
			}
		}

		kcpuset_destroy(affinity);
	} else {
		intrstr = pci_intr_string(pc, psc->sc_ihp[idx], intrbuf,
		    sizeof(intrbuf));
		aprint_normal_dev(self, "queues interrupting at %s\n",
		    intrstr);
	}

	return 0;

error:
	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	if (psc->sc_ihs[idx] != NULL)
		pci_intr_disestablish(psc->sc_pa.pa_pc, psc->sc_ihs[idx]);
	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
	if (psc->sc_intr_pervq) {
		for (qid = 0; qid < sc->sc_nvqs; qid++) {
			n = idx + qid;
			if (psc->sc_ihs[n] == NULL)
				continue;
			pci_intr_disestablish(psc->sc_pa.pa_pc,
			    psc->sc_ihs[n]);
		}
	} else {
		if (psc->sc_ihs[idx] != NULL)
			pci_intr_disestablish(psc->sc_pa.pa_pc,
			    psc->sc_ihs[idx]);
	}

	return -1;
}

static int
virtio_pci_establish_intx_interrupt(struct virtio_softc *sc,
    struct pci_attach_args *pa)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	device_t self = sc->sc_dev;
	pci_chipset_tag_t pc = pa->pa_pc;
	char intrbuf[PCI_INTRSTR_LEN];
	char const *intrstr;

	if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE)
		pci_intr_setattr(pc, &psc->sc_ihp[0], PCI_INTR_MPSAFE, true);

	psc->sc_ihs[0] = pci_intr_establish_xname(pc, psc->sc_ihp[0],
	    sc->sc_ipl, virtio_pci_intr, sc, device_xname(sc->sc_dev));
	if (psc->sc_ihs[0] == NULL) {
		aprint_error_dev(self, "couldn't establish INTx\n");
		return -1;
	}

	intrstr = pci_intr_string(pc, psc->sc_ihp[0], intrbuf,
	    sizeof(intrbuf));
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	return 0;
}
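/*
 * Interrupt allocation strategy for virtio_pci_alloc_interrupts() below:
 * prefer MSI-X with one vector per queue plus one for config changes
 * when VIRTIO_F_INTR_PERVQ is set and the device exposes enough vectors;
 * otherwise fall back to two shared MSI-X vectors, and finally to a
 * single INTx line.
 */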
static int
virtio_pci_alloc_interrupts(struct virtio_softc *sc)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	device_t self = sc->sc_dev;
	pci_chipset_tag_t pc = psc->sc_pa.pa_pc;
	pcitag_t tag = psc->sc_pa.pa_tag;
	int error;
	int nmsix;
	int off;
	int counts[PCI_INTR_TYPE_SIZE];
	pci_intr_type_t max_type;
	pcireg_t ctl;

	nmsix = pci_msix_count(psc->sc_pa.pa_pc, psc->sc_pa.pa_tag);
	aprint_debug_dev(self, "pci_msix_count=%d\n", nmsix);

	/* We need at least two: one for config and the other for queues */
	if ((sc->sc_flags & VIRTIO_F_INTR_MSIX) == 0 || nmsix < 2) {
		/* Try INTx only */
		max_type = PCI_INTR_TYPE_INTX;
		counts[PCI_INTR_TYPE_INTX] = 1;
	} else {
		/* Try MSI-X first and INTx second */
		if (ISSET(sc->sc_flags, VIRTIO_F_INTR_PERVQ) &&
		    sc->sc_nvqs + VIRTIO_MSIX_QUEUE_VECTOR_INDEX <= nmsix) {
			nmsix = sc->sc_nvqs + VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
		} else {
			nmsix = 2;
		}

		max_type = PCI_INTR_TYPE_MSIX;
		counts[PCI_INTR_TYPE_MSIX] = nmsix;
		counts[PCI_INTR_TYPE_MSI] = 0;
		counts[PCI_INTR_TYPE_INTX] = 1;
	}

retry:
	error = pci_intr_alloc(&psc->sc_pa, &psc->sc_ihp, counts, max_type);
	if (error != 0) {
		aprint_error_dev(self, "couldn't map interrupt\n");
		return -1;
	}

	if (pci_intr_type(pc, psc->sc_ihp[0]) == PCI_INTR_TYPE_MSIX) {
		psc->sc_intr_pervq = nmsix > 2 ? true : false;
		psc->sc_ihs = kmem_zalloc(sizeof(*psc->sc_ihs) * nmsix,
		    KM_SLEEP);

		error = virtio_pci_establish_msix_interrupts(sc, &psc->sc_pa);
		if (error != 0) {
			kmem_free(psc->sc_ihs, sizeof(*psc->sc_ihs) * nmsix);
			pci_intr_release(pc, psc->sc_ihp, nmsix);

			/* Retry INTx */
			max_type = PCI_INTR_TYPE_INTX;
			counts[PCI_INTR_TYPE_INTX] = 1;
			goto retry;
		}

		psc->sc_ihs_num = nmsix;
		psc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_MSI;
		virtio_pci_adjust_config_region(psc);
	} else if (pci_intr_type(pc, psc->sc_ihp[0]) == PCI_INTR_TYPE_INTX) {
		psc->sc_intr_pervq = false;
		psc->sc_ihs = kmem_zalloc(sizeof(*psc->sc_ihs) * 1,
		    KM_SLEEP);

		error = virtio_pci_establish_intx_interrupt(sc, &psc->sc_pa);
		if (error != 0) {
			kmem_free(psc->sc_ihs, sizeof(*psc->sc_ihs) * 1);
			pci_intr_release(pc, psc->sc_ihp, 1);
			return -1;
		}

		psc->sc_ihs_num = 1;
		psc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
		virtio_pci_adjust_config_region(psc);

		/*
		 * pci_get_capability() returns nonzero when the MSI-X
		 * capability is present; in that case make sure MSI-X
		 * stays disabled, since we fell back to INTx.
		 */
		error = pci_get_capability(pc, tag, PCI_CAP_MSIX, &off, NULL);
		if (error != 0) {
			ctl = pci_conf_read(pc, tag, off + PCI_MSIX_CTL);
			ctl &= ~PCI_MSIX_CTL_ENABLE;
			pci_conf_write(pc, tag, off + PCI_MSIX_CTL, ctl);
		}
	}

	if (!psc->sc_intr_pervq)
		CLR(sc->sc_flags, VIRTIO_F_INTR_PERVQ);
	return 0;
}

static void
virtio_pci_free_interrupts(struct virtio_softc *sc)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;

	for (int i = 0; i < psc->sc_ihs_num; i++) {
		if (psc->sc_ihs[i] == NULL)
			continue;
		pci_intr_disestablish(psc->sc_pa.pa_pc, psc->sc_ihs[i]);
		psc->sc_ihs[i] = NULL;
	}

	if (psc->sc_ihs_num > 0)
		pci_intr_release(psc->sc_pa.pa_pc, psc->sc_ihp,
		    psc->sc_ihs_num);

	if (psc->sc_ihs != NULL) {
		kmem_free(psc->sc_ihs, sizeof(*psc->sc_ihs) * psc->sc_ihs_num);
		psc->sc_ihs = NULL;
	}
	psc->sc_ihs_num = 0;
}

static bool
virtio_pci_msix_enabled(struct virtio_pci_softc *psc)
{
	pci_chipset_tag_t pc = psc->sc_pa.pa_pc;

	if (pci_intr_type(pc, psc->sc_ihp[0]) == PCI_INTR_TYPE_MSIX)
		return true;

	return false;
}

/*
 * Interrupt handler.
 */
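/*
 * Reading the ISR status register both returns the pending causes and
 * acknowledges the interrupt (bit 0: virtqueue activity, bit 1,
 * VIRTIO_CONFIG_ISR_CONFIG_CHANGE: configuration change), which is why
 * virtio_pci_intr() below reads it exactly once per invocation.
 */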
static int
virtio_pci_intr(void *arg)
{
	struct virtio_softc *sc = arg;
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	int isr, r = 0;

	/* check and ack the interrupt */
	isr = bus_space_read_1(psc->sc_isr_iot, psc->sc_isr_ioh, 0);
	if (isr == 0)
		return 0;
	if ((isr & VIRTIO_CONFIG_ISR_CONFIG_CHANGE) &&
	    (sc->sc_config_change != NULL))
		r = (sc->sc_config_change)(sc);
	if (sc->sc_intrhand != NULL) {
		if (sc->sc_soft_ih != NULL)
			softint_schedule(sc->sc_soft_ih);
		else
			r |= (sc->sc_intrhand)(sc);
	}

	return r;
}

static int
virtio_pci_msix_queue_intr(void *arg)
{
	struct virtio_softc *sc = arg;
	int r = 0;

	if (sc->sc_intrhand != NULL) {
		if (sc->sc_soft_ih != NULL)
			softint_schedule(sc->sc_soft_ih);
		else
			r |= (sc->sc_intrhand)(sc);
	}

	return r;
}

static int
virtio_pci_msix_config_intr(void *arg)
{
	struct virtio_softc *sc = arg;
	int r = 0;

	if (sc->sc_config_change != NULL)
		r = (sc->sc_config_change)(sc);
	return r;
}

MODULE(MODULE_CLASS_DRIVER, virtio_pci, "pci,virtio");

#ifdef _MODULE
#include "ioconf.c"
#endif

static int
virtio_pci_modcmd(modcmd_t cmd, void *opaque)
{
	int error = 0;

#ifdef _MODULE
	switch (cmd) {
	case MODULE_CMD_INIT:
		error = config_init_component(cfdriver_ioconf_virtio_pci,
		    cfattach_ioconf_virtio_pci, cfdata_ioconf_virtio_pci);
		break;
	case MODULE_CMD_FINI:
		error = config_fini_component(cfdriver_ioconf_virtio_pci,
		    cfattach_ioconf_virtio_pci, cfdata_ioconf_virtio_pci);
		break;
	default:
		error = ENOTTY;
		break;
	}
#endif

	return error;
}