/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2006 Yahoo!, Inc.
 * All rights reserved.
 * Written by: John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Support for PCI Message Signalled Interrupts (MSI).  MSI interrupts on
 * x86 are basically APIC messages that the northbridge delivers directly
 * to the local APICs as if they had come from an I/O APIC.
 */

#include <sys/cdefs.h>
#include "opt_acpi.h"
#include "opt_iommu.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <x86/apicreg.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/frame.h>
#include <machine/intr_machdep.h>
#include <x86/apicvar.h>
#include <x86/iommu/iommu_intrmap.h>
#include <machine/specialreg.h>
#include <dev/pci/pcivar.h>

/* Fields in address for Intel MSI messages. */
#define	MSI_INTEL_ADDR_DEST		0x000ff000
#define	MSI_INTEL_ADDR_RH		0x00000008
# define MSI_INTEL_ADDR_RH_ON		0x00000008
# define MSI_INTEL_ADDR_RH_OFF		0x00000000
#define	MSI_INTEL_ADDR_DM		0x00000004
# define MSI_INTEL_ADDR_DM_PHYSICAL	0x00000000
# define MSI_INTEL_ADDR_DM_LOGICAL	0x00000004

/* Fields in data for Intel MSI messages. */
#define	MSI_INTEL_DATA_TRGRMOD		IOART_TRGRMOD	/* Trigger mode. */
# define MSI_INTEL_DATA_TRGREDG		IOART_TRGREDG
# define MSI_INTEL_DATA_TRGRLVL		IOART_TRGRLVL
#define	MSI_INTEL_DATA_LEVEL		0x00004000	/* Polarity. */
# define MSI_INTEL_DATA_DEASSERT	0x00000000
# define MSI_INTEL_DATA_ASSERT		0x00004000
#define	MSI_INTEL_DATA_DELMOD		IOART_DELMOD	/* Delivery mode. */
# define MSI_INTEL_DATA_DELFIXED	IOART_DELFIXED
# define MSI_INTEL_DATA_DELLOPRI	IOART_DELLOPRI
# define MSI_INTEL_DATA_DELSMI		IOART_DELSMI
# define MSI_INTEL_DATA_DELNMI		IOART_DELNMI
# define MSI_INTEL_DATA_DELINIT		IOART_DELINIT
# define MSI_INTEL_DATA_DELEXINT	IOART_DELEXINT
#define	MSI_INTEL_DATA_INTVEC		IOART_INTVEC	/* Interrupt vector. */

/*
 * Build Intel MSI message and data values from a source.  AMD64 systems
 * seem to be compatible, so we use the same function for both.
 */
#define	INTEL_ADDR(msi)							\
	(MSI_INTEL_ADDR_BASE | (msi)->msi_cpu << 12 |			\
	    MSI_INTEL_ADDR_RH_OFF | MSI_INTEL_ADDR_DM_PHYSICAL)
#define	INTEL_DATA(msi)							\
	(MSI_INTEL_DATA_TRGREDG | MSI_INTEL_DATA_DELFIXED | (msi)->msi_vector)
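
/*
 * Worked example (illustrative only, assuming the encodings from
 * <x86/apicreg.h>: MSI_INTEL_ADDR_BASE of 0xfee00000 and zero values for
 * the edge-trigger and fixed-delivery bits): a source with msi_cpu == 2
 * and msi_vector == 0x60 yields INTEL_ADDR() == 0xfee02000 and
 * INTEL_DATA() == 0x00000060.
 */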

static MALLOC_DEFINE(M_MSI, "msi", "PCI MSI");

/*
 * MSI sources are bunched into groups.  This is because MSI forces
 * all of the messages to share the address and data registers and
 * thus certain properties (such as the local APIC ID target on x86).
 * Each group has a 'first' source that contains information global to
 * the group.  These fields are marked with (g) below.
 *
 * Note that local APIC ID is kind of special.  Each message will be
 * assigned an ID by the system; however, a group will use the ID from
 * the first message.
 *
 * For MSI-X, each message is isolated.
 */
struct msi_intsrc {
	struct intsrc msi_intsrc;
	device_t msi_dev;		/* Owning device. (g) */
	struct msi_intsrc *msi_first;	/* First source in group. */
	u_int *msi_irqs;		/* Group's IRQ list. (g) */
	u_int msi_irq;			/* IRQ cookie. */
	u_int msi_cpu;			/* Local APIC ID. (g) */
	u_int msi_remap_cookie;		/* IOMMU cookie. */
	u_int msi_vector:8;		/* IDT vector. */
	u_int msi_count:8;		/* Messages in this group. (g) */
	u_int msi_maxcount:8;		/* Alignment for this group. (g) */
	u_int msi_enabled:8;		/* Enabled messages in this group. (g) */
	bool msi_msix;			/* MSI-X message. */
};
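
/*
 * Driver-side sketch (illustrative, not part of this file): a consumer
 * typically requests a block of messages with pci_alloc_msi(9), e.g.
 * "count = 4; pci_alloc_msi(dev, &count);".  That request eventually
 * lands in msi_alloc() below, which builds one group: the source for the
 * first IRQ holds the shared CPU, count, and alignment, and the remaining
 * sources point back at it through msi_first.
 */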

static void	msi_create_source(void);
static void	msi_enable_source(struct intsrc *isrc);
static void	msi_disable_source(struct intsrc *isrc, int eoi);
static void	msi_eoi_source(struct intsrc *isrc);
static void	msi_enable_intr(struct intsrc *isrc);
static void	msi_disable_intr(struct intsrc *isrc);
static int	msi_vector(struct intsrc *isrc);
static int	msi_source_pending(struct intsrc *isrc);
static int	msi_config_intr(struct intsrc *isrc, enum intr_trigger trig,
		    enum intr_polarity pol);
static int	msi_assign_cpu(struct intsrc *isrc, u_int apic_id);

struct pic msi_pic = {
	.pic_enable_source = msi_enable_source,
	.pic_disable_source = msi_disable_source,
	.pic_eoi_source = msi_eoi_source,
	.pic_enable_intr = msi_enable_intr,
	.pic_disable_intr = msi_disable_intr,
	.pic_vector = msi_vector,
	.pic_source_pending = msi_source_pending,
	.pic_suspend = NULL,
	.pic_resume = NULL,
	.pic_config_intr = msi_config_intr,
	.pic_assign_cpu = msi_assign_cpu,
	.pic_reprogram_pin = NULL,
};

u_int first_msi_irq;
SYSCTL_UINT(_machdep, OID_AUTO, first_msi_irq, CTLFLAG_RD, &first_msi_irq, 0,
    "Number of first IRQ reserved for MSI and MSI-X interrupts");

u_int num_msi_irqs = 2048;
SYSCTL_UINT(_machdep, OID_AUTO, num_msi_irqs, CTLFLAG_RDTUN, &num_msi_irqs, 0,
    "Number of IRQs reserved for MSI and MSI-X interrupts");

#ifdef SMP
/**
 * Xen hypervisors prior to 4.6.0 do not properly handle updates to
 * enabled MSI-X table entries.  Allow migration of MSI-X interrupts
 * to be disabled via a tunable.  Values have the following meaning:
 *
 * -1: automatic detection by FreeBSD
 *  0: enable migration
 *  1: disable migration
 */
int msix_disable_migration = -1;
SYSCTL_INT(_machdep, OID_AUTO, disable_msix_migration, CTLFLAG_RDTUN,
    &msix_disable_migration, 0,
    "Disable migration of MSI-X interrupts between CPUs");
#endif
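
/*
 * Illustrative use: because the OID above is CTLFLAG_RDTUN, migration can
 * be forced off from loader.conf(5) with
 * "machdep.disable_msix_migration=1" (or forced on with 0), overriding the
 * automatic default chosen in msi_init().
 */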

static int msi_enabled;
static u_int msi_last_irq;
static struct mtx msi_lock;

static void
msi_enable_source(struct intsrc *isrc)
{
}

static void
msi_disable_source(struct intsrc *isrc, int eoi)
{

	if (eoi == PIC_EOI)
		lapic_eoi();
}

static void
msi_eoi_source(struct intsrc *isrc)
{

	lapic_eoi();
}

static void
msi_enable_intr(struct intsrc *isrc)
{
	struct msi_intsrc *msi = (struct msi_intsrc *)isrc;

	msi = msi->msi_first;
	if (msi->msi_enabled == 0) {
		for (u_int i = 0; i < msi->msi_count; i++)
			apic_enable_vector(msi->msi_cpu, msi->msi_vector + i);
	}
	msi->msi_enabled++;
}

static void
msi_disable_intr(struct intsrc *isrc)
{
	struct msi_intsrc *msi = (struct msi_intsrc *)isrc;

	msi = msi->msi_first;
	msi->msi_enabled--;
	if (msi->msi_enabled == 0) {
		for (u_int i = 0; i < msi->msi_count; i++)
			apic_disable_vector(msi->msi_cpu, msi->msi_vector + i);
	}
}

static int
msi_vector(struct intsrc *isrc)
{
	struct msi_intsrc *msi = (struct msi_intsrc *)isrc;

	return (msi->msi_irq);
}

static int
msi_source_pending(struct intsrc *isrc)
{

	return (0);
}

static int
msi_config_intr(struct intsrc *isrc, enum intr_trigger trig,
    enum intr_polarity pol)
{

	return (ENODEV);
}

static int
msi_assign_cpu(struct intsrc *isrc, u_int apic_id)
{
	struct msi_intsrc *sib, *msi = (struct msi_intsrc *)isrc;
	int old_vector;
	u_int old_id;
	int error, i, vector;

	/*
	 * Only allow CPUs to be assigned to the first message for an
	 * MSI group.
	 */
	if (msi->msi_first != msi)
		return (EINVAL);

#ifdef SMP
	if (msix_disable_migration && msi->msi_msix)
		return (EINVAL);
#endif

	/* Store information to free existing irq. */
	old_vector = msi->msi_vector;
	old_id = msi->msi_cpu;
	if (old_id == apic_id)
		return (0);

	/* Allocate IDT vectors on this cpu. */
	if (msi->msi_count > 1) {
		KASSERT(!msi->msi_msix, ("MSI-X message group"));
		vector = apic_alloc_vectors(apic_id, msi->msi_irqs,
		    msi->msi_count, msi->msi_maxcount);
	} else
		vector = apic_alloc_vector(apic_id, msi->msi_irq);
	if (vector == 0)
		return (ENOSPC);

	/* Must be set before BUS_REMAP_INTR as it may call back into MSI. */
	msi->msi_cpu = apic_id;
	msi->msi_vector = vector;
	if (msi->msi_enabled > 0) {
		for (i = 0; i < msi->msi_count; i++)
			apic_enable_vector(apic_id, vector + i);
	}
	error = BUS_REMAP_INTR(device_get_parent(msi->msi_dev), msi->msi_dev,
	    msi->msi_irq);
	if (error == 0) {
		if (bootverbose) {
			printf("msi: Assigning %s IRQ %d to local APIC %u vector %u\n",
			    msi->msi_msix ? "MSI-X" : "MSI", msi->msi_irq,
			    msi->msi_cpu, msi->msi_vector);
		}
		for (i = 1; i < msi->msi_count; i++) {
			sib = (struct msi_intsrc *)intr_lookup_source(
			    msi->msi_irqs[i]);
			sib->msi_cpu = apic_id;
			sib->msi_vector = vector + i;
			if (bootverbose)
				printf("msi: Assigning MSI IRQ %d to local APIC %u vector %u\n",
				    sib->msi_irq, sib->msi_cpu,
				    sib->msi_vector);
		}
	} else {
		device_printf(msi->msi_dev,
		    "remap irq %u to APIC ID %u failed (error %d)\n",
		    msi->msi_irq, apic_id, error);
		msi->msi_cpu = old_id;
		msi->msi_vector = old_vector;
		old_id = apic_id;
		old_vector = vector;
	}

	/*
	 * Free the old vector after the new one is established.  This is done
	 * to prevent races where we could miss an interrupt.  If BUS_REMAP_INTR
	 * failed then we disable and free the new, unused vector(s).
	 */
	if (msi->msi_enabled > 0) {
		for (i = 0; i < msi->msi_count; i++)
			apic_disable_vector(old_id, old_vector + i);
	}
	apic_free_vector(old_id, old_vector, msi->msi_irq);
	for (i = 1; i < msi->msi_count; i++)
		apic_free_vector(old_id, old_vector + i, msi->msi_irqs[i]);
	return (error);
}
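
/*
 * Illustrative path into msi_assign_cpu() (not part of this file): binding
 * an MSI or MSI-X interrupt to a CPU, for example administratively with
 * "cpuset -l 2 -x <irq>", is funneled through the interrupt framework to
 * the pic_assign_cpu method above; only the first message of a group may
 * be retargeted.
 */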

void
msi_init(void)
{

	/* Check if we have a supported CPU. */
	switch (cpu_vendor_id) {
	case CPU_VENDOR_INTEL:
	case CPU_VENDOR_AMD:
	case CPU_VENDOR_HYGON:
		break;
	case CPU_VENDOR_CENTAUR:
		if (CPUID_TO_FAMILY(cpu_id) == 0x6 &&
		    CPUID_TO_MODEL(cpu_id) >= 0xf)
			break;
		/* FALLTHROUGH */
	default:
		return;
	}

#ifdef SMP
	if (msix_disable_migration == -1) {
		/* The default is to allow migration of MSI-X interrupts. */
		msix_disable_migration = 0;
	}
#endif

	if (num_msi_irqs == 0)
		return;

	first_msi_irq = num_io_irqs;
	if (num_msi_irqs > UINT_MAX - first_msi_irq)
		panic("num_msi_irqs too high");
	num_io_irqs = first_msi_irq + num_msi_irqs;

	msi_enabled = 1;
	intr_register_pic(&msi_pic);
	mtx_init(&msi_lock, "msi", NULL, MTX_DEF);
}

static void
msi_create_source(void)
{
	struct msi_intsrc *msi;
	u_int irq;

	mtx_lock(&msi_lock);
	if (msi_last_irq >= num_msi_irqs) {
		mtx_unlock(&msi_lock);
		return;
	}
	irq = msi_last_irq + first_msi_irq;
	msi_last_irq++;
	mtx_unlock(&msi_lock);

	msi = malloc(sizeof(struct msi_intsrc), M_MSI, M_WAITOK | M_ZERO);
	msi->msi_intsrc.is_pic = &msi_pic;
	msi->msi_irq = irq;
	intr_register_source(&msi->msi_intsrc);
	nexus_add_irq(irq);
}
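
/*
 * Illustrative entry path (not part of this file): a driver's call to
 * pci_alloc_msi(9) travels up the pcib chain via PCIB_ALLOC_MSI() until
 * the x86 nexus hands the request to msi_alloc() below.
 */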

/*
 * Try to allocate 'count' interrupt sources with contiguous IDT values.
 */
int
msi_alloc(device_t dev, int count, int maxcount, int *irqs)
{
	struct msi_intsrc *msi, *fsrc;
	u_int cpu, domain, *mirqs;
	int cnt, i, vector;
#ifdef IOMMU
	u_int cookies[count];
	int error;
#endif

	if (!msi_enabled)
		return (ENXIO);

	if (bus_get_domain(dev, &domain) != 0)
		domain = 0;

	if (count > 1)
		mirqs = malloc(count * sizeof(*mirqs), M_MSI, M_WAITOK);
	else
		mirqs = NULL;
again:
	mtx_lock(&msi_lock);

	/* Try to find 'count' free IRQs. */
	cnt = 0;
	for (i = first_msi_irq; i < first_msi_irq + num_msi_irqs; i++) {
		msi = (struct msi_intsrc *)intr_lookup_source(i);

		/* End of allocated sources, so break. */
		if (msi == NULL)
			break;

		/* If this is a free one, save its IRQ in the array. */
		if (msi->msi_dev == NULL) {
			irqs[cnt] = i;
			cnt++;
			if (cnt == count)
				break;
		}
	}

	/* Do we need to create some new sources? */
	if (cnt < count) {
		/* If we would exceed the max, give up. */
		if (i + (count - cnt) > first_msi_irq + num_msi_irqs) {
			mtx_unlock(&msi_lock);
			free(mirqs, M_MSI);
			return (ENXIO);
		}
		mtx_unlock(&msi_lock);

		/* We need count - cnt more sources. */
		while (cnt < count) {
			msi_create_source();
			cnt++;
		}
		goto again;
	}

	/* Ok, we now have the IRQs allocated. */
	KASSERT(cnt == count, ("count mismatch"));

	/* Allocate 'count' IDT vectors. */
	cpu = intr_next_cpu(domain);
	vector = apic_alloc_vectors(cpu, irqs, count, maxcount);
	if (vector == 0) {
		mtx_unlock(&msi_lock);
		free(mirqs, M_MSI);
		return (ENOSPC);
	}

#ifdef IOMMU
	mtx_unlock(&msi_lock);
	error = iommu_alloc_msi_intr(dev, cookies, count);
	mtx_lock(&msi_lock);
	if (error == EOPNOTSUPP)
		error = 0;
	if (error != 0) {
		for (i = 0; i < count; i++)
			apic_free_vector(cpu, vector + i, irqs[i]);
		mtx_unlock(&msi_lock);
		free(mirqs, M_MSI);
		return (error);
	}
	for (i = 0; i < count; i++) {
		msi = (struct msi_intsrc *)intr_lookup_source(irqs[i]);
		msi->msi_remap_cookie = cookies[i];
	}
#endif

	/* Assign IDT vectors and make these messages owned by 'dev'. */
	fsrc = (struct msi_intsrc *)intr_lookup_source(irqs[0]);
	for (i = 0; i < count; i++) {
		msi = (struct msi_intsrc *)intr_lookup_source(irqs[i]);
		msi->msi_cpu = cpu;
		msi->msi_dev = dev;
		msi->msi_vector = vector + i;
		if (bootverbose)
			printf(
		    "msi: routing MSI IRQ %d to local APIC %u vector %u\n",
			    msi->msi_irq, msi->msi_cpu, msi->msi_vector);
		msi->msi_first = fsrc;
		KASSERT(msi->msi_intsrc.is_handlers == 0,
		    ("dead MSI has handlers"));
	}
	fsrc->msi_count = count;
	fsrc->msi_maxcount = maxcount;
	if (count > 1)
		bcopy(irqs, mirqs, count * sizeof(*mirqs));
	fsrc->msi_irqs = mirqs;
	mtx_unlock(&msi_lock);
	return (0);
}

int
msi_release(int *irqs, int count)
{
	struct msi_intsrc *msi, *first;
	int i;

	mtx_lock(&msi_lock);
	first = (struct msi_intsrc *)intr_lookup_source(irqs[0]);
	if (first == NULL) {
		mtx_unlock(&msi_lock);
		return (ENOENT);
	}

	/* Make sure this isn't an MSI-X message. */
	if (first->msi_msix) {
		mtx_unlock(&msi_lock);
		return (EINVAL);
	}

	/* Make sure this message is allocated to a group. */
	if (first->msi_first == NULL) {
		mtx_unlock(&msi_lock);
		return (ENXIO);
	}

	/*
	 * Make sure this is the start of a group and that we are releasing
	 * the entire group.
	 */
	if (first->msi_first != first || first->msi_count != count) {
		mtx_unlock(&msi_lock);
		return (EINVAL);
	}
	KASSERT(first->msi_dev != NULL, ("unowned group"));

	/* Clear all the extra messages in the group. */
	for (i = 1; i < count; i++) {
		msi = (struct msi_intsrc *)intr_lookup_source(irqs[i]);
		KASSERT(msi->msi_first == first, ("message not in group"));
		KASSERT(msi->msi_dev == first->msi_dev, ("owner mismatch"));
#ifdef IOMMU
		mtx_unlock(&msi_lock);
		iommu_unmap_msi_intr(first->msi_dev, msi->msi_remap_cookie);
		mtx_lock(&msi_lock);
#endif
		msi->msi_first = NULL;
		msi->msi_dev = NULL;
		apic_free_vector(msi->msi_cpu, msi->msi_vector, msi->msi_irq);
		msi->msi_vector = 0;
	}

	/* Clear out the first message. */
#ifdef IOMMU
	mtx_unlock(&msi_lock);
	iommu_unmap_msi_intr(first->msi_dev, first->msi_remap_cookie);
	mtx_lock(&msi_lock);
#endif
	first->msi_first = NULL;
	first->msi_dev = NULL;
	apic_free_vector(first->msi_cpu, first->msi_vector, first->msi_irq);
	first->msi_vector = 0;
	first->msi_count = 0;
	first->msi_maxcount = 0;
	free(first->msi_irqs, M_MSI);
	first->msi_irqs = NULL;

	mtx_unlock(&msi_lock);
	return (0);
}
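
/*
 * Illustrative consumer (not part of this file): when the PCI bus code
 * enables MSI or MSI-X for a device it calls down through PCIB_MAP_MSI(),
 * and the address/data pair produced by msi_map() below is what gets
 * written into the device's MSI capability registers or MSI-X table entry.
 */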

int
msi_map(int irq, uint64_t *addr, uint32_t *data)
{
	struct msi_intsrc *msi;
	int error;
#ifdef IOMMU
	struct msi_intsrc *msi1;
	int i, k;
#endif

	mtx_lock(&msi_lock);
	msi = (struct msi_intsrc *)intr_lookup_source(irq);
	if (msi == NULL) {
		mtx_unlock(&msi_lock);
		return (ENOENT);
	}

	/* Make sure this message is allocated to a device. */
	if (msi->msi_dev == NULL) {
		mtx_unlock(&msi_lock);
		return (ENXIO);
	}

	/*
	 * If this message isn't an MSI-X message, make sure it's part
	 * of a group, and switch to the first message in the
	 * group.
	 */
	if (!msi->msi_msix) {
		if (msi->msi_first == NULL) {
			mtx_unlock(&msi_lock);
			return (ENXIO);
		}
		msi = msi->msi_first;
	}

#ifdef IOMMU
	if (!msi->msi_msix) {
		for (k = msi->msi_count - 1, i = first_msi_irq; k > 0 &&
		    i < first_msi_irq + num_msi_irqs; i++) {
			if (i == msi->msi_irq)
				continue;
			msi1 = (struct msi_intsrc *)intr_lookup_source(i);
			if (!msi1->msi_msix && msi1->msi_first == msi) {
				mtx_unlock(&msi_lock);
				iommu_map_msi_intr(msi1->msi_dev,
				    msi1->msi_cpu, msi1->msi_vector,
				    msi1->msi_remap_cookie, NULL, NULL);
				k--;
				mtx_lock(&msi_lock);
			}
		}
	}
	mtx_unlock(&msi_lock);
	error = iommu_map_msi_intr(msi->msi_dev, msi->msi_cpu,
	    msi->msi_vector, msi->msi_remap_cookie, addr, data);
#else
	mtx_unlock(&msi_lock);
	error = EOPNOTSUPP;
#endif
	if (error == EOPNOTSUPP && msi->msi_cpu > 0xff) {
		printf("%s: unsupported destination APIC ID %u\n", __func__,
		    msi->msi_cpu);
		error = EINVAL;
	}
	if (error == EOPNOTSUPP) {
		*addr = INTEL_ADDR(msi);
		*data = INTEL_DATA(msi);
		error = 0;
	}
	return (error);
}

int
msix_alloc(device_t dev, int *irq)
{
	struct msi_intsrc *msi;
	u_int cpu, domain;
	int i, vector;
#ifdef IOMMU
	u_int cookie;
	int error;
#endif

	if (!msi_enabled)
		return (ENXIO);

	if (bus_get_domain(dev, &domain) != 0)
		domain = 0;

again:
	mtx_lock(&msi_lock);

	/* Find a free IRQ. */
	for (i = first_msi_irq; i < first_msi_irq + num_msi_irqs; i++) {
		msi = (struct msi_intsrc *)intr_lookup_source(i);

		/* End of allocated sources, so break. */
		if (msi == NULL)
			break;

		/* Stop at the first free source. */
		if (msi->msi_dev == NULL)
			break;
	}

	/* Are all IRQs in use? */
	if (i == first_msi_irq + num_msi_irqs) {
		mtx_unlock(&msi_lock);
		return (ENXIO);
	}

	/* Do we need to create a new source? */
	if (msi == NULL) {
		mtx_unlock(&msi_lock);

		/* Create a new source. */
		msi_create_source();
		goto again;
	}

	/* Allocate an IDT vector. */
	cpu = intr_next_cpu(domain);
	vector = apic_alloc_vector(cpu, i);
	if (vector == 0) {
		mtx_unlock(&msi_lock);
		return (ENOSPC);
	}

	msi->msi_dev = dev;
#ifdef IOMMU
	mtx_unlock(&msi_lock);
	error = iommu_alloc_msi_intr(dev, &cookie, 1);
	mtx_lock(&msi_lock);
	if (error == EOPNOTSUPP)
		error = 0;
	if (error != 0) {
		msi->msi_dev = NULL;
		apic_free_vector(cpu, vector, i);
		mtx_unlock(&msi_lock);
		return (error);
	}
	msi->msi_remap_cookie = cookie;
#endif

	if (bootverbose)
		printf("msi: routing MSI-X IRQ %d to local APIC %u vector %u\n",
		    msi->msi_irq, cpu, vector);

	/* Setup source. */
	msi->msi_cpu = cpu;
	msi->msi_first = msi;
	msi->msi_vector = vector;
	msi->msi_msix = true;
	msi->msi_count = 1;
	msi->msi_maxcount = 1;
	msi->msi_irqs = NULL;

	KASSERT(msi->msi_intsrc.is_handlers == 0, ("dead MSI-X has handlers"));
	mtx_unlock(&msi_lock);

	*irq = i;
	return (0);
}

int
msix_release(int irq)
{
	struct msi_intsrc *msi;

	mtx_lock(&msi_lock);
	msi = (struct msi_intsrc *)intr_lookup_source(irq);
	if (msi == NULL) {
		mtx_unlock(&msi_lock);
		return (ENOENT);
	}

	/* Make sure this is an MSI-X message. */
	if (!msi->msi_msix) {
		mtx_unlock(&msi_lock);
		return (EINVAL);
	}

	KASSERT(msi->msi_dev != NULL, ("unowned message"));

	/* Clear out the message. */
#ifdef IOMMU
	mtx_unlock(&msi_lock);
	iommu_unmap_msi_intr(msi->msi_dev, msi->msi_remap_cookie);
	mtx_lock(&msi_lock);
#endif
	msi->msi_first = NULL;
	msi->msi_dev = NULL;
	apic_free_vector(msi->msi_cpu, msi->msi_vector, msi->msi_irq);
	msi->msi_vector = 0;
	msi->msi_msix = false;
	msi->msi_count = 0;
	msi->msi_maxcount = 0;

	mtx_unlock(&msi_lock);
	return (0);
}