Lines Matching +full:pci +full:- +full:domain

1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
51 #include <dev/pci/pcireg.h>
52 #include <dev/pci/pcivar.h>
68 * IOMMU units from Intel VT-d.
72 iommu_bus_dma_is_dev_disabled(int domain, int bus, int slot, int func)
92 snprintf(str, sizeof(str), "hw.busdma.pci%d.%d.%d.%d",
93 domain, bus, slot, func);
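[Editor's note: the tunable name built here is keyed by the device's full PCI location, so individual functions can be steered away from (or onto) IOMMU translation at boot. A hypothetical loader.conf entry for domain 0, bus 0, slot 31, function 0 follows; the accepted value strings (such as "bounce") are an assumption from the surrounding driver and do not appear in the matched lines.]

    # /boot/loader.conf (illustrative; value string is an assumption)
    hw.busdma.pci0.0.31.0="bounce"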
110 * the IOMMU unit and used for page table lookup. PCI bridges may take
114 * domain, and must collectively be assigned to use either IOMMU or
121 device_t l, pci, pcib, pcip, pcibp, requester;
126 pci_class = devclass_find("pci");
129 pci = device_get_parent(dev);
130 if (pci == NULL || device_get_devclass(pci) != pci_class) {
143 pci = device_get_parent(l);
144 KASSERT(pci != NULL, ("iommu_get_requester(%s): NULL parent "
146 KASSERT(device_get_devclass(pci) == pci_class,
147 ("iommu_get_requester(%s): non-pci parent %s for %s",
148 device_get_name(dev), device_get_name(pci),
151 pcib = device_get_parent(pci);
153 "for %s", device_get_name(dev), device_get_name(pci)));
156 * The parent of our "bridge" isn't another PCI bus,
157 * so pcib isn't a PCI->PCI bridge but rather a host
170 * unlikely) to have a PCI->PCIe bridge
185 * Check for a buggy PCIe/PCI bridge that
188 * PCI bridge, then we know pcib is actually a
189 * PCIe/PCI bridge.
204 * PCIe->PCI bridge. Assume that the
210 * the bridge is PCIe->PCI-X, and the
215 * non-taken transactions.
223 * conventional PCI->PCI bridge, which
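[Editor's note: the bridge cases described in these comments reduce to an upward walk over the PCI hierarchy. The sketch below captures its shape only; bridge_takes_ownership() is a hypothetical predicate standing in for the PCIe/PCI and PCI-X checks the real iommu_get_requester() performs.]

    /* Sketch only: ascend one PCI bus per iteration until the host bridge. */
    device_t l, pci, pcib, requester;

    requester = dev;
    for (l = dev;; l = pcib) {
        pci = device_get_parent(l);        /* the pciN bus above l */
        pcib = device_get_parent(pci);     /* the bridge providing that bus */
        if (device_get_devclass(device_get_parent(pcib)) !=
            devclass_find("pci"))
            break;                         /* host bridge reached */
        if (bridge_takes_ownership(pcib))  /* hypothetical predicate */
            requester = pcib;              /* bridge rewrites the RID */
    }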
264 if ((ctx->flags & IOMMU_CTX_DISABLED) == 0) {
265 ctx->flags |= IOMMU_CTX_DISABLED;
284 if (!unit->dma_enabled)
301 res = (bus_dma_tag_t)ctx->tag;
313 if (device_get_devclass(parent) != devclass_find("pci"))
324 "iommu%d pci%d:%d:%d requested buswide busdma\n",
325 unit->unit, busno, slot, func);
339 unit->buswide_ctxs[busno / NBBY / sizeof(uint32_t)] |=
349 return ((unit->buswide_ctxs[busno / NBBY / sizeof(uint32_t)] &
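[Editor's note: the index arithmetic on the two lines above works out to one bit per bus number, with NBBY * sizeof(uint32_t) = 32 bus numbers per word. A worked sketch of the same set/test pair, assuming PCI_BUSMAX (255) as the upper bound:]

    /* For busno = 130: word 130/32 = 4, bit 130%32 = 2. */
    uint32_t buswide_ctxs[(PCI_BUSMAX + 1) / 32];

    buswide_ctxs[busno / 32] |= 1U << (busno % 32);               /* mark */
    bool wide = (buswide_ctxs[busno / 32] & (1U << (busno % 32))) != 0;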
369 &((struct bus_dma_tag_iommu *)parent)->common : NULL, alignment,
377 newtag->common.impl = &bus_dma_iommu_impl;
378 newtag->ctx = oldtag->ctx;
379 newtag->owner = oldtag->owner;
384 __func__, newtag, (newtag != NULL ? newtag->common.flags : 0),
408 if (dmat->map_count != 0) {
412 ctx = dmat->ctx;
413 if (dmat == ctx->tag) {
414 iommu = ctx->domain->iommu;
416 iommu_free_ctx_locked(iommu, dmat->ctx);
418 free(dmat->segments, M_IOMMU_DMAMAP);
441 DOMAINSET_PREF(tag->common.domain), M_NOWAIT | M_ZERO);
446 if (tag->segments == NULL) {
447 tag->segments = malloc_domainset(sizeof(bus_dma_segment_t) *
448 tag->common.nsegments, M_IOMMU_DMAMAP,
449 DOMAINSET_PREF(tag->common.domain), M_NOWAIT);
450 if (tag->segments == NULL) {
457 TAILQ_INIT(&map->map_entries);
458 map->tag = tag;
459 map->locked = true;
460 map->cansleep = false;
461 tag->map_count++;
477 if (!TAILQ_EMPTY(&map->map_entries)) {
484 tag->map_count--;
510 if (tag->common.maxsize < PAGE_SIZE &&
511 tag->common.alignment <= tag->common.maxsize &&
513 *vaddr = malloc_domainset(tag->common.maxsize, M_DEVBUF,
514 DOMAINSET_PREF(tag->common.domain), mflags);
515 map->flags |= BUS_DMAMAP_IOMMU_MALLOC;
518 DOMAINSET_PREF(tag->common.domain), tag->common.maxsize,
520 map->flags |= BUS_DMAMAP_IOMMU_KMEM_ALLOC;
539 if ((map->flags & BUS_DMAMAP_IOMMU_MALLOC) != 0) {
541 map->flags &= ~BUS_DMAMAP_IOMMU_MALLOC;
543 KASSERT((map->flags & BUS_DMAMAP_IOMMU_KMEM_ALLOC) != 0,
545 kmem_free(vaddr, tag->common.maxsize);
546 map->flags &= ~BUS_DMAMAP_IOMMU_KMEM_ALLOC;
559 struct iommu_domain *domain;
566 segs = tag->segments;
567 ctx = tag->ctx;
568 domain = ctx->domain;
576 if (seg >= tag->common.nsegments) {
580 buflen1 = buflen > tag->common.maxsegsz ?
581 tag->common.maxsegsz : buflen;
587 gas_flags = map->cansleep ? IOMMU_MF_CANWAIT : 0;
588 if (seg + 1 < tag->common.nsegments)
591 error = iommu_gas_map(domain, &tag->common, buflen1,
596 if (buflen1 > entry->end - entry->start - offset)
597 buflen1 = entry->end - entry->start - offset;
599 KASSERT(vm_addr_align_ok(entry->start + offset,
600 tag->common.alignment),
602 "align 0x%jx", ctx, (uintmax_t)entry->start, offset,
603 (uintmax_t)tag->common.alignment));
604 KASSERT(entry->end <= tag->common.lowaddr ||
605 entry->start >= tag->common.highaddr,
608 (uintmax_t)entry->start, (uintmax_t)entry->end,
609 (uintmax_t)tag->common.lowaddr,
610 (uintmax_t)tag->common.highaddr));
611 KASSERT(vm_addr_bound_ok(entry->start + offset, buflen1,
612 tag->common.boundary),
614 "boundary 0x%jx", ctx, (uintmax_t)entry->start,
615 (uintmax_t)entry->end, (uintmax_t)tag->common.boundary));
616 KASSERT(buflen1 <= tag->common.maxsegsz,
619 (uintmax_t)entry->start, (uintmax_t)entry->end,
620 (uintmax_t)buflen1, (uintmax_t)tag->common.maxsegsz));
622 KASSERT((entry->flags & IOMMU_MAP_ENTRY_MAP) != 0,
626 segs[seg].ds_addr = entry->start + offset;
632 buflen -= buflen1;
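[Editor's note: the KASSERTs in this loop verify alignment and boundary crossing with two small VM-layer predicates. Their checks amount to the following paraphrase; the names here are illustrative, not the exact vm_extern.h definitions.]

    static inline bool
    align_ok(vm_paddr_t pa, u_long alignment)     /* power-of-2 alignment */
    {
        return ((pa & (alignment - 1)) == 0);
    }

    static inline bool
    bound_ok(vm_paddr_t pa, vm_size_t size, vm_paddr_t boundary)
    {
        /* The segment must not cross a boundary-aligned line. */
        return (boundary == 0 || (pa & (boundary - 1)) + size <= boundary);
    }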
645 struct iommu_domain *domain;
649 ctx = tag->ctx;
650 domain = ctx->domain;
651 atomic_add_long(&ctx->loads, 1);
658 TAILQ_CONCAT(&map->map_entries, &entries, dmamap_link);
666 IOMMU_DOMAIN_LOCK(domain);
667 TAILQ_CONCAT(&domain->unload_entries, &entries, dmamap_link);
668 IOMMU_DOMAIN_UNLOCK(domain);
669 taskqueue_enqueue(domain->iommu->delayed_taskqueue,
670 &domain->unload_task);
674 !map->cansleep)
677 iommu_bus_schedule_dmamap(domain->iommu, map);
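[Editor's note: when the caller cannot sleep and guest address space or page-table pages are short, the load is handed to the unit's taskqueue and completes later through the callback, which is the standard busdma deferral contract. A hypothetical driver-side call; sc and my_dma_callback are made-up names.]

    error = bus_dmamap_load(sc->dmat, sc->map, sc->buf, sc->buflen,
        my_dma_callback, sc, BUS_DMA_NOWAIT);
    if (error == EINPROGRESS)
        return;    /* my_dma_callback fires from the iommu taskqueue */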
711 ma_cnt = OFF_TO_IDX(pend - pstart);
712 mflags = map->cansleep ? M_WAITOK : M_NOWAIT;
762 ma_cnt = OFF_TO_IDX(pend - pstart);
763 mflags = map->cansleep ? M_WAITOK : M_NOWAIT;
808 map->mem = *mem;
809 map->tag = (struct bus_dma_tag_iommu *)dmat;
810 map->callback = callback;
811 map->callback_arg = callback_arg;
824 if (!map->locked) {
825 KASSERT(map->cansleep,
832 (tag->common.lockfunc)(tag->common.lockfuncarg, BUS_DMA_LOCK);
833 map->locked = true;
837 segs = tag->segments;
857 struct iommu_domain *domain;
862 ctx = tag->ctx;
863 domain = ctx->domain;
864 atomic_add_long(&ctx->unloads, 1);
868 TAILQ_CONCAT(&entries, &map->map_entries, dmamap_link);
871 IOMMU_DOMAIN_LOCK(domain);
872 TAILQ_CONCAT(&domain->unload_entries, &entries, dmamap_link);
873 IOMMU_DOMAIN_UNLOCK(domain);
874 taskqueue_enqueue(domain->iommu->delayed_taskqueue,
875 &domain->unload_task);
878 iommu_domain_unload(domain, &entries, false);
891 kmsan_bus_dmamap_sync(&map->kmsan_mem, op);
903 memcpy(&map->kmsan_mem, mem, sizeof(struct memdesc));
937 while ((map = TAILQ_FIRST(&unit->delayed_maps)) != NULL) {
938 TAILQ_REMOVE(&unit->delayed_maps, map, delay_link);
940 tag = map->tag;
941 map->cansleep = true;
942 map->locked = false;
944 &map->mem, map->callback, map->callback_arg,
946 map->cansleep = false;
947 if (map->locked) {
948 (tag->common.lockfunc)(tag->common.lockfuncarg,
951 map->locked = true;
952 map->cansleep = false;
962 map->locked = false;
964 TAILQ_INSERT_TAIL(&unit->delayed_maps, map, delay_link);
966 taskqueue_enqueue(unit->delayed_taskqueue, &unit->dmamap_load_task);
974 unit->dma_enabled = 0;
975 error = TUNABLE_INT_FETCH("hw.iommu.dma", &unit->dma_enabled);
977 TUNABLE_INT_FETCH("hw.dmar.dma", &unit->dma_enabled);
978 SYSCTL_ADD_INT(&unit->sysctl_ctx,
979 SYSCTL_CHILDREN(device_get_sysctl_tree(unit->dev)),
980 OID_AUTO, "dma", CTLFLAG_RD, &unit->dma_enabled, 0,
982 TAILQ_INIT(&unit->delayed_maps);
983 TASK_INIT(&unit->dmamap_load_task, 0, iommu_bus_task_dmamap, unit);
984 unit->delayed_taskqueue = taskqueue_create("iommu", M_WAITOK,
985 taskqueue_thread_enqueue, &unit->delayed_taskqueue);
986 taskqueue_start_threads(&unit->delayed_taskqueue, 1, PI_DISK,
987 "iommu%d busdma taskq", unit->unit);
995 if (unit->delayed_taskqueue == NULL)
998 taskqueue_drain(unit->delayed_taskqueue, &unit->dmamap_load_task);
999 taskqueue_free(unit->delayed_taskqueue);
1000 unit->delayed_taskqueue = NULL;
1011 struct iommu_domain *domain;
1025 if (tc->impl != &bus_dma_iommu_impl)
1029 ctx = tag->ctx;
1030 domain = ctx->domain;
1034 entry = iommu_gas_alloc_entry(domain, waitok ? IOMMU_PGF_WAITOK : 0);
[Editor's note: as shown in the corrected line above, IOMMU_PGF_WAITOK must be passed when waitok is true; the original conditional passed it only in the non-sleepable case, inverting the flag.]
1037 entry->start = start;
1038 entry->end = start + length;
1046 ma[i] = vm_page_getfake(entry->start + PAGE_SIZE * i,
1049 error = iommu_gas_map_region(domain, entry, IOMMU_MAP_ENTRY_READ |
1054 TAILQ_INSERT_TAIL(&map->map_entries, entry, dmamap_link);
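[Editor's note: this entry point wires an identity window (device address equals physical address) into the map, using fake pages so iommu_gas_map_region() has page structures to walk. A hypothetical call for a device with a fixed DMA window; the addresses, the length, and the exact parameter list are assumptions from memory of the declaration, not confirmed by the matched lines.]

    error = bus_dma_iommu_load_ident(dmat, map, 0x80000000UL /* start */,
        2 * 1024 * 1024 /* length */, BUS_DMA_WAITOK);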
1068 struct iommu_domain *domain;
1071 domain = arg;
1075 IOMMU_DOMAIN_LOCK(domain);
1076 TAILQ_SWAP(&domain->unload_entries, &entries,
1078 IOMMU_DOMAIN_UNLOCK(domain);
1081 iommu_domain_unload(domain, &entries, true);
1086 iommu_domain_init(struct iommu_unit *unit, struct iommu_domain *domain,
1090 domain->ops = ops;
1091 domain->iommu = unit;
1093 TASK_INIT(&domain->unload_task, 0, iommu_domain_unload_task, domain);
1094 RB_INIT(&domain->rb_root);
1095 TAILQ_INIT(&domain->unload_entries);
1096 mtx_init(&domain->lock, "iodom", NULL, MTX_DEF);
1100 iommu_domain_fini(struct iommu_domain *domain)
1103 mtx_destroy(&domain->lock);