Lines Matching +full:iommu +full:- +full:map

1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
59 #include <dev/iommu/iommu.h>
63 #include <machine/iommu.h>
64 #include <dev/iommu/busdma_iommu.h>
68 * IOMMU units from Intel VT-d.
78 static const char iommu_str[] = "iommu";
110 * the IOMMU unit and used for page table lookup. PCI bridges may take
114 * domain, and must collectively be assigned to use either IOMMU or
139 * host port to find the translating bridge nearest the IOMMU
147 ("iommu_get_requester(%s): non-pci parent %s for %s",
157 * so pcib isn't a PCI->PCI bridge but rather a host
170 * unlikely) to have a PCI->PCIe bridge
177 * requester by IOMMU unit. Check whether the
204 * PCIe->PCI bridge. Assume that the
210 * the bridge is PCIe->PCI-X, and the
215 * non-taken transactions.
223 * conventional PCI->PCI bridge, which
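
The iommu_get_requester() fragments above describe the walk from the target
device toward the host port, with translating bridges taking ownership of the
requester ID along the way.  A minimal sketch of that walk, assuming the
standard newbus/pci(9) KPIs; it is not the in-tree function and it elides
every PCIe/PCI-X ownership case the comments enumerate:

static device_t
requester_walk_sketch(device_t dev)
{
	device_t iter, bus, pcib;
	devclass_t pci_class;

	pci_class = devclass_find("pci");
	for (iter = dev;;) {
		bus = device_get_parent(iter);	/* the pciN bus */
		pcib = device_get_parent(bus);	/* PCI->PCI bridge or host port */
		if (device_get_devclass(device_get_parent(pcib)) != pci_class)
			return (iter);	/* pcib is a host port; iter is nearest the IOMMU */
		/*
		 * The real code decides here whether pcib takes ownership
		 * of transactions from its secondary bus (PCIe->PCI(-X)
		 * bridges do; conventional PCI->PCI bridges pass the
		 * original requester ID through) and adjusts the rid.
		 */
		iter = pcib;
	}
}
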
246 * If the user requested the IOMMU disabled for the device, we
247 * cannot disable the IOMMU unit, due to the possibility of other
248 * devices on the same IOMMU unit still requiring translation.
264 if ((ctx->flags & IOMMU_CTX_DISABLED) == 0) {
265 ctx->flags |= IOMMU_CTX_DISABLED;
281 /* Not in scope of any IOMMU ? */
284 if (!unit->dma_enabled)
301 res = (bus_dma_tag_t)ctx->tag;
324 "iommu%d pci%d:%d:%d requested buswide busdma\n",
325 unit->unit, busno, slot, func);
339 unit->buswide_ctxs[busno / NBBY / sizeof(uint32_t)] |=
349 return ((unit->buswide_ctxs[busno / NBBY / sizeof(uint32_t)] &
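
The expressions above pack one "bus-wide context" flag per PCI bus number into
an array of uint32_t words.  What they amount to, written as set/test helpers
(the names are illustrative, not the in-tree functions):

#define	BUSWIDE_WORD(busno)	((busno) / (NBBY * sizeof(uint32_t)))
#define	BUSWIDE_MASK(busno)	(1U << ((busno) % (NBBY * sizeof(uint32_t))))

static void
buswide_set_sketch(uint32_t *ctxs, u_int busno)
{
	ctxs[BUSWIDE_WORD(busno)] |= BUSWIDE_MASK(busno);
}

static bool
buswide_isset_sketch(const uint32_t *ctxs, u_int busno)
{
	return ((ctxs[BUSWIDE_WORD(busno)] & BUSWIDE_MASK(busno)) != 0);
}
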
353 static MALLOC_DEFINE(M_IOMMU_DMAMAP, "iommu_dmamap", "IOMMU DMA Map");
356 struct bus_dmamap_iommu *map);
369 &((struct bus_dma_tag_iommu *)parent)->common : NULL, alignment,
377 newtag->common.impl = &bus_dma_iommu_impl;
378 newtag->ctx = oldtag->ctx;
379 newtag->owner = oldtag->owner;
384 __func__, newtag, (newtag != NULL ? newtag->common.flags : 0),
400 struct iommu_unit *iommu;
408 if (dmat->map_count != 0) {
412 ctx = dmat->ctx;
413 if (dmat == ctx->tag) {
414 iommu = ctx->domain->iommu;
415 IOMMU_LOCK(iommu);
416 iommu_free_ctx_locked(iommu, dmat->ctx);
418 free(dmat->segments, M_IOMMU_DMAMAP);
437 struct bus_dmamap_iommu *map;
440 map = malloc_domainset(sizeof(*map), M_IOMMU_DMAMAP,
441 DOMAINSET_PREF(tag->common.domain), M_NOWAIT | M_ZERO);
442 if (map == NULL) {
446 if (tag->segments == NULL) {
447 tag->segments = malloc_domainset(sizeof(bus_dma_segment_t) *
448 tag->common.nsegments, M_IOMMU_DMAMAP,
449 DOMAINSET_PREF(tag->common.domain), M_NOWAIT);
450 if (tag->segments == NULL) {
451 free(map, M_IOMMU_DMAMAP);
456 IOMMU_DMAMAP_INIT(map);
457 TAILQ_INIT(&map->map_entries);
458 map->tag = tag;
459 map->locked = true;
460 map->cansleep = false;
461 tag->map_count++;
462 *mapp = (bus_dmamap_t)map;
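
The tag and map constructed here are what a PCI driver on an IOMMU-managed bus
ends up with through the ordinary busdma KPI.  A hedged consumer-side example
(the device and all parameter values are made up):

	bus_dma_tag_t dmat;
	bus_dmamap_t map;
	int error;

	/* 4 KB, single segment, 4-byte aligned, 32-bit addressable. */
	error = bus_dma_tag_create(bus_get_dma_tag(dev), 4, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    4096, 1, 4096, 0, NULL, NULL, &dmat);
	if (error == 0)
		error = bus_dmamap_create(dmat, 0, &map);
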
471 struct bus_dmamap_iommu *map;
474 map = (struct bus_dmamap_iommu *)map1;
475 if (map != NULL) {
476 IOMMU_DMAMAP_LOCK(map);
477 if (!TAILQ_EMPTY(&map->map_entries)) {
478 IOMMU_DMAMAP_UNLOCK(map);
481 IOMMU_DMAMAP_DESTROY(map);
482 free(map, M_IOMMU_DMAMAP);
484 tag->map_count--;
494 struct bus_dmamap_iommu *map;
508 map = (struct bus_dmamap_iommu *)*mapp;
510 if (tag->common.maxsize < PAGE_SIZE &&
511 tag->common.alignment <= tag->common.maxsize &&
513 *vaddr = malloc_domainset(tag->common.maxsize, M_DEVBUF,
514 DOMAINSET_PREF(tag->common.domain), mflags);
515 map->flags |= BUS_DMAMAP_IOMMU_MALLOC;
518 DOMAINSET_PREF(tag->common.domain), tag->common.maxsize,
520 map->flags |= BUS_DMAMAP_IOMMU_KMEM_ALLOC;
534 struct bus_dmamap_iommu *map;
537 map = (struct bus_dmamap_iommu *)map1;
539 if ((map->flags & BUS_DMAMAP_IOMMU_MALLOC) != 0) {
541 map->flags &= ~BUS_DMAMAP_IOMMU_MALLOC;
543 KASSERT((map->flags & BUS_DMAMAP_IOMMU_KMEM_ALLOC) != 0,
544 ("iommu_bus_dmamem_free for non alloced map %p", map));
545 kmem_free(vaddr, tag->common.maxsize);
546 map->flags &= ~BUS_DMAMAP_IOMMU_KMEM_ALLOC;
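
This alloc/free pair backs bus_dmamem_alloc(): buffers smaller than a page
whose alignment is already satisfied come from malloc_domainset(), everything
else from kmem (plus further constraints not visible in the matched lines),
with a map flag recording which free path to take later.  Driver-side usage,
briefly:

	void *vaddr;
	bus_dmamap_t dmamem_map;
	int error;

	error = bus_dmamem_alloc(dmat, &vaddr,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &dmamem_map);
	if (error == 0) {
		/* ... use the buffer, bus_dmamap_load() it, ... */
		bus_dmamem_free(dmat, vaddr, dmamem_map);
	}
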
554 struct bus_dmamap_iommu *map, vm_page_t *ma, int offset, bus_size_t buflen,
566 segs = tag->segments;
567 ctx = tag->ctx;
568 domain = ctx->domain;
576 if (seg >= tag->common.nsegments) {
580 buflen1 = buflen > tag->common.maxsegsz ?
581 tag->common.maxsegsz : buflen;
587 gas_flags = map->cansleep ? IOMMU_MF_CANWAIT : 0;
588 if (seg + 1 < tag->common.nsegments)
591 error = iommu_gas_map(domain, &tag->common, buflen1,
596 if (buflen1 > entry->end - entry->start - offset)
597 buflen1 = entry->end - entry->start - offset;
599 KASSERT(vm_addr_align_ok(entry->start + offset,
600 tag->common.alignment),
602 "align 0x%jx", ctx, (uintmax_t)entry->start, offset,
603 (uintmax_t)tag->common.alignment));
604 KASSERT(entry->end <= tag->common.lowaddr ||
605 entry->start >= tag->common.highaddr,
608 (uintmax_t)entry->start, (uintmax_t)entry->end,
609 (uintmax_t)tag->common.lowaddr,
610 (uintmax_t)tag->common.highaddr));
611 KASSERT(vm_addr_bound_ok(entry->start + offset, buflen1,
612 tag->common.boundary),
614 "boundary 0x%jx", ctx, (uintmax_t)entry->start,
615 (uintmax_t)entry->end, (uintmax_t)tag->common.boundary));
616 KASSERT(buflen1 <= tag->common.maxsegsz,
619 (uintmax_t)entry->start, (uintmax_t)entry->end,
620 (uintmax_t)buflen1, (uintmax_t)tag->common.maxsegsz));
622 KASSERT((entry->flags & IOMMU_MAP_ENTRY_MAP) != 0,
626 segs[seg].ds_addr = entry->start + offset;
632 buflen -= buflen1;
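
The KASSERTs above check the address-space allocation returned by
iommu_gas_map() against the tag's constraints.  The two predicates reduce to
roughly the following (a sketch, not the definitions in vm_extern.h):

static bool
addr_align_ok_sketch(vm_paddr_t addr, u_long alignment)
{
	/* alignment is a power of two; 1 means no constraint */
	return ((addr & (alignment - 1)) == 0);
}

static bool
addr_bound_ok_sketch(vm_paddr_t addr, vm_size_t size, u_long boundary)
{
	/* the segment [addr, addr + size - 1] must not cross a boundary line */
	return (boundary == 0 ||
	    ((addr ^ (addr + size - 1)) & ~(boundary - 1)) == 0);
}
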
641 struct bus_dmamap_iommu *map, vm_page_t *ma, int offset, bus_size_t buflen,
649 ctx = tag->ctx;
650 domain = ctx->domain;
651 atomic_add_long(&ctx->loads, 1);
654 error = iommu_bus_dmamap_load_something1(tag, map, ma, offset,
657 IOMMU_DMAMAP_LOCK(map);
658 TAILQ_CONCAT(&map->map_entries, &entries, dmamap_link);
659 IOMMU_DMAMAP_UNLOCK(map);
667 TAILQ_CONCAT(&domain->unload_entries, &entries, dmamap_link);
669 taskqueue_enqueue(domain->iommu->delayed_taskqueue,
670 &domain->unload_task);
674 !map->cansleep)
677 iommu_bus_schedule_dmamap(domain->iommu, map);
687 struct bus_dmamap_iommu *map;
690 map = (struct bus_dmamap_iommu *)map1;
691 return (iommu_bus_dmamap_load_something(tag, map, ma, ma_offs, tlen,
701 struct bus_dmamap_iommu *map;
707 map = (struct bus_dmamap_iommu *)map1;
711 ma_cnt = OFF_TO_IDX(pend - pstart);
712 mflags = map->cansleep ? M_WAITOK : M_NOWAIT;
739 error = iommu_bus_dmamap_load_something(tag, map, ma, offset, buflen,
752 struct bus_dmamap_iommu *map;
758 map = (struct bus_dmamap_iommu *)map1;
762 ma_cnt = OFF_TO_IDX(pend - pstart);
763 mflags = map->cansleep ? M_WAITOK : M_NOWAIT;
792 error = iommu_bus_dmamap_load_something(tag, map, ma, offset, buflen,
803 struct bus_dmamap_iommu *map;
807 map = (struct bus_dmamap_iommu *)map1;
808 map->mem = *mem;
809 map->tag = (struct bus_dma_tag_iommu *)dmat;
810 map->callback = callback;
811 map->callback_arg = callback_arg;
819 struct bus_dmamap_iommu *map;
822 map = (struct bus_dmamap_iommu *)map1;
824 if (!map->locked) {
825 KASSERT(map->cansleep,
826 ("map not locked and not sleepable context %p", map));
832 (tag->common.lockfunc)(tag->common.lockfuncarg, BUS_DMA_LOCK);
833 map->locked = true;
837 segs = tag->segments;
842 * The limitations of the busdma KPI force the iommu to perform the actual
843 * unload, consisting of the unmapping of the map entries' page tables,
855 struct bus_dmamap_iommu *map;
861 map = (struct bus_dmamap_iommu *)map1;
862 ctx = tag->ctx;
863 domain = ctx->domain;
864 atomic_add_long(&ctx->unloads, 1);
867 IOMMU_DMAMAP_LOCK(map);
868 TAILQ_CONCAT(&entries, &map->map_entries, dmamap_link);
869 IOMMU_DMAMAP_UNLOCK(map);
872 TAILQ_CONCAT(&domain->unload_entries, &entries, dmamap_link);
874 taskqueue_enqueue(domain->iommu->delayed_taskqueue,
875 &domain->unload_task);
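
Both the failed-load path and bus_dmamap_unload() finish the same way: entries
are detached from the map under its lock, spliced onto the domain's unload
list, and handed to the per-unit taskqueue so page-table teardown and IOTLB
invalidation happen outside the caller's (possibly non-sleepable) context.
The hand-off, condensed into one sketch mirroring the lines above:

static void
defer_unload_sketch(struct iommu_domain *domain,
    struct iommu_map_entries_tailq *entries)
{
	if (TAILQ_EMPTY(entries))
		return;
	mtx_lock(&domain->lock);
	TAILQ_CONCAT(&domain->unload_entries, entries, dmamap_link);
	mtx_unlock(&domain->lock);
	taskqueue_enqueue(domain->iommu->delayed_taskqueue,
	    &domain->unload_task);
}
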
888 struct bus_dmamap_iommu *map __unused;
890 map = (struct bus_dmamap_iommu *)map1;
891 kmsan_bus_dmamap_sync(&map->kmsan_mem, op);
898 struct bus_dmamap_iommu *map;
900 map = (struct bus_dmamap_iommu *)map1;
901 if (map == NULL)
903 memcpy(&map->kmsan_mem, mem, sizeof(struct memdesc));
932 struct bus_dmamap_iommu *map;
937 while ((map = TAILQ_FIRST(&unit->delayed_maps)) != NULL) {
938 TAILQ_REMOVE(&unit->delayed_maps, map, delay_link);
940 tag = map->tag;
941 map->cansleep = true;
942 map->locked = false;
943 bus_dmamap_load_mem((bus_dma_tag_t)tag, (bus_dmamap_t)map,
944 &map->mem, map->callback, map->callback_arg,
946 map->cansleep = false;
947 if (map->locked) {
948 (tag->common.lockfunc)(tag->common.lockfuncarg,
951 map->locked = true;
952 map->cansleep = false;
959 iommu_bus_schedule_dmamap(struct iommu_unit *unit, struct bus_dmamap_iommu *map)
962 map->locked = false;
964 TAILQ_INSERT_TAIL(&unit->delayed_maps, map, delay_link);
966 taskqueue_enqueue(unit->delayed_taskqueue, &unit->dmamap_load_task);
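
The task and scheduling function above implement the busdma deferral contract
as seen from a driver: a load that cannot complete immediately with
BUS_DMA_WAITOK returns EINPROGRESS, and the callback later runs from this
taskqueue with sleeping allowed.  A hedged driver-side illustration (the softc
fields and names are made up):

struct foo_softc {
	bus_dma_tag_t	foo_dmat;
	bus_dmamap_t	foo_map;
	void		*foo_buf;
	bus_size_t	foo_len;
	bus_addr_t	foo_busaddr;
};

static void
foo_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct foo_softc *sc = arg;

	if (error != 0)
		return;
	/* segs[] holds the IOMMU-translated device addresses (IOVAs) */
	sc->foo_busaddr = segs[0].ds_addr;
}

static int
foo_load(struct foo_softc *sc)
{
	int error;

	error = bus_dmamap_load(sc->foo_dmat, sc->foo_map, sc->foo_buf,
	    sc->foo_len, foo_load_cb, sc, BUS_DMA_WAITOK);
	if (error == EINPROGRESS)
		error = 0;	/* foo_load_cb() will run from the taskqueue */
	return (error);
}
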
974 unit->dma_enabled = 0;
975 error = TUNABLE_INT_FETCH("hw.iommu.dma", &unit->dma_enabled);
977 TUNABLE_INT_FETCH("hw.dmar.dma", &unit->dma_enabled);
978 SYSCTL_ADD_INT(&unit->sysctl_ctx,
979 SYSCTL_CHILDREN(device_get_sysctl_tree(unit->dev)),
980 OID_AUTO, "dma", CTLFLAG_RD, &unit->dma_enabled, 0,
982 TAILQ_INIT(&unit->delayed_maps);
983 TASK_INIT(&unit->dmamap_load_task, 0, iommu_bus_task_dmamap, unit);
984 unit->delayed_taskqueue = taskqueue_create("iommu", M_WAITOK,
985 taskqueue_thread_enqueue, &unit->delayed_taskqueue);
986 taskqueue_start_threads(&unit->delayed_taskqueue, 1, PI_DISK,
987 "iommu%d busdma taskq", unit->unit);
995 if (unit->delayed_taskqueue == NULL)
998 taskqueue_drain(unit->delayed_taskqueue, &unit->dmamap_load_task);
999 taskqueue_free(unit->delayed_taskqueue);
1000 unit->delayed_taskqueue = NULL;
1009 struct bus_dmamap_iommu *map;
1025 if (tc->impl != &bus_dma_iommu_impl)
1029 ctx = tag->ctx;
1030 domain = ctx->domain;
1031 map = (struct bus_dmamap_iommu *)map1;
1037 entry->start = start;
1038 entry->end = start + length;
1046 ma[i] = vm_page_getfake(entry->start + PAGE_SIZE * i,
1053 IOMMU_DMAMAP_LOCK(map);
1054 TAILQ_INSERT_TAIL(&map->map_entries, entry, dmamap_link);
1055 IOMMU_DMAMAP_UNLOCK(map);
1076 TAILQ_SWAP(&domain->unload_entries, &entries,
1090 domain->ops = ops;
1091 domain->iommu = unit;
1093 TASK_INIT(&domain->unload_task, 0, iommu_domain_unload_task, domain);
1094 RB_INIT(&domain->rb_root);
1095 TAILQ_INIT(&domain->unload_entries);
1096 mtx_init(&domain->lock, "iodom", NULL, MTX_DEF);
1103 mtx_destroy(&domain->lock);