Lines Matching +full:pci +full:- +full:domain

1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
59 #include <dev/pci/pcireg.h>
60 #include <dev/pci/pcivar.h>
72 static MALLOC_DEFINE(M_DMAR_DOMAIN, "dmar_dom", "Intel DMAR Domain");
75 struct dmar_domain *domain);
76 static void dmar_domain_destroy(struct dmar_domain *domain);
90 ctxm = iommu_pgalloc(dmar->ctx_obj, 1 + bus, IOMMU_PGF_NOALLOC);
101 ctxm = iommu_pgalloc(dmar->ctx_obj, 1 + bus, IOMMU_PGF_ZERO |
103 re = iommu_map_pgtbl(dmar->ctx_obj, 0, IOMMU_PGF_NOALLOC, &sf);
105 dmar_pte_store(&re->r1, DMAR_ROOT_R1_P | (DMAR_ROOT_R1_CTP_MASK &
120 ctxp = iommu_map_pgtbl(dmar->ctx_obj, 1 + PCI_RID2BUS(ctx->context.rid),
122 ctxp += ctx->context.rid & 0xff;
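
/*
 * Editorial sketch (not from the file above): how a 16-bit PCI
 * requester ID picks the two VT-d table levels.  The root table is
 * indexed by bus, and each root entry points to a context table
 * indexed by the device/function byte, which is what PCI_RID2BUS and
 * the 0xff mask implement.  rid_bus()/rid_devfn() are illustrative
 * helpers, not kernel API.
 */
#include <stdint.h>

static inline uint8_t rid_bus(uint16_t rid)   { return (rid >> 8); }
static inline uint8_t rid_devfn(uint16_t rid) { return (rid & 0xff); }
/* root_entry = root_table[rid_bus(rid)];
 * ctx_entry  = ctx_table(root_entry)[rid_devfn(rid)]; */
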
127 ctx_id_entry_init_one(dmar_ctx_entry_t *ctxp, struct dmar_domain *domain,
133 * doubleword is not yet updated. The domain id is stored in
139 dmar_pte_store1(&ctxp->ctx2, DMAR_CTX2_DID(domain->domain) |
140 domain->awlvl);
142 dmar_pte_store1(&ctxp->ctx1, DMAR_CTX1_T_PASS | DMAR_CTX1_P);
144 dmar_pte_store1(&ctxp->ctx1, DMAR_CTX1_T_UNTR |
155 struct dmar_domain *domain;
159 domain = CTX2DOM(ctx);
160 unit = DOM2DMAR(domain);
161 KASSERT(move || (ctxp->ctx1 == 0 && ctxp->ctx2 == 0),
163 unit->iommu.unit, busno, pci_get_slot(ctx->context.tag->owner),
164 pci_get_function(ctx->context.tag->owner),
165 ctxp->ctx1, ctxp->ctx2));
167 if ((domain->iodom.flags & IOMMU_DOMAIN_IDMAP) != 0 &&
168 (unit->hw_ecap & DMAR_ECAP_PT) != 0) {
169 KASSERT(domain->pgtbl_obj == NULL,
170 ("ctx %p non-null pgtbl_obj", ctx));
173 ctx_root = iommu_pgalloc(domain->pgtbl_obj, 0,
180 ctx_id_entry_init_one(&ctxp[i], domain, ctx_root);
183 ctx_id_entry_init_one(ctxp, domain, ctx_root);
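
/*
 * Editorial sketch of the branch above: an identity-mapped domain on
 * hardware that advertises pass-through gets a DMAR_CTX1_T_PASS
 * context entry with no page table (ctx_root stays NULL); otherwise
 * the entry is DMAR_CTX1_T_UNTR and carries the root of the domain's
 * page table.  Booleans stand in for the IOMMU_DOMAIN_IDMAP and
 * DMAR_ECAP_PT tests seen above.
 */
#include <stdbool.h>

static inline bool
ctx_uses_passthrough(bool dom_identity_mapped, bool hw_supports_pt)
{
        return (dom_identity_mapped && hw_supports_pt);
}
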
198 if ((dmar->hw_cap & DMAR_CAP_CM) == 0 && !force)
200 if (dmar->qi_enabled) {
202 if ((dmar->hw_ecap & DMAR_ECAP_DI) != 0 || force)
207 if (error == 0 && ((dmar->hw_ecap & DMAR_ECAP_DI) != 0 || force))
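
/*
 * Editorial sketch of the flush policy above.  With Caching Mode
 * (DMAR_CAP_CM) clear, the hardware does not cache not-present
 * entries, so installing a brand-new context entry needs no
 * invalidation unless the caller forces one; the DMAR_ECAP_DI test
 * then gates an additional IOTLB invalidation.  Illustrative
 * predicate only, with a boolean standing in for the capability bit.
 */
#include <stdbool.h>

static bool
ctx_entry_needs_flush(bool caching_mode, bool force)
{
        return (caching_mode || force);
}
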
213 domain_init_rmrr(struct dmar_domain *domain, device_t dev, int bus,
229 dmar_dev_parse_rmrr(domain, dev_domain, dev_busno, dev_path,
233 * VT-d specification requires that the start of an
234 * RMRR entry is 4k-aligned. Buggy BIOSes put
241 start = entry->start;
242 end = entry->end;
244 printf("dmar%d ctx pci%d:%d:%d RMRR [%#jx, %#jx]\n",
245 domain->iodom.iommu->unit, bus, slot, func,
247 entry->start = trunc_page(start);
248 entry->end = round_page(end);
249 if (entry->start == entry->end) {
254 printf("pci%d:%d:%d ", bus, slot, func);
257 domain->iodom.iommu->unit, start, end);
259 entry->end += IOMMU_PAGE_SIZE * 0x20;
261 size = OFF_TO_IDX(entry->end - entry->start);
264 ma[i] = vm_page_getfake(entry->start + PAGE_SIZE * i,
267 error1 = iommu_gas_map_region(DOM2IODOM(domain), entry,
271 * Non-failed RMRR entries are owned by context rb
276 if (error1 == 0 && entry->end != entry->start) {
277 IOMMU_LOCK(domain->iodom.iommu);
278 domain->refs++; /* XXXKIB prevent free */
279 domain->iodom.flags |= IOMMU_DOMAIN_RMRR;
280 IOMMU_UNLOCK(domain->iodom.iommu);
285 printf("pci%d:%d:%d ", bus, slot, func);
288 domain->iodom.iommu->unit, start, end,
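
/*
 * Editorial sketch of the RMRR rounding above.  The VT-d spec requires
 * RMRR ranges to be 4K-aligned, but buggy BIOSes emit unaligned (or
 * empty) ranges, so the driver widens them to page boundaries before
 * mapping them 1:1.  PG and the helpers are generic stand-ins for
 * PAGE_SIZE and the kernel's trunc_page()/round_page().
 */
#include <stdint.h>

#define PG 4096ULL

static inline uint64_t trunc_pg(uint64_t a) { return (a & ~(PG - 1)); }
static inline uint64_t round_pg(uint64_t a) { return ((a + PG - 1) & ~(PG - 1)); }
/* Example: [0x12345, 0x13001) widens to [0x12000, 0x14000). */
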
303 * PCI memory address space is shared between memory-mapped devices (MMIO) and
306 * peer-to-peer and not forwarded to an IOMMU. To avoid this, reserve the
311 dmar_reserve_pci_regions(struct dmar_domain *domain, device_t dev)
319 iodom = DOM2IODOM(domain);
330 device_printf(dev, "DMAR reserve [%#jx-%#jx] (error %d)\n",
353 device_printf(dev, "DMAR reserve [%#jx-%#jx] "
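
/*
 * Editorial sketch of the reservation above.  A DMA address that falls
 * inside a device BAR or bridge window can be claimed by PCI
 * peer-to-peer routing before it ever reaches the IOMMU, so those
 * windows are removed from the domain's allocatable space up front.
 * reserve_range() is a hypothetical callback, not the kernel's
 * iommu_gas API.
 */
#include <stdint.h>

struct mem_window { uint64_t start, end; };

static int
reserve_pci_windows(const struct mem_window *w, int n,
    int (*reserve_range)(uint64_t start, uint64_t end))
{
        int error, i;

        for (i = 0; i < n; i++) {
                error = reserve_range(w[i].start, w[i].end);
                if (error != 0)
                        return (error);  /* caller reports, as above */
        }
        return (0);
}
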
367 struct dmar_domain *domain;
370 id = alloc_unr(dmar->domids);
371 if (id == -1)
373 domain = malloc(sizeof(*domain), M_DMAR_DOMAIN, M_WAITOK | M_ZERO);
374 iodom = DOM2IODOM(domain);
376 domain->domain = id;
377 LIST_INIT(&iodom->contexts);
380 domain->dmar = dmar;
384 * installed memory to calculate the mgaw for the id_mapped domain.
388 domain->iodom.end = id_mapped ? ptoa(Maxmem) : BUS_SPACE_MAXADDR;
389 mgaw = dmar_maxaddr2mgaw(dmar, domain->iodom.end, !id_mapped);
390 error = domain_set_agaw(domain, mgaw);
395 domain->iodom.end = 1ULL << (domain->agaw - 1);
397 iommu_gas_init_domain(DOM2IODOM(domain));
400 if ((dmar->hw_ecap & DMAR_ECAP_PT) == 0) {
401 domain->pgtbl_obj = dmar_get_idmap_pgtbl(domain,
402 domain->iodom.end);
404 domain->iodom.flags |= IOMMU_DOMAIN_IDMAP;
406 error = dmar_domain_alloc_pgtbl(domain);
411 0xfeefffff + 1, &iodom->msi_entry);
415 return (domain);
418 dmar_domain_destroy(domain);
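
/*
 * Editorial sketch of the sizing above.  An identity-mapped domain
 * only has to span installed RAM (ptoa(Maxmem)); a remapping domain
 * spans the bus address range.  The chosen address width (agaw) is
 * then rounded to one of the page-table geometries the unit supports,
 * which fixes the domain end address.  Illustrative fls-style helper,
 * not the kernel's dmar_maxaddr2mgaw().
 */
#include <stdint.h>

static int
bits_needed(uint64_t maxaddr)
{
        int w;

        for (w = 0; maxaddr != 0; w++)
                maxaddr >>= 1;
        return (w);     /* smallest width covering maxaddr */
}
/* e.g. bits_needed(0xffffffffULL) == 32, which a unit supporting
 * 39/48-bit widths would round up to a 39-bit agaw. */
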
423 dmar_ctx_alloc(struct dmar_domain *domain, uint16_t rid)
428 ctx->context.domain = DOM2IODOM(domain);
429 ctx->context.tag = malloc(sizeof(struct bus_dma_tag_iommu),
431 ctx->context.rid = rid;
432 ctx->context.refs = 1;
439 struct dmar_domain *domain;
441 domain = CTX2DOM(ctx);
442 IOMMU_ASSERT_LOCKED(domain->iodom.iommu);
443 KASSERT(domain->refs >= domain->ctx_cnt,
444 ("dom %p ref underflow %d %d", domain, domain->refs,
445 domain->ctx_cnt));
446 domain->refs++;
447 domain->ctx_cnt++;
448 LIST_INSERT_HEAD(&domain->iodom.contexts, &ctx->context, link);
454 struct dmar_domain *domain;
456 domain = CTX2DOM(ctx);
457 IOMMU_ASSERT_LOCKED(domain->iodom.iommu);
458 KASSERT(domain->refs > 0,
459 ("domain %p ctx dtr refs %d", domain, domain->refs));
460 KASSERT(domain->ctx_cnt >= domain->refs,
461 ("domain %p ctx dtr refs %d ctx_cnt %d", domain,
462 domain->refs, domain->ctx_cnt));
463 domain->refs--;
464 domain->ctx_cnt--;
465 LIST_REMOVE(&ctx->context, link);
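
/*
 * Editorial sketch of the counting protocol asserted above: every
 * linked context holds one reference on its domain, and other holders
 * (such as the RMRR pin earlier) only add on top, so refs >= ctx_cnt
 * is an invariant on both the link and unlink paths.  Toy types, not
 * the kernel structures.
 */
#include <assert.h>

struct dom { int refs, ctx_cnt; };

static void dom_ctx_link(struct dom *d)    { d->refs++; d->ctx_cnt++; }
static void dom_ctx_unlink(struct dom *d)  { d->refs--; d->ctx_cnt--; }
static void dom_check(const struct dom *d) { assert(d->refs >= d->ctx_cnt); }
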
469 dmar_domain_destroy(struct dmar_domain *domain)
474 iodom = DOM2IODOM(domain);
476 KASSERT(TAILQ_EMPTY(&domain->iodom.unload_entries),
477 ("unfinished unloads %p", domain));
478 KASSERT(LIST_EMPTY(&iodom->contexts),
479 ("destroying dom %p with contexts", domain));
480 KASSERT(domain->ctx_cnt == 0,
481 ("destroying dom %p with ctx_cnt %d", domain, domain->ctx_cnt));
482 KASSERT(domain->refs == 0,
483 ("destroying dom %p with refs %d", domain, domain->refs));
484 if ((domain->iodom.flags & IOMMU_DOMAIN_GAS_INITED) != 0) {
485 DMAR_DOMAIN_LOCK(domain);
487 DMAR_DOMAIN_UNLOCK(domain);
489 if ((domain->iodom.flags & IOMMU_DOMAIN_PGTBL_INITED) != 0) {
490 if (domain->pgtbl_obj != NULL)
491 DMAR_DOMAIN_PGLOCK(domain);
492 dmar_domain_free_pgtbl(domain);
495 dmar = DOM2DMAR(domain);
496 free_unr(dmar->domids, domain->domain);
497 free(domain, M_DMAR_DOMAIN);
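
/*
 * Editorial sketch of the destruction order above: after asserting
 * that nothing is left (no pending unloads, contexts, or references),
 * the pieces come apart in reverse order of construction.  All names
 * below are hypothetical stand-ins.
 */
struct toy_domain;
static void gas_fini(struct toy_domain *);
static void pgtbl_free(struct toy_domain *);
static void domid_free(struct toy_domain *);
static void mem_free(struct toy_domain *);

static void
domain_destroy_order(struct toy_domain *d)
{
        gas_fini(d);    /* 1: address-space bookkeeping, under lock */
        pgtbl_free(d);  /* 2: translation tables, if any */
        domid_free(d);  /* 3: return the id to the unit's allocator */
        mem_free(d);    /* 4: the structure itself */
}
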
505 struct dmar_domain *domain, *domain1;
527 ("iommu%d pci%d:%d:%d get_ctx for buswide", dmar->iommu.unit, bus,
565 domain = domain1;
568 ctx->context.tag->owner = dev;
576 if (LIST_EMPTY(&dmar->domains))
578 LIST_INSERT_HEAD(&dmar->domains, domain, link);
582 "dmar%d pci%d:%d:%d:%d rid %x domain %d mgaw %d "
583 "agaw %d %s-mapped\n",
584 dmar->iommu.unit, dmar->segment, bus, slot,
585 func, rid, domain->domain, domain->mgaw,
586 domain->agaw, id_mapped ? "id" : "re");
594 domain = CTX2DOM(ctx);
595 ctx->context.refs++; /* tag referenced us */
598 domain = CTX2DOM(ctx);
599 if (ctx->context.tag->owner == NULL)
600 ctx->context.tag->owner = dev;
601 ctx->context.refs++; /* tag referenced us */
616 if (enable && !rmrr_init && (dmar->hw_gcmd & DMAR_GCMD_TE) == 0) {
620 dmar->iommu.unit);
625 dmar->iommu.unit);
629 "error %d\n", dmar->iommu.unit, error);
666 dmar_move_ctx_to_domain(struct dmar_domain *domain, struct dmar_ctx *ctx)
674 dmar = domain->dmar;
676 if (domain == old_domain)
678 KASSERT(old_domain->iodom.iommu == domain->iodom.iommu,
679 ("domain %p %u moving between dmars %u %u", domain,
680 domain->domain, old_domain->iodom.iommu->unit,
681 domain->iodom.iommu->unit));
687 ctx->context.domain = &domain->iodom;
693 printf("dmar%d rid %x domain %d->%d %s-mapped\n",
694 dmar->iommu.unit, ctx->context.rid, old_domain->domain,
695 domain->domain, (domain->iodom.flags & IOMMU_DOMAIN_IDMAP) != 0 ?
703 dmar_unref_domain_locked(struct dmar_unit *dmar, struct dmar_domain *domain)
707 KASSERT(domain->refs >= 1,
708 ("dmar %d domain %p refs %u", dmar->iommu.unit, domain,
709 domain->refs));
710 KASSERT(domain->refs > domain->ctx_cnt,
711 ("dmar %d domain %p refs %d ctx_cnt %d", dmar->iommu.unit, domain,
712 domain->refs, domain->ctx_cnt));
714 if (domain->refs > 1) {
715 domain->refs--;
720 KASSERT((domain->iodom.flags & IOMMU_DOMAIN_RMRR) == 0,
721 ("lost ref on RMRR domain %p", domain));
723 LIST_REMOVE(domain, link);
726 taskqueue_drain(dmar->iommu.delayed_taskqueue,
727 &domain->iodom.unload_task);
728 dmar_domain_destroy(domain);
736 struct dmar_domain *domain;
739 KASSERT(ctx->context.refs >= 1,
740 ("dmar %p ctx %p refs %u", dmar, ctx, ctx->context.refs));
746 if (ctx->context.refs > 1) {
747 ctx->context.refs--;
752 KASSERT((ctx->context.flags & IOMMU_CTX_DISABLED) == 0,
764 KASSERT(ctx->context.refs >= 1,
765 ("dmar %p ctx %p refs %u", dmar, ctx, ctx->context.refs));
771 if (ctx->context.refs > 1) {
772 ctx->context.refs--;
779 KASSERT((ctx->context.flags & IOMMU_CTX_DISABLED) == 0,
786 dmar_pte_clear(&ctxp->ctx1);
787 ctxp->ctx2 = 0;
790 if ((dmar->hw_ecap & DMAR_ECAP_DI) != 0) {
791 if (dmar->qi_enabled)
797 domain = CTX2DOM(ctx);
799 free(ctx->context.tag, M_DMAR_CTX);
801 dmar_unref_domain_locked(dmar, domain);
806 * Returns with the domain locked.
811 struct dmar_domain *domain;
816 LIST_FOREACH(domain, &dmar->domains, link) {
817 LIST_FOREACH(ctx, &domain->iodom.contexts, link) {
818 if (ctx->rid == rid)
833 struct dmar_domain *domain;
836 domain = IODOM2DOM(entry->domain);
837 unit = DOM2DMAR(domain);
844 if (unit->qi_enabled) {
847 iommu_qi_invalidate_locked(&domain->iodom, entry,
851 iommu_qi_invalidate_sync(&domain->iodom, entry->start,
852 entry->end - entry->start, cansleep);
856 dmar_flush_iotlb_sync(domain, entry->start, entry->end -
857 entry->start);
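
/*
 * Editorial sketch of the choice above: with queued invalidation (QI)
 * available, the flush is posted to the hardware queue (and optionally
 * waited on when the caller can sleep); otherwise the driver falls
 * back to the synchronous register-based IOTLB flush.  Callback names
 * are hypothetical.
 */
#include <stdint.h>
#include <stdbool.h>

static void
invalidate_range(bool qi_enabled, uint64_t start, uint64_t size,
    void (*qi_post)(uint64_t, uint64_t), void (*reg_flush)(uint64_t, uint64_t))
{
        if (qi_enabled)
                qi_post(start, size);   /* batched, asynchronous */
        else
                reg_flush(start, size); /* synchronous fallback */
}
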
863 dmar_domain_unload_emit_wait(struct dmar_domain *domain,
869 return (domain->batch_no++ % iommu_qi_batch_coalesce == 0);
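
/*
 * Editorial sketch of the coalescing above: a completion wait is
 * requested only on every iommu_qi_batch_coalesce-th invalidation, so
 * one interrupt retires a whole batch of queued descriptors.
 */
static int
emit_wait_descriptor(unsigned *batch_no, unsigned coalesce)
{
        return ((*batch_no)++ % coalesce == 0);
}
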
876 struct dmar_domain *domain;
881 domain = IODOM2DOM(iodom);
882 unit = DOM2DMAR(domain);
885 KASSERT((entry->flags & IOMMU_MAP_ENTRY_MAP) != 0,
886 ("not mapped entry %p %p", domain, entry));
887 error = iodom->ops->unmap(iodom, entry,
889 KASSERT(error == 0, ("unmap %p error %d", domain, error));
890 if (!unit->qi_enabled) {
891 dmar_flush_iotlb_sync(domain, entry->start,
892 entry->end - entry->start);
900 KASSERT(unit->qi_enabled, ("loaded entry left"));
904 iommu_qi_invalidate_locked(&domain->iodom, entry,
905 dmar_domain_unload_emit_wait(domain, entry));
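
/*
 * Editorial sketch of the ordering in the unload loop above: the
 * page-table entries are removed first, the IOTLB flush is issued
 * second, and the map entry may be reclaimed only after the flush
 * completes, since the hardware can keep using stale cached
 * translations until then.  Helper names are hypothetical.
 */
struct toy_entry;
static void pgtbl_unmap(struct toy_entry *);
static void iotlb_flush_wait(struct toy_entry *);
static void entry_free(struct toy_entry *);

static void
unload_one(struct toy_entry *e)
{
        pgtbl_unmap(e);       /* 1: drop PTEs */
        iotlb_flush_wait(e);  /* 2: invalidate cached translations */
        entry_free(e);        /* 3: safe only after step 2 completes */
}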