Lines Matching +full:iommu +full:- +full:addresses
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
66 #include <dev/iommu/busdma_iommu.h>
67 #include <x86/iommu/intel_reg.h>
68 #include <x86/iommu/x86_iommu.h>
69 #include <x86/iommu/intel_dmar.h>
90 level, it is non-zero if superpages
104 * - lvl is the level to build;
105 * - idx is the index of the page table page in the pgtbl_obj, which is
107 * - addr is the starting address in the bus address space which is
121 VM_OBJECT_ASSERT_LOCKED(tbl->pgtbl_obj);
122 if (addr >= tbl->maxaddr)
124 (void)iommu_pgalloc(tbl->pgtbl_obj, idx, IOMMU_PGF_OBJL |
127 pg_sz = pglvl_page_size(tbl->pglvl, lvl);
128 if (lvl != tbl->leaf) {
132 VM_OBJECT_WUNLOCK(tbl->pgtbl_obj);
133 pte = iommu_map_pgtbl(tbl->pgtbl_obj, idx, IOMMU_PGF_WAITOK, &sf);
134 if (lvl == tbl->leaf) {
136 if (f >= tbl->maxaddr)
143 if (f >= tbl->maxaddr)
145 m1 = iommu_pgalloc(tbl->pgtbl_obj, base + i,
154 VM_OBJECT_WLOCK(tbl->pgtbl_obj);
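
Editor's note: the recursive builder whose fragments appear above fills each identity-map table page so that a bus address maps to the same host physical address, and the span covered by one PTE at a given level follows from the paging geometry. The stand-alone sketch below only illustrates that geometry, assuming 4 KB table pages with 512 entries each; the names and constants are illustrative and it is not the driver's pglvl_page_size() implementation.

#include <stdint.h>
#include <stdio.h>

#define	SKETCH_PG_SHIFT		12	/* assumed 4 KB base page */
#define	SKETCH_NPTE_SHIFT	9	/* assumed 512 PTEs per table page */

/*
 * Bytes of bus address space covered by a single PTE at 'lvl',
 * where level 0 is the root of a 'pglvl'-level table.
 */
static uint64_t
sketch_lvl_span(int pglvl, int lvl)
{
	return ((uint64_t)1 << (SKETCH_PG_SHIFT +
	    (pglvl - lvl - 1) * SKETCH_NPTE_SHIFT));
}

int
main(void)
{
	int lvl, pglvl = 4;	/* a 4-level table covers a 48-bit space */

	for (lvl = 0; lvl < pglvl; lvl++)
		printf("lvl %d: one PTE spans %#jx bytes\n",
		    lvl, (uintmax_t)sketch_lvl_span(pglvl, lvl));
	return (0);
}

For a 4-level table this prints spans of 512 GB, 1 GB, 2 MB and 4 KB, which is why the builder can stop materializing tables at a non-leaf level (tbl->leaf) when the hardware supports superpages.
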
158 * Find a ready and compatible identity-mapping page table in the
159 * cache. If not found, populate the identity-mapping page table for
179 for (i = 0; i < domain->pglvl; i++) {
180 if (i == domain->pglvl - 1 || domain_is_sp_lvl(domain, i)) {
196 if (tbl->maxaddr >= maxaddr &&
197 dmar_pglvl_supported(domain->dmar, tbl->pglvl) &&
198 tbl->leaf == leaf) {
199 res = tbl->pgtbl_obj;
202 domain->pglvl = tbl->pglvl; /* XXXKIB ? */
215 if (tbl->maxaddr >= maxaddr &&
216 dmar_pglvl_supported(domain->dmar, tbl->pglvl) &&
217 tbl->leaf == leaf) {
218 res = tbl->pgtbl_obj;
221 domain->pglvl = tbl->pglvl; /* XXXKIB ? */
230 tbl->pglvl = domain->pglvl;
231 tbl->leaf = leaf;
232 tbl->maxaddr = maxaddr;
233 tbl->pgtbl_obj = vm_pager_allocate(OBJT_PHYS, NULL,
234 IDX_TO_OFF(pglvl_max_pages(tbl->pglvl)), 0, 0, NULL);
239 VM_OBJECT_WLOCK(tbl->pgtbl_obj);
241 VM_OBJECT_WUNLOCK(tbl->pgtbl_obj);
243 res = tbl->pgtbl_obj;
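
Editor's note: the fragments above show that already-built identity tables are kept on a cache and reused when compatible; the test appears twice because the object lock is dropped around the allocation. A minimal sketch of that compatibility predicate follows; the struct and helper names are hypothetical mirrors of the cached entry's fields, and 'pglvl_supported' stands in for the dmar_pglvl_supported() check visible above.

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical mirror of a cached identity-map table descriptor. */
struct idmap_tbl_sketch {
	uint64_t maxaddr;	/* covers bus addresses [0, maxaddr) */
	int	 pglvl;		/* number of page table levels */
	int	 leaf;		/* last materialized (leaf) level */
};

/*
 * A cached table is reusable when it covers at least the requested
 * range, its level count is supported by the requesting unit, and it
 * was built down to the same leaf level.
 */
static bool
idmap_tbl_compatible(const struct idmap_tbl_sketch *tbl, uint64_t maxaddr,
    int leaf, bool pglvl_supported)
{
	return (tbl->maxaddr >= maxaddr && pglvl_supported &&
	    tbl->leaf == leaf);
}
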
261 unit = domain->dmar;
269 if ((unit->hw_cap & DMAR_CAP_RWBF) != 0) {
288 KASSERT(obj->ref_count >= 2, ("lost cache reference"));
295 if (obj->ref_count > 1) {
308 rmobj = tbl->pgtbl_obj;
309 if (rmobj->ref_count == 1) {
312 rmobj->resident_page_count);
337 idx = pglvl_pgtbl_get_pindex(domain->pglvl, base, lvl);
345 pte = iommu_map_pgtbl(domain->pgtbl_obj, idx, flags, sf);
354 m = iommu_pgalloc(domain->pgtbl_obj, idx, flags |
369 ptep = dmar_pgtbl_map_pte(domain, base, lvl - 1,
372 KASSERT(m->pindex != 0,
375 iommu_pgfree(domain->pgtbl_obj, m->pindex,
379 dmar_pte_store(&ptep->pte, DMAR_PTE_R | DMAR_PTE_W |
381 dmar_flush_pte_to_ram(domain->dmar, ptep);
389 pte += pglvl_pgtbl_pte_off(domain->pglvl, base, lvl);
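
Editor's note: the map and unmap paths above repeatedly turn a bus address into a page-table page index and a PTE offset within that page (pglvl_pgtbl_get_pindex() and pglvl_pgtbl_pte_off()). The sketch below shows only the offset part of that decomposition under the same assumed geometry as before (4 KB pages, 512 entries, 9 index bits per level); it is illustrative arithmetic, not code copied from the driver.

#include <stdint.h>
#include <stdio.h>

#define	SKETCH_PG_SHIFT		12
#define	SKETCH_NPTE_SHIFT	9
#define	SKETCH_PTE_MASK		((1 << SKETCH_NPTE_SHIFT) - 1)

/*
 * Index of the PTE for 'base' within its page-table page at 'lvl',
 * where level 0 is the root of a 'pglvl'-level table.
 */
static int
sketch_pte_off(int pglvl, uint64_t base, int lvl)
{
	return ((base >> (SKETCH_PG_SHIFT +
	    (pglvl - lvl - 1) * SKETCH_NPTE_SHIFT)) & SKETCH_PTE_MASK);
}

int
main(void)
{
	uint64_t base = 0x12345678000ULL;	/* arbitrary bus address */
	int lvl, pglvl = 4;

	for (lvl = 0; lvl < pglvl; lvl++)
		printf("lvl %d: PTE offset %d\n",
		    lvl, sketch_pte_off(pglvl, base, lvl));
	return (0);
}
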
411 for (sf = NULL, pi = 0; size > 0; base += pg_sz, size -= pg_sz,
416 if (lvl == domain->pglvl - 1)
426 * alignment of both guest and host addresses.
428 if (size < pg_sz || (base & (pg_sz - 1)) != 0 ||
429 (VM_PAGE_TO_PHYS(ma[pi]) & (pg_sz - 1)) != 0)
435 VM_PAGE_TO_PHYS(ma[pi + c - 1]) +
455 dmar_unmap_buf_locked(domain, base1, base - base1,
460 dmar_pte_store(&pte->pte, VM_PAGE_TO_PHYS(ma[pi]) | pflags |
462 dmar_flush_pte_to_ram(domain->dmar, pte);
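
Editor's note: the mapping loop above promotes a run of pages to a superpage only when the bus address, the backing physical address, and the remaining length all line up; otherwise it falls back to the 4 KB leaf level. A hedged sketch of that eligibility test is shown below; the function and parameter names are illustrative, and the physical-contiguity check the driver performs across the whole run is abstracted into a single flag.

#include <stdbool.h>
#include <stdint.h>

/*
 * A superpage of size 'pg_sz' can be used at this point only when:
 *  - at least pg_sz bytes remain to be mapped,
 *  - the bus (guest) address is pg_sz-aligned, and
 *  - the backing physical address is pg_sz-aligned,
 * and the backing pages are physically contiguous across the run.
 */
static bool
superpage_ok(uint64_t base, uint64_t size, uint64_t paddr, uint64_t pg_sz,
    bool run_contiguous)
{
	if (size < pg_sz)
		return (false);
	if ((base & (pg_sz - 1)) != 0)
		return (false);
	if ((paddr & (pg_sz - 1)) != 0)
		return (false);
	return (run_contiguous);
}
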
481 base = entry->start;
482 size = entry->end - entry->start;
490 unit = domain->dmar;
492 KASSERT((iodom->flags & IOMMU_DOMAIN_IDMAP) == 0,
495 ("non-aligned base %p %jx %jx", domain, (uintmax_t)base,
498 ("non-aligned size %p %jx %jx", domain, (uintmax_t)base,
502 KASSERT(base < (1ULL << domain->agaw),
504 (uintmax_t)size, domain->agaw));
505 KASSERT(base + size < (1ULL << domain->agaw),
507 (uintmax_t)size, domain->agaw));
517 (unit->hw_ecap & DMAR_ECAP_SC) != 0,
521 (unit->hw_ecap & DMAR_ECAP_DI) != 0,
533 if ((unit->hw_cap & DMAR_CAP_CM) != 0)
535 else if ((unit->hw_cap & DMAR_CAP_RWBF) != 0) {
569 dmar_pte_clear(&pte->pte);
570 dmar_flush_pte_to_ram(domain->dmar, pte);
581 KASSERT(m->pindex != 0,
584 iommu_pgfree(domain->pgtbl_obj, m->pindex, flags, entry);
585 dmar_free_pgtbl_pde(domain, base, lvl - 1, flags, entry);
605 KASSERT((domain->iodom.flags & IOMMU_DOMAIN_IDMAP) == 0,
608 ("non-aligned base %p %jx %jx", domain, (uintmax_t)base,
611 ("non-aligned size %p %jx %jx", domain, (uintmax_t)base,
613 KASSERT(base < (1ULL << domain->agaw),
615 (uintmax_t)size, domain->agaw));
616 KASSERT(base + size < (1ULL << domain->agaw),
618 (uintmax_t)size, domain->agaw));
628 for (sf = NULL; size > 0; base += pg_sz, size -= pg_sz) {
629 for (lvl = 0; lvl < domain->pglvl; lvl++) {
630 if (lvl != domain->pglvl - 1 &&
641 if ((pte->pte & DMAR_PTE_SP) != 0 ||
642 lvl == domain->pglvl - 1) {
673 error = dmar_unmap_buf_locked(domain, entry->start, entry->end -
674 entry->start, flags, entry);
685 KASSERT(domain->pgtbl_obj == NULL,
688 unit = domain->dmar;
689 domain->pgtbl_obj = vm_pager_allocate(OBJT_PHYS, NULL,
690 IDX_TO_OFF(pglvl_max_pages(domain->pglvl)), 0, 0, NULL);
691 if (unit->memdomain != -1) {
692 domain->pgtbl_obj->domain.dr_policy = DOMAINSET_PREF(
693 unit->memdomain);
696 m = iommu_pgalloc(domain->pgtbl_obj, 0, IOMMU_PGF_WAITOK |
702 domain->iodom.flags |= IOMMU_DOMAIN_PGTBL_INITED;
713 obj = domain->pgtbl_obj;
715 KASSERT((domain->dmar->hw_ecap & DMAR_ECAP_PT) != 0 &&
716 (domain->iodom.flags & IOMMU_DOMAIN_IDMAP) != 0,
721 domain->pgtbl_obj = NULL;
723 if ((domain->iodom.flags & IOMMU_DOMAIN_IDMAP) != 0) {
725 domain->iodom.flags &= ~IOMMU_DOMAIN_IDMAP;
764 unit = domain->dmar;
765 KASSERT(!unit->qi_enabled, ("dmar%d: sync iotlb flush call",
766 unit->iommu.unit));
767 iro = DMAR_ECAP_IRO(unit->hw_ecap) * 16;
769 if ((unit->hw_cap & DMAR_CAP_PSI) == 0 || size > 2 * 1024 * 1024) {
771 DMAR_IOTLB_DID(domain->domain), iro);
774 ("dmar%d: invalidation failed %jx", unit->iommu.unit,
777 for (; size > 0; base += isize, size -= isize) {
782 DMAR_IOTLB_DID(domain->domain), iro);
787 unit->iommu.unit, (uintmax_t)iotlbr,
790 * Any non-page granularity covers whole guest
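
Editor's note: the synchronous IOTLB flush above issues a whole-domain invalidation when page-selective invalidation is not implemented (DMAR_CAP_PSI clear) or the range exceeds 2 MB; otherwise it walks the range in the largest naturally aligned power-of-two chunks the hardware can express in the address-mask field. The chunking arithmetic can be sketched in isolation as below; the helper name and the 4 KB page size are assumptions, not the driver's exact code.

#include <stdint.h>
#include <stdio.h>

#define	SKETCH_PAGE_SIZE	4096ULL	/* assumed base page size */

/*
 * Pick the largest chunk starting at 'base' that is a power-of-two
 * number of pages, is naturally aligned, and does not overrun 'size'.
 * Returns the address-mask order (log2 of the page count) and stores
 * the chunk size in *isizep.
 */
static int
sketch_calc_am(uint64_t base, uint64_t size, uint64_t *isizep)
{
	uint64_t isize;
	int am;

	for (am = 0;; am++) {
		isize = SKETCH_PAGE_SIZE << (am + 1);
		if ((base & (isize - 1)) != 0 || isize > size)
			break;
	}
	*isizep = SKETCH_PAGE_SIZE << am;
	return (am);
}

int
main(void)
{
	uint64_t base = 0x200000, size = 0x30000, isize;
	int am;

	/* Walk a 192 KB range starting at 2 MB in aligned chunks. */
	for (; size > 0; base += isize, size -= isize) {
		am = sketch_calc_am(base, size, &isize);
		printf("flush %#jx + %#jx (am=%d)\n",
		    (uintmax_t)base, (uintmax_t)isize, am);
	}
	return (0);
}
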