Lines Matching full:entry
31 #define RB_AUGMENT_CHECK(entry) iommu_gas_augment_entry(entry)
110 iommu_gas_free_entry(struct iommu_map_entry *entry)
115 n = vm_page_free_pages_toq(&entry->pgtbl_free, false);
119 domain = entry->domain;
122 uma_zfree(iommu_map_entry_zone, entry);
130 KASSERT(a->start <= a->end, ("inverted entry %p (%jx, %jx)",
132 KASSERT(b->start <= b->end, ("inverted entry %p (%jx, %jx)",
155 iommu_gas_augment_entry(struct iommu_map_entry *entry)
161 bound = entry->start;
162 if ((child = RB_LEFT(entry, rb_entry)) != NULL) {
166 delta = bound - entry->first;
167 entry->first = bound;
168 bound = entry->end;
169 if ((child = RB_RIGHT(entry, rb_entry)) != NULL) {
174 delta += entry->last - bound;
176 delta = entry->free_down - free_down;
177 entry->last = bound;
178 entry->free_down = free_down;
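
The fragments above (lines 155-178, wired into <sys/tree.h> by the RB_AUGMENT_CHECK define at line 31) carry the allocator's central invariant: each node caches the lowest start ("first") and highest end ("last") in its subtree plus the size of the largest free gap anywhere below it ("free_down"), and the hook's return value tells the tree code whether to keep propagating updates toward the root. Below is a minimal userspace sketch of the same pattern, compilable on FreeBSD; gap_node, gap_tree, and GAP_MAX are hypothetical names, not identifiers from the source above.

#include <sys/tree.h>
#include <stdbool.h>
#include <stdint.h>

#define GAP_MAX(a, b)   ((a) > (b) ? (a) : (b))

struct gap_node {
        RB_ENTRY(gap_node) link;
        uint64_t start, end;    /* this node's allocated range */
        uint64_t first, last;   /* subtree-wide lowest start / highest end */
        uint64_t free_down;     /* largest free gap in this subtree */
};

static bool gap_augment(struct gap_node *);
#define RB_AUGMENT_CHECK(n)     gap_augment(n)

static int
gap_cmp(struct gap_node *a, struct gap_node *b)
{
        return (a->start < b->start ? -1 : a->start > b->start);
}

RB_HEAD(gap_tree, gap_node);
RB_GENERATE_STATIC(gap_tree, gap_node, link, gap_cmp);

/* Recompute the cached fields; report whether anything changed. */
static bool
gap_augment(struct gap_node *n)
{
        struct gap_node *l, *r;
        uint64_t first, last, free_down;

        l = RB_LEFT(n, link);
        r = RB_RIGHT(n, link);
        first = l != NULL ? l->first : n->start;
        last = r != NULL ? r->last : n->end;
        free_down = 0;
        if (l != NULL)  /* gap between left subtree and this node */
                free_down = GAP_MAX(l->free_down, n->start - l->last);
        if (r != NULL)  /* gap between this node and right subtree */
                free_down = GAP_MAX(free_down,
                    GAP_MAX(r->free_down, r->first - n->end));
        if (first == n->first && last == n->last && free_down == n->free_down)
                return (false);
        n->first = first;
        n->last = last;
        n->free_down = free_down;
        return (true);
}
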
194 struct iommu_map_entry *entry, *l, *r;
197 RB_FOREACH(entry, iommu_gas_entries_tree, &domain->rb_root) {
198 KASSERT(domain == entry->domain,
199 ("mismatched free domain %p entry %p entry->domain %p",
200 domain, entry, entry->domain));
201 l = RB_LEFT(entry, rb_entry);
202 r = RB_RIGHT(entry, rb_entry);
206 v = MAX(v, entry->start - l->last);
210 v = MAX(v, r->first - entry->end);
212 MPASS(entry->free_down == v);
218 iommu_gas_rb_remove(struct iommu_domain *domain, struct iommu_map_entry *entry)
222 /* Removing entry may open a new free gap before domain->start_gap. */
223 if (entry->end <= domain->start_gap->end) {
224 if (RB_RIGHT(entry, rb_entry) != NULL)
225 nbr = iommu_gas_entries_tree_RB_NEXT(entry);
226 else if (RB_LEFT(entry, rb_entry) != NULL)
227 nbr = RB_LEFT(entry, rb_entry);
229 nbr = RB_PARENT(entry, rb_entry);
232 RB_REMOVE(iommu_gas_entries_tree, &domain->rb_root, entry);
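
Lines 218-232 protect the domain's cached start_gap hint: removing an entry may open a new free gap below the hint, so before the node is unlinked the hint is backed off to a surviving neighbor. The same repair for the hypothetical gap_tree above, with the neighbor chosen exactly as in the listing (in-tree successor if a right subtree exists, else the left child, else the parent):

/* Unlink 'entry' while keeping the cached first-big-gap hint valid. */
static void
gap_tree_remove(struct gap_tree *t, struct gap_node **start_gap,
    struct gap_node *entry)
{
        struct gap_node *nbr;

        if (*start_gap != NULL && entry->end <= (*start_gap)->end) {
                if (RB_RIGHT(entry, link) != NULL)
                        nbr = RB_NEXT(gap_tree, t, entry);
                else if (RB_LEFT(entry, link) != NULL)
                        nbr = RB_LEFT(entry, link);
                else
                        nbr = RB_PARENT(entry, link);
                *start_gap = nbr;
        }
        RB_REMOVE(gap_tree, t, entry);
}
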
277 struct iommu_map_entry *entry;
283 entry = RB_MIN(iommu_gas_entries_tree, &domain->rb_root);
284 KASSERT(entry->start == 0, ("start entry start %p", domain));
285 KASSERT(entry->end == IOMMU_PAGE_SIZE, ("start entry end %p", domain));
286 KASSERT(entry->flags ==
288 ("start entry flags %p", domain));
289 iommu_gas_rb_remove(domain, entry);
290 iommu_gas_free_entry(entry);
292 entry = RB_MAX(iommu_gas_entries_tree, &domain->rb_root);
293 KASSERT(entry->start == domain->end, ("end entry start %p", domain));
294 KASSERT(entry->end == domain->end, ("end entry end %p", domain));
295 KASSERT(entry->flags ==
297 ("end entry flags %p", domain));
298 iommu_gas_rb_remove(domain, entry);
299 iommu_gas_free_entry(entry);
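
The teardown at lines 277-299 removes two placeholder entries that bracket every domain: one pinned at [0, IOMMU_PAGE_SIZE) so that bus address zero is never handed out, and a zero-size one at the domain end, so real allocations always have both a predecessor and a successor. A sketch of the matching setup for the gap_tree above; GAP_PAGE_SIZE is a hypothetical stand-in for IOMMU_PAGE_SIZE:

#include <errno.h>
#include <stdlib.h>

#define GAP_PAGE_SIZE   4096    /* stand-in for IOMMU_PAGE_SIZE */

/* Bracket the tree so real allocations always have both neighbors. */
static int
gap_tree_init(struct gap_tree *t, uint64_t domain_end)
{
        struct gap_node *begin, *end;

        begin = calloc(1, sizeof(*begin));
        end = calloc(1, sizeof(*end));
        if (begin == NULL || end == NULL) {
                free(begin);
                free(end);
                return (ENOMEM);
        }
        begin->start = 0;
        begin->end = GAP_PAGE_SIZE;     /* keep bus address 0 unmapped */
        end->start = end->end = domain_end;
        RB_INSERT(gap_tree, t, begin);
        RB_INSERT(gap_tree, t, end);
        return (0);
}
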
307 struct iommu_map_entry *entry;
320 struct iommu_map_entry *entry;
326 * causes page alignment for the entry->start too.
328 * Create IOMMU_PAGE_SIZE gaps before, after new entry
364 * start of the next entry, then we do not have a gap.
373 entry = a->entry;
374 entry->start = start;
375 entry->end = start + roundup2(size + offset, IOMMU_PAGE_SIZE);
376 entry->flags = IOMMU_MAP_ENTRY_MAP;
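
Line 375 rounds the mapping end up to a page boundary. roundup2() comes from <sys/param.h> and is only valid for power-of-two alignments; its definition and effect, for illustration:

/* roundup2() as in <sys/param.h>; 'y' must be a power of two. */
#define roundup2(x, y)  (((x) + ((y) - 1)) & ~((y) - 1))

/*
 * Example: a 0x234-byte buffer starting at page offset 0x10 needs
 * roundup2(0x234 + 0x10, 0x1000) == 0x1000, i.e. one full page.
 */
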
380 /* Find the next entry that might abut a big-enough range. */
388 /* Find next entry in right subtree. */
394 /* Find next entry in a left-parent ancestor. */
408 * domain->start_gap points to a map entry less than or adjacent to the first
419 KASSERT(a->entry->flags == 0,
420 ("dirty entry %p %p", domain, a->entry));
423 * start_gap may point to an entry adjacent to gaps too small for any
465 &domain->rb_root, curr, a->entry);
476 &domain->rb_root, curr, a->entry);
483 * the nearest ancestor that spans highaddr. Then find the last entry
509 &domain->rb_root, curr, a->entry);
516 &domain->rb_root, curr, a->entry);
525 iommu_gas_alloc_region(struct iommu_domain *domain, struct iommu_map_entry *entry,
532 if ((entry->start & IOMMU_PAGE_MASK) != 0 ||
533 (entry->end & IOMMU_PAGE_MASK) != 0)
535 if (entry->start >= entry->end)
537 if (entry->end >= domain->end)
540 entry->flags |= IOMMU_MAP_ENTRY_FAKE;
541 next = RB_NFIND(iommu_gas_entries_tree, &domain->rb_root, entry);
543 (uintmax_t)entry->start));
546 entry->flags &= ~IOMMU_MAP_ENTRY_FAKE;
556 if (prev != NULL && prev->end > entry->start &&
561 entry->start = prev->end;
563 if (next->start < entry->end &&
568 entry->end = next->start;
570 if (entry->end == entry->start)
573 if (prev != NULL && prev->end > entry->start) {
574 /* This assumes that prev is the placeholder entry. */
579 &domain->rb_root, next, entry);
580 if (next->start < entry->end) {
586 entry->flags = IOMMU_MAP_ENTRY_RMRR;
590 ip = RB_PREV(iommu_gas_entries_tree, &domain->rb_root, entry);
591 in = RB_NEXT(iommu_gas_entries_tree, &domain->rb_root, entry);
594 entry, entry->start, entry->end, prev,
599 entry, entry->start, entry->end, next,
608 iommu_gas_free_space(struct iommu_map_entry *entry)
612 domain = entry->domain;
613 KASSERT((entry->flags & (IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_RMRR |
615 ("permanent entry %p %p", domain, entry));
618 iommu_gas_rb_remove(domain, entry);
619 entry->flags &= ~IOMMU_MAP_ENTRY_MAP;
628 iommu_gas_free_region(struct iommu_map_entry *entry)
632 domain = entry->domain;
633 KASSERT((entry->flags & (IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_RMRR |
635 ("non-RMRR entry %p %p", domain, entry));
638 if (entry != domain->first_place &&
639 entry != domain->last_place)
640 iommu_gas_rb_remove(domain, entry);
641 entry->flags &= ~IOMMU_MAP_ENTRY_RMRR;
649 struct iommu_map_entry *entry, *res, fentry;
656 * Find an entry which contains the supplied guest's address
657 * start, or the first entry after the start. Since we
658 * asserted that start is below domain end, entry should
665 entry = RB_NFIND(iommu_gas_entries_tree, &domain->rb_root, &fentry);
667 if (entry->start >= start ||
668 (entry->flags & IOMMU_MAP_ENTRY_RMRR) != 0)
669 return (entry);
673 *res = *entry;
674 res->start = entry->end = start;
675 RB_UPDATE_AUGMENT(entry, rb_entry);
677 &domain->rb_root, entry, res);
683 iommu_gaddr_t end, struct iommu_map_entry *entry,
686 if (entry->start >= end || (entry->flags & IOMMU_MAP_ENTRY_RMRR) != 0)
689 *r = *entry;
690 r->end = entry->start = end;
691 RB_UPDATE_AUGMENT(entry, rb_entry);
693 &domain->rb_root, entry, r);
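
Lines 649-693 split an existing entry at a removal boundary: the entry is cloned, one half is shrunk in place, the augmented fields are refreshed with RB_UPDATE_AUGMENT(), and the clone is linked in as the direct neighbor without a fresh top-down insertion. The same pattern for the hypothetical gap_tree above:

/*
 * Split 'entry' at 'boundary' (entry->start < boundary < entry->end):
 * 'clone' becomes the upper half and is inserted right after 'entry'.
 */
static void
gap_tree_clip(struct gap_tree *t, struct gap_node *entry,
    struct gap_node *clone, uint64_t boundary)
{
        *clone = *entry;
        clone->start = entry->end = boundary;
        RB_UPDATE_AUGMENT(entry, link);
        RB_INSERT_NEXT(gap_tree, t, entry, clone);
}
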
699 struct iommu_map_entry *entry, struct iommu_map_entries_tailq *gcp)
703 if ((entry->flags & (IOMMU_MAP_ENTRY_UNMAPPED |
707 MPASS((entry->flags & IOMMU_MAP_ENTRY_PLACE) == 0);
708 entry->flags |= IOMMU_MAP_ENTRY_REMOVING;
709 TAILQ_INSERT_TAIL(gcp, entry, dmamap_link);
718 struct iommu_map_entry *entry, *nentry;
726 RB_FOREACH_FROM(entry, iommu_gas_entries_tree, nentry) {
727 if (entry->start >= end)
729 KASSERT(start <= entry->start,
730 ("iommu_gas_remove entry (%#jx, %#jx) start %#jx",
731 entry->start, entry->end, start));
732 iommu_gas_remove_unmap(domain, entry, gc);
734 if (iommu_gas_remove_clip_right(domain, end, entry, *r2)) {
740 RB_FOREACH(entry, iommu_gas_entries_tree, &domain->rb_root) {
741 if ((entry->flags & (IOMMU_MAP_ENTRY_RMRR |
744 KASSERT(entry->end <= start || entry->start >= end,
745 ("iommu_gas_remove leftover entry (%#jx, %#jx) range "
747 entry->start, entry->end, start, end));
804 struct iommu_map_entry *entry;
814 entry = iommu_gas_alloc_entry(domain,
816 if (entry == NULL)
818 a.entry = entry;
823 iommu_gas_free_entry(entry);
832 KASSERT(entry->end < domain->end, ("allocated GPA %jx, max GPA %jx",
833 (uintmax_t)entry->end, (uintmax_t)domain->end));
834 entry->flags |= eflags;
837 error = domain->ops->map(domain, entry, ma, eflags,
840 iommu_domain_unload_entry(entry, true,
847 *res = entry;
852 iommu_gas_map_region(struct iommu_domain *domain, struct iommu_map_entry *entry,
858 KASSERT(entry->domain == domain,
859 ("mismatched domain %p entry %p entry->domain %p", domain,
860 entry, entry->domain));
861 KASSERT(entry->flags == 0, ("used RMRR entry %p %p %x", domain,
862 entry, entry->flags));
866 start = entry->start;
868 error = iommu_gas_alloc_region(domain, entry, flags);
873 entry->flags |= eflags;
875 if (entry->end == entry->start)
878 error = domain->ops->map(domain, entry,
879 ma + OFF_TO_IDX(start - entry->start), eflags,
882 iommu_domain_unload_entry(entry, false,
894 iommu_gaddr_t start, iommu_gaddr_t end, struct iommu_map_entry *entry)
900 entry->start = start;
901 entry->end = end;
902 error = iommu_gas_alloc_region(domain, entry, IOMMU_MF_CANWAIT);
904 entry->flags |= IOMMU_MAP_ENTRY_UNMAPPED;
912 struct iommu_map_entry *entry;
915 entry = iommu_gas_alloc_entry(domain, IOMMU_PGF_WAITOK);
917 error = iommu_gas_reserve_region_locked(domain, start, end, entry);
920 iommu_gas_free_entry(entry);
922 *entry0 = entry;
934 struct iommu_map_entry *entry, *next, *prev, key = {};
939 entry = NULL;
942 /* Preallocate an entry. */
943 if (entry == NULL)
944 entry = iommu_gas_alloc_entry(domain,
946 /* Calculate the free region from here to the next entry. */
950 KASSERT(next != NULL, ("domain %p with end %#jx has no entry "
963 entry_start, entry_end, entry);
968 entry = NULL;
972 /* Release a preallocated entry if it was not used. */
973 if (entry != NULL)
974 iommu_gas_free_entry(entry);
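
Lines 934-974 use a standard kernel idiom: preallocate one spare entry per iteration while sleeping is still allowed, consume it under the lock only when a free sub-range actually needs reserving, and release the leftover on exit. A skeleton of the idiom for the gap_tree above, turning every gap between neighbors into an entry:

static void
reserve_all_gaps(struct gap_tree *t)
{
        struct gap_node *spare, *cur, *next;

        spare = NULL;
        for (cur = RB_MIN(gap_tree, t); cur != NULL; cur = next) {
                if (spare == NULL) {
                        /* Sleep-safe allocation point, as in the listing. */
                        spare = calloc(1, sizeof(*spare));
                        if (spare == NULL)
                                break;
                }
                next = RB_NEXT(gap_tree, t, cur);
                if (next != NULL && cur->end < next->start) {
                        /* Turn the gap between cur and next into an entry. */
                        spare->start = cur->end;
                        spare->end = next->start;
                        RB_INSERT(gap_tree, t, spare);
                        spare = NULL;   /* consumed; refill next iteration */
                }
        }
        free(spare);    /* release the unused preallocation, if any */
}
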
981 struct iommu_map_entry *entry;
985 entry = domain->msi_entry;
986 if (entry == NULL)
989 domain->ops->unmap(domain, entry, IOMMU_PGF_WAITOK);
991 iommu_gas_free_space(entry);
993 iommu_gas_free_entry(entry);
1005 struct iommu_map_entry *entry;
1013 entry = domain->msi_entry;
1016 if (entry == NULL) {
1018 eflags, flags, ma, &entry);
1025 domain->msi_entry = entry;
1026 domain->msi_base = entry->start;
1031 * MSI page allocated. Free the unneeded entry.
1033 iommu_gas_free_entry(entry);
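
Lines 1005-1033 lazily create the domain's single MSI mapping and tolerate a lost race: the lock is dropped for the sleeping allocation, retaken, and if another thread installed an entry in the meantime the local one is freed. The shape of that pattern, sketched with pthreads and with calloc() standing in for the full allocate-and-map step:

#include <pthread.h>

static struct gap_node *msi_entry;      /* protected by msi_lock */
static pthread_mutex_t msi_lock = PTHREAD_MUTEX_INITIALIZER;

static int
lazy_map_msi(void)
{
        struct gap_node *entry;

        pthread_mutex_lock(&msi_lock);
        if (msi_entry == NULL) {
                pthread_mutex_unlock(&msi_lock);
                entry = calloc(1, sizeof(*entry));      /* may sleep */
                pthread_mutex_lock(&msi_lock);
                if (entry == NULL) {
                        pthread_mutex_unlock(&msi_lock);
                        return (ENOMEM);
                }
                if (msi_entry == NULL)
                        msi_entry = entry;      /* we won the race */
                else
                        free(entry);    /* raced; keep the winner's entry */
        }
        pthread_mutex_unlock(&msi_lock);
        return (0);
}
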
1055 ("%s: Address is below the MSI entry start address (%jx < %jx)",
1059 ("%s: Address is above the MSI entry end address (%jx > %jx)",
1079 struct iommu_map_entry *entry;
1084 RB_FOREACH(entry, iommu_gas_entries_tree, &domain->rb_root) {
1087 entry, (uintmax_t)entry->start, (uintmax_t)entry->end,
1088 entry->flags,
1089 (uintmax_t)entry->first, (uintmax_t)entry->last,
1090 (uintmax_t)entry->free_down);
1091 if (entry == domain->start_gap)
1093 if (entry == domain->first_place)
1095 if (entry == domain->last_place)