Lines matching "unit" in the Intel DMAR queued-invalidation (QI) code.
The fragments below are excerpts from the matched functions; lines the
search elided are marked with /* ... */.

/* Enable queued invalidation: set QIE, then poll until QIES is set. */
static int
dmar_enable_qi(struct dmar_unit *unit)
{
        /* ... */
        DMAR_ASSERT_LOCKED(unit);
        unit->hw_gcmd |= DMAR_GCMD_QIE;
        dmar_write4(unit, DMAR_GCMD_REG, unit->hw_gcmd);
        DMAR_WAIT_UNTIL(((dmar_read4(unit, DMAR_GSTS_REG) & DMAR_GSTS_QIES)
            != 0));
        /* ... */
}

/* Disable queued invalidation: clear QIE, then poll until QIES clears. */
static int
dmar_disable_qi(struct dmar_unit *unit)
{
        /* ... */
        DMAR_ASSERT_LOCKED(unit);
        unit->hw_gcmd &= ~DMAR_GCMD_QIE;
        dmar_write4(unit, DMAR_GCMD_REG, unit->hw_gcmd);
        DMAR_WAIT_UNTIL(((dmar_read4(unit, DMAR_GSTS_REG) & DMAR_GSTS_QIES)
            == 0));
        /* ... */
}
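Both functions use the same handshake: software toggles a bit in the global
command register, then polls the global status register until the hardware
mirrors it. A minimal user-space sketch of that pattern, with plain variables
standing in for DMAR_GCMD_REG/DMAR_GSTS_REG and an instantly-acknowledging
fake device; the QIE/QIES bit position follows the VT-d register layout:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define GCMD_QIE  (1u << 26)   /* queued-invalidation enable */
#define GSTS_QIES (1u << 26)   /* queued-invalidation enable status */

static uint32_t gcmd, gsts;

static void hw_update(void) { gsts = gcmd; } /* fake hardware: echo command */

static bool wait_until(bool want_set)
{
        for (int tries = 0; tries < 1000000; tries++) {
                hw_update();    /* real hardware flips the bit on its own */
                bool set = (gsts & GSTS_QIES) != 0;
                if (set == want_set)
                        return true;
        }
        return false;           /* DMAR_WAIT_UNTIL reports a timeout here */
}

int main(void)
{
        gcmd |= GCMD_QIE;       /* enable: set QIE, wait for QIES == 1 */
        printf("enable %s\n", wait_until(true) ? "ok" : "timeout");
        gcmd &= ~GCMD_QIE;      /* disable: clear QIE, wait for QIES == 0 */
        printf("disable %s\n", wait_until(false) ? "ok" : "timeout");
        return 0;
}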
/* Publish the software tail pointer to hardware. */
static void
dmar_qi_advance_tail(struct iommu_unit *iommu)
{
        struct dmar_unit *unit;

        unit = IOMMU2DMAR(iommu);
        DMAR_ASSERT_LOCKED(unit);
        dmar_write4(unit, DMAR_IQT_REG, unit->x86c.inv_queue_tail);
}
/* Reserve queue space for descr_count descriptors, spinning if full. */
static void
dmar_qi_ensure(struct iommu_unit *iommu, int descr_count)
{
        struct dmar_unit *unit;
        uint32_t head;
        int bytes;

        unit = IOMMU2DMAR(iommu);
        DMAR_ASSERT_LOCKED(unit);
        /* ... compute bytes needed for descr_count descriptors ... */
        for (;;) {
                if (bytes <= unit->x86c.inv_queue_avail)
                        break;
                /* Refresh the cached free count from the hardware head. */
                head = dmar_read4(unit, DMAR_IQH_REG);
                /* ... */
                unit->x86c.inv_queue_avail = head - unit->x86c.inv_queue_tail -
                    DMAR_IQ_DESCR_SZ;
                if (head <= unit->x86c.inv_queue_tail)
                        unit->x86c.inv_queue_avail += unit->x86c.inv_queue_size;
                if (bytes <= unit->x86c.inv_queue_avail)
                        break;
                /* Still full: push pending entries to hardware and retry. */
                dmar_qi_advance_tail(DMAR2IOMMU(unit));
                unit->x86c.inv_queue_full++;
                /* ... */
        }
        unit->x86c.inv_queue_avail -= bytes;
}
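The free-space computation treats the invalidation queue as a power-of-two
byte ring: hardware consumes at the head, software appends at the tail, and
one descriptor slot is kept in reserve so a full ring is distinguishable from
an empty one (head == tail means empty). A self-contained sketch of the same
arithmetic, with a hypothetical 4 KB ring:

#include <stdint.h>
#include <stdio.h>

#define QUEUE_SIZE 4096u   /* hypothetical ring size, a power of two */
#define DESCR_SZ 16u       /* legacy VT-d descriptors are 16 bytes */

static uint32_t avail_bytes(uint32_t head, uint32_t tail)
{
        /* one descriptor stays reserved, hence the subtraction */
        uint32_t avail = head - tail - DESCR_SZ;

        if (head <= tail)
                avail += QUEUE_SIZE;   /* unwrap the modular arithmetic */
        return avail;
}

int main(void)
{
        /* empty ring: everything but the reserved slot is free */
        printf("%u\n", avail_bytes(0, 0));        /* 4080 */
        /* hardware lags software by one descriptor */
        printf("%u\n", avail_bytes(16, 32));      /* 4064 */
        /* tail wrapped past head */
        printf("%u\n", avail_bytes(4080, 16));    /* 4048 */
        return 0;
}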
/* Write one 128-bit descriptor into the queue as two 64-bit words. */
static void
dmar_qi_emit(struct dmar_unit *unit, uint64_t data1, uint64_t data2)
{
        DMAR_ASSERT_LOCKED(unit);
#ifdef __LP64__
        atomic_store_64((uint64_t *)(unit->x86c.inv_queue +
            unit->x86c.inv_queue_tail), data1);
#else
        *(volatile uint64_t *)(unit->x86c.inv_queue +
            unit->x86c.inv_queue_tail) = data1;
#endif
        unit->x86c.inv_queue_tail += DMAR_IQ_DESCR_SZ / 2;
        KASSERT(unit->x86c.inv_queue_tail <= unit->x86c.inv_queue_size,
            ("tail overflow 0x%x 0x%jx", unit->x86c.inv_queue_tail,
            (uintmax_t)unit->x86c.inv_queue_size));
        unit->x86c.inv_queue_tail &= unit->x86c.inv_queue_size - 1;
#ifdef __LP64__
        atomic_store_64((uint64_t *)(unit->x86c.inv_queue +
            unit->x86c.inv_queue_tail), data2);
#else
        *(volatile uint64_t *)(unit->x86c.inv_queue +
            unit->x86c.inv_queue_tail) = data2;
#endif
        unit->x86c.inv_queue_tail += DMAR_IQ_DESCR_SZ / 2;
        KASSERT(unit->x86c.inv_queue_tail <= unit->x86c.inv_queue_size,
            ("tail overflow 0x%x 0x%jx", unit->x86c.inv_queue_tail,
            (uintmax_t)unit->x86c.inv_queue_size));
        unit->x86c.inv_queue_tail &= unit->x86c.inv_queue_size - 1;
}
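Because the ring size is a power of two, wrapping the tail is a mask rather
than a modulo, and the KASSERT checks the intermediate tail before masking.
A stand-alone sketch of the store-and-wrap pattern (ring size and contents
are illustrative only):

#include <stdint.h>
#include <stdio.h>

#define QUEUE_SIZE 256u   /* bytes; a power of two */
#define DESCR_SZ 16u      /* one descriptor = two 64-bit words */

static uint64_t queue[QUEUE_SIZE / 8];
static uint32_t tail;     /* byte offset, like inv_queue_tail */

static void emit(uint64_t data1, uint64_t data2)
{
        queue[tail / 8] = data1;                    /* low half */
        tail = (tail + DESCR_SZ / 2) & (QUEUE_SIZE - 1);
        queue[tail / 8] = data2;                    /* high half */
        tail = (tail + DESCR_SZ / 2) & (QUEUE_SIZE - 1);
}

int main(void)
{
        for (int i = 0; i < 20; i++)
                emit(i, 0);
        printf("tail = %u\n", tail);   /* 20 * 16 = 320, wraps to 64 */
        return 0;
}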
/* Emit an invalidation-wait descriptor (the iommu_qi_emit_wait_seq() back end). */
static void
dmar_qi_emit_wait_descr(struct iommu_unit *iommu, uint32_t seq, bool intr,
    bool memw, bool fence)
{
        struct dmar_unit *unit;

        unit = IOMMU2DMAR(iommu);
        DMAR_ASSERT_LOCKED(unit);
        dmar_qi_emit(unit, DMAR_IQ_DESCR_WAIT_ID |
            /* ... IF/SW/FN flags and the seq as status data ... */,
            memw ? unit->x86c.inv_waitd_seq_hw_phys : 0);
}
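The wait descriptor is what lets software synchronize with the asynchronous
queue: hardware can write a sequence number to memory (memw), raise an
interrupt (intr), and/or fence prior descriptors. A hedged sketch of how the
two quadwords might be assembled; the WAIT_* constants and bit positions
follow the VT-d invalidation-wait layout as recalled here, and the elided
flag lines above are assumed to do the equivalent:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define WAIT_ID 0x5ull        /* descriptor type: invalidation wait */
#define WAIT_IF (1ull << 4)   /* interrupt on completion */
#define WAIT_SW (1ull << 5)   /* status write to the address in word 2 */
#define WAIT_FN (1ull << 6)   /* fence: finish prior descriptors first */

static void emit_wait(uint32_t seq, bool intr, bool memw, bool fence,
    uint64_t seq_hw_phys)
{
        uint64_t d1 = WAIT_ID |
            (intr ? WAIT_IF : 0) |
            (memw ? WAIT_SW : 0) |
            (fence ? WAIT_FN : 0) |
            (memw ? (uint64_t)seq << 32 : 0);  /* status data: the seq */
        uint64_t d2 = memw ? seq_hw_phys : 0;  /* status address */

        printf("wait descr: 0x%016jx 0x%016jx\n",
            (uintmax_t)d1, (uintmax_t)d2);
}

int main(void)
{
        emit_wait(42, true, true, false, 0x1000);
        return 0;
}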
/* Emit page-selective IOTLB invalidations covering [base, base + size). */
void
dmar_qi_invalidate_emit(struct iommu_domain *idomain, iommu_gaddr_t base,
    iommu_gaddr_t size, struct iommu_qi_genseq *pseq, bool emit_wait)
{
        struct dmar_unit *unit;
        struct dmar_domain *domain;
        iommu_gaddr_t isize;
        int am;

        /* ... */
        unit = domain->dmar;
        DMAR_ASSERT_LOCKED(unit);
        for (; size > 0; base += isize, size -= isize) {
                am = calc_am(unit, base, size, &isize);
                dmar_qi_ensure(DMAR2IOMMU(unit), 1);
                dmar_qi_emit(unit, DMAR_IQ_DESCR_IOTLB_INV |
                    /* ... page-selective flags, domain id, base | am ... */);
        }
        iommu_qi_emit_wait_seq(DMAR2IOMMU(unit), pseq, emit_wait);
}
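calc_am() (not shown in full here) decides how much each descriptor can
cover: VT-d page-selective invalidation names a naturally aligned block of
2^am pages, so am is limited both by the alignment of base and by the
remaining size. A simplified, hedged reimplementation of that idea, which
ignores the hardware cap on am that the real helper must honor:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE (1u << PAGE_SHIFT)

static int calc_am_sketch(uint64_t base, uint64_t size, uint64_t *isize)
{
        int am = 0;

        while (am < 6) {   /* arbitrary cap for the sketch */
                uint64_t next = (uint64_t)PAGE_SIZE << (am + 1);

                /* grow while base stays aligned and the block fits */
                if ((base & (next - 1)) != 0 || next > size)
                        break;
                am++;
        }
        *isize = (uint64_t)PAGE_SIZE << am;
        return am;
}

int main(void)
{
        uint64_t base = 0x1000, size = 3 * PAGE_SIZE, isize;

        /* same loop shape as the emit function above */
        while (size > 0) {
                int am = calc_am_sketch(base, size, &isize);

                printf("base 0x%jx am %d (%ju pages)\n",
                    (uintmax_t)base, am, (uintmax_t)(isize / PAGE_SIZE));
                base += isize;
                size -= isize;
        }
        return 0;   /* prints am 0 (1 page), then am 1 (2 pages) */
}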
static void
dmar_qi_invalidate_glob_impl(struct dmar_unit *unit, uint64_t data1)
{
        struct iommu_qi_genseq gseq;

        DMAR_ASSERT_LOCKED(unit);
        dmar_qi_ensure(DMAR2IOMMU(unit), 2);
        dmar_qi_emit(unit, data1, 0);
        iommu_qi_emit_wait_seq(DMAR2IOMMU(unit), &gseq, true);
        /* Count ourselves as a waiter before hardware sees the tail. */
        unit->x86c.inv_seq_waiters++;
        dmar_qi_advance_tail(DMAR2IOMMU(unit));
        iommu_qi_wait_for_seq(DMAR2IOMMU(unit), &gseq, false);
}
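This is the canonical QI sequence: reserve two slots, queue the invalidation
plus a wait descriptor, register as a waiter, push the tail to hardware, then
wait for the sequence number to complete. A minimal model of the
sequence-number handshake; the kernel pairs the sequence with a generation
counter to survive 32-bit wrap, while this sketch relies on signed
wraparound comparison, and the hardware side is simulated inline:

#include <stdint.h>
#include <stdio.h>

static volatile uint32_t seq_hw;   /* hardware-written completion seq */
static uint32_t seq_sw;            /* next sequence number to assign */

static uint32_t emit_wait_seq(void)
{
        return ++seq_sw;   /* a wait descriptor would carry this value */
}

static void fake_hardware_progress(void) { seq_hw = seq_sw; }

static void wait_for_seq(uint32_t gseq)
{
        /* the nowait variant spins; the sleeping one uses wakeup() */
        while ((int32_t)(seq_hw - gseq) < 0)
                fake_hardware_progress();
}

int main(void)
{
        uint32_t gseq = emit_wait_seq();

        wait_for_seq(gseq);
        printf("seq %u completed\n", gseq);
        return 0;
}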
void
dmar_qi_invalidate_ctx_glob_locked(struct dmar_unit *unit)
{
        dmar_qi_invalidate_glob_impl(unit, DMAR_IQ_DESCR_CTX_INV |
            DMAR_IQ_DESCR_CTX_GLOB);
}

void
dmar_qi_invalidate_iotlb_glob_locked(struct dmar_unit *unit)
{
        dmar_qi_invalidate_glob_impl(unit, DMAR_IQ_DESCR_IOTLB_INV |
            DMAR_IQ_DESCR_IOTLB_GLOB | DMAR_IQ_DESCR_IOTLB_DW |
            DMAR_IQ_DESCR_IOTLB_DR);
}

void
dmar_qi_invalidate_iec_glob(struct dmar_unit *unit)
{
        dmar_qi_invalidate_glob_impl(unit, DMAR_IQ_DESCR_IEC_INV);
}
void
dmar_qi_invalidate_iec(struct dmar_unit *unit, u_int start, u_int cnt)
{
        struct iommu_qi_genseq gseq;

        DMAR_ASSERT_LOCKED(unit);
        KASSERT(start < unit->irte_cnt && start < start + cnt &&
            start + cnt <= unit->irte_cnt,
            ("inv iec overflow %d %d %d", unit->irte_cnt, start, cnt));
        /* ... for each power-of-two block of the IRTE range ... */
        dmar_qi_ensure(DMAR2IOMMU(unit), 1);
        dmar_qi_emit(unit, DMAR_IQ_DESCR_IEC_INV |
            /* ... IEC index and index-mask fields ... */);
        /* ... */
        dmar_qi_ensure(DMAR2IOMMU(unit), 1);
        iommu_qi_emit_wait_seq(DMAR2IOMMU(unit), &gseq, true);
        unit->x86c.inv_seq_waiters++;
        dmar_qi_advance_tail(DMAR2IOMMU(unit));
        /*
         * Busy-wait for completion: the caller may not be allowed to
         * sleep.  This is costly for parallel DMA operations, since we
         * own the dmar unit lock until the whole invalidation queue is
         * processed, including requests possibly issued before ours.
         */
        iommu_qi_wait_for_seq(DMAR2IOMMU(unit), &gseq, true);
}
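The elided loop splits [start, start + cnt) into the blocks an IEC
descriptor can express: naturally aligned power-of-two runs of IRTEs. A
sketch of that split rule, assuming the common lowest-set-bit formulation
(the block size is the lowest set bit of start | cnt, which both divides
the current alignment and never exceeds cnt):

#include <stdio.h>
#include <strings.h>   /* ffs() */

static void split(unsigned start, unsigned cnt)
{
        while (cnt > 0) {
                /* largest aligned power-of-two block usable here */
                unsigned c = 1u << (ffs((int)(start | cnt)) - 1);

                printf("invalidate [%u, %u) mask order %d\n",
                    start, start + c, ffs((int)c) - 1);
                start += c;
                cnt -= c;
        }
}

int main(void)
{
        split(3, 5);   /* -> [3,4) order 0, then [4,8) order 2 */
        return 0;
}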
/* Interrupt filter: defer completion processing to the taskqueue. */
int
dmar_qi_intr(void *arg)
{
        struct dmar_unit *unit;

        unit = IOMMU2DMAR((struct iommu_unit *)arg);
        KASSERT(unit->qi_enabled, ("dmar%d: QI is not enabled",
            unit->iommu.unit));
        taskqueue_enqueue(unit->x86c.qi_taskqueue, &unit->x86c.qi_task);
        return (FILTER_HANDLED);
}
static void
dmar_qi_task(void *arg, int pending __unused)
{
        struct dmar_unit *unit;
        uint32_t ics;

        unit = IOMMU2DMAR(arg);
        iommu_qi_drain_tlb_flush(DMAR2IOMMU(unit));
        /* Acknowledge a completed invalidation wait, if signalled. */
        ics = dmar_read4(unit, DMAR_ICS_REG);
        if ((ics & DMAR_ICS_IWC) != 0) {
                ics = DMAR_ICS_IWC;
                dmar_write4(unit, DMAR_ICS_REG, ics);
                iommu_qi_drain_tlb_flush(DMAR2IOMMU(unit));
        }
        if (unit->x86c.inv_seq_waiters > 0) {
                /* Take the lock so that the wakeup is not missed. */
                DMAR_LOCK(unit);
                wakeup(&unit->x86c.inv_seq_waiters);
                DMAR_UNLOCK(unit);
        }
}
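Taking the unit lock just to call wakeup() is the classic missed-wakeup
guard: the waiter tests its condition and goes to sleep atomically under the
same lock, so the waker cannot slip its notification in between the check
and the sleep. The same discipline expressed with POSIX primitives rather
than the kernel's sleep/wakeup (compile with -lpthread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool done;

static void *waiter(void *arg)
{
        pthread_mutex_lock(&lock);
        while (!done)                  /* check and sleep are atomic */
                pthread_cond_wait(&cond, &lock);
        pthread_mutex_unlock(&lock);
        printf("waiter woke\n");
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, waiter, NULL);
        pthread_mutex_lock(&lock);     /* like DMAR_LOCK(unit) */
        done = true;
        pthread_cond_broadcast(&cond); /* like wakeup(&...inv_seq_waiters) */
        pthread_mutex_unlock(&lock);
        pthread_join(t, NULL);
        return 0;
}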
int
dmar_init_qi(struct dmar_unit *unit)
{
        uint64_t iqa;
        uint32_t ics;
        u_int qi_sz;

        if (!DMAR_HAS_QI(unit) || (unit->hw_cap & DMAR_CAP_CM) != 0)
                return (0);
        unit->qi_enabled = 1;
        TUNABLE_INT_FETCH("hw.dmar.qi", &unit->qi_enabled);
        if (!unit->qi_enabled)
                return (0);

        unit->x86c.qi_buf_maxsz = DMAR_IQA_QS_MAX;
        unit->x86c.qi_cmd_sz = DMAR_IQ_DESCR_SZ;
        iommu_qi_common_init(DMAR2IOMMU(unit), dmar_qi_task);
        /* ... */
        qi_sz = ilog2(unit->x86c.inv_queue_size / PAGE_SIZE);

        DMAR_LOCK(unit);
        dmar_write8(unit, DMAR_IQT_REG, 0);
        iqa = pmap_kextract((uintptr_t)unit->x86c.inv_queue);
        iqa |= qi_sz;
        dmar_write8(unit, DMAR_IQA_REG, iqa);
        dmar_enable_qi(unit);
        /* Clear a leftover invalidation wait completion, if any. */
        ics = dmar_read4(unit, DMAR_ICS_REG);
        if ((ics & DMAR_ICS_IWC) != 0) {
                ics = DMAR_ICS_IWC;
                dmar_write4(unit, DMAR_ICS_REG, ics);
        }
        dmar_enable_qi_intr(DMAR2IOMMU(unit));
        DMAR_UNLOCK(unit);

        return (0);
}
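DMAR_IQA_REG packs two things into one 64-bit register: the page-aligned
physical base of the queue, and the queue size encoded in the low bits as
log2 of the size in 4 KB pages, which is why qi_sz above can simply be OR'ed
into the address. A small worked example with a hypothetical base address:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

int main(void)
{
        uint64_t phys = 0x12340000;    /* hypothetical queue base */
        uint32_t size = 8 * PAGE_SIZE; /* ring size in bytes */
        unsigned qi_sz = 0;

        while ((PAGE_SIZE << qi_sz) < size)  /* ilog2(size / PAGE_SIZE) */
                qi_sz++;
        uint64_t iqa = phys | qi_sz;
        printf("IQA = 0x%jx (QS=%u -> %u pages)\n",
            (uintmax_t)iqa, qi_sz, 1u << qi_sz);
        return 0;   /* IQA = 0x12340003 */
}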
void
dmar_fini_qi(struct dmar_unit *unit)
{
        if (!unit->qi_enabled)
                return;
        iommu_qi_common_fini(DMAR2IOMMU(unit), dmar_fini_qi_helper);
        unit->qi_enabled = 0;
}
/* Unmask the queued invalidation completion interrupt. */
void
dmar_enable_qi_intr(struct iommu_unit *iommu)
{
        struct dmar_unit *unit;
        uint32_t iectl;

        unit = IOMMU2DMAR(iommu);
        DMAR_ASSERT_LOCKED(unit);
        KASSERT(DMAR_HAS_QI(unit), ("dmar%d: QI is not supported",
            unit->iommu.unit));
        iectl = dmar_read4(unit, DMAR_IECTL_REG);
        iectl &= ~DMAR_IECTL_IM;
        dmar_write4(unit, DMAR_IECTL_REG, iectl);
}
/* Mask the queued invalidation completion interrupt. */
void
dmar_disable_qi_intr(struct iommu_unit *iommu)
{
        struct dmar_unit *unit;
        uint32_t iectl;

        unit = IOMMU2DMAR(iommu);
        DMAR_ASSERT_LOCKED(unit);
        KASSERT(DMAR_HAS_QI(unit), ("dmar%d: QI is not supported",
            unit->iommu.unit));
        iectl = dmar_read4(unit, DMAR_IECTL_REG);
        dmar_write4(unit, DMAR_IECTL_REG, iectl | DMAR_IECTL_IM);
}
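The enable/disable pair is a read-modify-write of the single interrupt-mask
bit in DMAR_IECTL_REG: clearing DMAR_IECTL_IM unmasks the invalidation
completion interrupt, setting it masks it. A minimal model; the bit position
follows the VT-d fault/event control register layout:

#include <stdint.h>
#include <stdio.h>

#define IECTL_IM (1u << 31)   /* interrupt mask bit */

static uint32_t iectl_reg;

static void enable_intr(void)  { iectl_reg &= ~IECTL_IM; }
static void disable_intr(void) { iectl_reg |= IECTL_IM; }

int main(void)
{
        disable_intr();
        printf("masked: %d\n", (iectl_reg & IECTL_IM) != 0);  /* 1 */
        enable_intr();
        printf("masked: %d\n", (iectl_reg & IECTL_IM) != 0);  /* 0 */
        return 0;
}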