Lines matching "secure-reg-access"
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
5 * Copyright (C) 2020-2022 Andrew Turner
79 #define VGIC_SGI_NUM (GIC_LAST_SGI - GIC_FIRST_SGI + 1)
80 #define VGIC_PPI_NUM (GIC_LAST_PPI - GIC_FIRST_PPI + 1)
81 #define VGIC_SPI_NUM (GIC_LAST_SPI - GIC_FIRST_SPI + 1)
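These three counts fall straight out of the architectural GICv3 INTID map. A standalone sketch, assuming the conventional GIC_FIRST_*/GIC_LAST_* values (SGIs 0-15, PPIs 16-31, SPIs 32-1019), which the real headers are expected to match:

#include <assert.h>

#define	GIC_FIRST_SGI	0	/* assumed architectural values */
#define	GIC_LAST_SGI	15
#define	GIC_FIRST_PPI	16
#define	GIC_LAST_PPI	31
#define	GIC_FIRST_SPI	32
#define	GIC_LAST_SPI	1019

#define	VGIC_SGI_NUM	(GIC_LAST_SGI - GIC_FIRST_SGI + 1)
#define	VGIC_PPI_NUM	(GIC_LAST_PPI - GIC_FIRST_PPI + 1)
#define	VGIC_SPI_NUM	(GIC_LAST_SPI - GIC_FIRST_SPI + 1)

int
main(void)
{
	assert(VGIC_SGI_NUM == 16);	/* software-generated, per cpu */
	assert(VGIC_PPI_NUM == 16);	/* private peripheral, per cpu */
	assert(VGIC_SPI_NUM == 988);	/* shared peripheral */
	return (0);
}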
127 /* Per-CPU data not needed by EL2 */
209 /* GICD_STATUSR - RAZ/WI as we don't report errors (yet) */
212 /* GICD_SETSPI_SR - RAZ/WI */
213 /* GICD_CLRSPI_SR - RAZ/WI */
214 /* GICD_IGROUPR - RAZ/WI as GICD_CTLR.ARE == 1 */
236 /* GICD_ITARGETSR - RAZ/WI as GICD_CTLR.ARE == 1 */
240 /* GICD_IGRPMODR - RAZ/WI from non-secure mode */
241 /* GICD_NSACR - RAZ/WI from non-secure mode */
242 /* GICD_SGIR - RAZ/WI as GICD_CTLR.ARE == 1 */
243 /* GICD_CPENDSGIR - RAZ/WI as GICD_CTLR.ARE == 1 */
244 /* GICD_SPENDSGIR - RAZ/WI as GICD_CTLR.ARE == 1 */
327 /* GICR_CTLR - Ignore writes as no bits can be set */
333 /* GICR_STATUSR - RAZ/WI as we don't report errors (yet) */
334 /* GICR_WAKER - RAZ/WI from non-secure mode */
335 /* GICR_SETLPIR - RAZ/WI as no LPIs are supported */
336 /* GICR_CLRLPIR - RAZ/WI as no LPIs are supported */
337 /* GICR_PROPBASER - RAZ/WI as no LPIs are supported */
338 /* GICR_PENDBASER - RAZ/WI as no LPIs are supported */
339 /* GICR_INVLPIR - RAZ/WI as no LPIs are supported */
340 /* GICR_INVALLR - RAZ/WI as no LPIs are supported */
341 /* GICR_SYNCR - RAZ/WI as no LPIs are supported */
368 /* GICR_IGROUPR0 - RAZ/WI from non-secure mode */
387 /* GICR_ICFGR0 - RAZ/WI from non-secure mode */
391 /* GICR_IGRPMODR0 - RAZ/WI from non-secure mode */
392 /* GICR_NSACR - RAZ/WI from non-secure mode */
430 vm = hyp->vm;
432 hypctx = hyp->ctx[i];
433 if (hypctx != NULL && (hypctx->vmpidr_el2 & GICD_AFF) == mpidr)
436 return (-1);
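The lookup above walks each vcpu context, compares the affinity fields of its VMPIDR_EL2 against the requested value, and returns -1 when nothing matches. A minimal user-space model; the GICD_AFF mask here (the four MPIDR affinity bytes) is my assumption, not the driver's definition:

#include <stdint.h>
#include <stdio.h>

/* Assumed GICD_AFF: MPIDR Aff3[39:32], Aff2[23:16], Aff1[15:8], Aff0[7:0]. */
#define	AFF_MASK	0xff00ffffffull

static int
mpidr_to_vcpu(const uint64_t *vmpidr_el2, int ncpu, uint64_t mpidr)
{
	for (int i = 0; i < ncpu; i++)
		if ((vmpidr_el2[i] & AFF_MASK) == (mpidr & AFF_MASK))
			return (i);
	return (-1);		/* no vcpu owns this affinity */
}

int
main(void)
{
	uint64_t vmpidr[2] = { 0x0, 0x1 };	/* Aff0 == vcpu id */

	printf("%d %d\n", mpidr_to_vcpu(vmpidr, 2, 0x1),
	    mpidr_to_vcpu(vmpidr, 2, 0x7));	/* -> 1 -1 */
	return (0);
}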
444 hyp->vgic = malloc(sizeof(*hyp->vgic), M_VGIC_V3,
446 vgic = hyp->vgic;
455 * GICv3 and GICv4, p. 4-464)
457 vgic->gicd_ctlr = 0;
459 mtx_init(&vgic->dist_mtx, "VGICv3 Distributor lock", NULL,
470 hypctx->vgic_cpu = malloc(sizeof(*hypctx->vgic_cpu),
472 vgic_cpu = hypctx->vgic_cpu;
474 mtx_init(&vgic_cpu->lr_mtx, "VGICv3 ICH_LR_EL2 lock", NULL, MTX_SPIN);
478 irq = &vgic_cpu->private_irqs[irqid];
480 mtx_init(&irq->irq_spinmtx, "VGIC IRQ spinlock", NULL,
482 irq->irq = irqid;
483 irq->mpidr = hypctx->vmpidr_el2 & GICD_AFF;
484 irq->target_vcpu = vcpu_vcpuid(hypctx->vcpu);
485 MPASS(irq->target_vcpu >= 0);
489 irq->enabled = true;
490 irq->config = VGIC_CONFIG_EDGE;
493 irq->config = VGIC_CONFIG_LEVEL;
495 irq->priority = 0;
505 hypctx->vgic_v3_regs.ich_hcr_el2 = ICH_HCR_EL2_En;
520 hypctx->vgic_v3_regs.ich_vmcr_el2 =
523 hypctx->vgic_v3_regs.ich_vmcr_el2 &= ~ICH_VMCR_EL2_VEOIM;
524 hypctx->vgic_v3_regs.ich_vmcr_el2 |= ICH_VMCR_EL2_VENG0 |
527 hypctx->vgic_v3_regs.ich_lr_num = virt_features.ich_lr_num;
528 for (i = 0; i < hypctx->vgic_v3_regs.ich_lr_num; i++)
529 hypctx->vgic_v3_regs.ich_lr_el2[i] = 0UL;
530 vgic_cpu->ich_lr_used = 0;
531 TAILQ_INIT(&vgic_cpu->irq_act_pend);
533 hypctx->vgic_v3_regs.ich_apr_num = virt_features.ich_apr_num;
543 vgic_cpu = hypctx->vgic_cpu;
545 irq = &vgic_cpu->private_irqs[irqid];
546 mtx_destroy(&irq->irq_spinmtx);
549 mtx_destroy(&vgic_cpu->lr_mtx);
550 free(hypctx->vgic_cpu, M_VGIC_V3);
556 mtx_destroy(&hyp->vgic->dist_mtx);
557 free(hyp->vgic, M_VGIC_V3);
567 vgic = hyp->vgic;
568 max_count = vm_get_maxcpus(hyp->vm);
571 if (vgic->redist_start == 0 && vgic->redist_end == 0)
574 count = (vgic->redist_end - vgic->redist_start) /
590 if ((irq->config & VGIC_CONFIG_MASK) == VGIC_CONFIG_LEVEL) {
591 return (irq->pending || irq->level);
593 return (irq->pending);
602 MPASS(vcpuid < vm_get_maxcpus(hyp->vm));
604 mtx_assert(&vgic_cpu->lr_mtx, MA_OWNED);
605 mtx_assert(&irq->irq_spinmtx, MA_OWNED);
608 if (!irq->level && !irq->pending)
611 if (!irq->on_aplist) {
612 irq->on_aplist = true;
613 TAILQ_INSERT_TAIL(&vgic_cpu->irq_act_pend, irq, act_pend_list);
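vgic_v3_irq_pending encodes the usual GIC semantics: a level-sensitive interrupt is pending while either the latch or the line is set, an edge-sensitive one only while the latch is set. A toy model of just that predicate; the struct and field names are illustrative, not the driver's:

#include <stdbool.h>
#include <stdio.h>

struct virq {
	bool pending;	/* latched */
	bool level;	/* current line state */
	bool edge;	/* (config & VGIC_CONFIG_MASK) == VGIC_CONFIG_EDGE */
};

static bool
irq_pending(const struct virq *irq)
{
	if (!irq->edge)
		return (irq->pending || irq->level);	/* held-high line counts */
	return (irq->pending);				/* edges must latch */
}

int
main(void)
{
	struct virq level_high = { .pending = false, .level = true, .edge = false };
	struct virq edge_idle = { .pending = false, .level = true, .edge = true };

	printf("%d %d\n", irq_pending(&level_high), irq_pending(&edge_idle));
	return (0);
}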
624 mask = ((1ul << (size * 8)) - 1) << (offset * 8);
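The mask computed above selects the byte lanes touched by a narrower-than-64-bit access so the written value can be merged into the stored register image. A self-contained sketch of the merge, in the shape gic_reg_value_64 appears to have; the surrounding lines did not match this search, so treat it as an approximation:

#include <stdint.h>
#include <stdio.h>

static uint64_t
reg_merge(uint64_t field, uint64_t val, unsigned offset, unsigned size)
{
	uint64_t mask;

	if (offset == 0 && size == 8)
		return (val);		/* full-width write */
	mask = (((uint64_t)1 << (size * 8)) - 1) << (offset * 8);
	field &= ~mask;
	field |= (val << (offset * 8)) & mask;
	return (field);
}

int
main(void)
{
	/* A 2-byte write of 0xbeef at byte offset 4. */
	printf("%llx\n", (unsigned long long)
	    reg_merge(0x1122334455667788, 0xbeef, 4, 2));
	/* -> 1122beef55667788 */
	return (0);
}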
637 gic_pidr2_read(struct hypctx *hypctx, u_int reg, uint64_t *rval,
643 /* Common read-only/write-ignored helpers */
645 gic_zero_read(struct hypctx *hypctx, u_int reg, uint64_t *rval,
652 gic_ignore_write(struct hypctx *hypctx, u_int reg, u_int offset, u_int size,
669 irq = vgic_v3_get_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
674 if (!irq->enabled)
696 irq = vgic_v3_get_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
701 irq->enabled = set;
717 irq = vgic_v3_get_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
742 hyp = hypctx->hyp;
750 irq = vgic_v3_get_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
756 target_vcpu = irq->target_vcpu;
759 target_hypctx = hyp->ctx[target_vcpu];
762 vgic_cpu = target_hypctx->vgic_cpu;
765 /* pending -> not pending */
766 irq->pending = false;
768 irq->pending = true;
769 mtx_lock_spin(&vgic_cpu->lr_mtx);
772 mtx_unlock_spin(&vgic_cpu->lr_mtx);
778 vcpu_notify_event(vm_vcpu(hyp->vm, target_vcpu));
795 irq = vgic_v3_get_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
800 if (irq->active)
819 hyp = hypctx->hyp;
826 irq = vgic_v3_get_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
832 target_vcpu = irq->target_vcpu;
835 target_hypctx = hyp->ctx[target_vcpu];
838 vgic_cpu = target_hypctx->vgic_cpu;
841 /* active -> not active */
842 irq->active = false;
844 /* not active -> active */
845 irq->active = true;
846 mtx_lock_spin(&vgic_cpu->lr_mtx);
849 mtx_unlock_spin(&vgic_cpu->lr_mtx);
855 vcpu_notify_event(vm_vcpu(hyp->vm, target_vcpu));
870 irq = vgic_v3_get_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
875 ret |= ((uint64_t)irq->priority) << (i * 8);
889 irq = vgic_v3_get_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
895 irq->priority = (val >> (i * 8)) & 0xf8;
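GICD_IPRIORITYR packs one 8-bit priority per INTID into each 32-bit register, and the 0xf8 mask above models five implemented priority bits with the low three RAZ/WI. A worked example of unpacking one such write:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/* One 32-bit GICD_IPRIORITYR write covers four INTIDs. */
	uint64_t val = 0x80604020;

	for (unsigned i = 0; i < 4; i++) {
		/* Keep the 5 implemented bits; the low 3 read as zero. */
		uint8_t prio = (val >> (i * 8)) & 0xf8;
		printf("INTID n+%u: priority %#x\n", i, prio);
	}
	return (0);
}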
911 irq = vgic_v3_get_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
916 ret |= ((uint64_t)irq->config) << (i * 2);
934 * an edge-triggered behaviour, and the register is
935 * implementation defined to be read-only for PPIs.
940 irq = vgic_v3_get_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
946 irq->config = (val >> (i * 2)) & VGIC_CONFIG_MASK;
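GICD_ICFGR gives each interrupt a 2-bit configuration field, of which only the upper bit distinguishes edge (1) from level (0), so one 32-bit register covers 16 interrupts. A sketch; the VGIC_CONFIG_* encodings used here are my assumption:

#include <stdint.h>
#include <stdio.h>

/* Assumed encoding: 0x2 in a field means edge, 0x0 means level. */
#define	CONFIG_MASK	0x2
#define	CONFIG_EDGE	0x2

int
main(void)
{
	uint64_t icfgr = 0;

	icfgr |= (uint64_t)CONFIG_EDGE << (3 * 2);	/* interrupt 3: edge */
	for (unsigned i = 0; i < 16; i++)
		printf("irq %u: %s\n", i,
		    (((icfgr >> (i * 2)) & CONFIG_MASK) == CONFIG_EDGE) ?
		    "edge" : "level");
	return (0);
}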
957 irq = vgic_v3_get_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu), n);
961 mpidr = irq->mpidr;
973 irq = vgic_v3_get_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu), n);
977 irq->mpidr = gic_reg_value_64(irq->mpidr, val, offset, size) & GICD_AFF;
978 irq->target_vcpu = mpidr_to_vcpu(hypctx->hyp, irq->mpidr);
993 dist_ctlr_read(struct hypctx *hypctx, u_int reg, uint64_t *rval,
999 hyp = hypctx->hyp;
1000 vgic = hyp->vgic;
1002 mtx_lock_spin(&vgic->dist_mtx);
1003 *rval = vgic->gicd_ctlr;
1004 mtx_unlock_spin(&vgic->dist_mtx);
1011 dist_ctlr_write(struct hypctx *hypctx, u_int reg, u_int offset, u_int size,
1018 vgic = hypctx->hyp->vgic;
1024 * EnableGrp1A is supported, and RWP is read-only.
1026 * All other bits are RES0 from non-secure mode as we
1032 mtx_lock_spin(&vgic->dist_mtx);
1033 vgic->gicd_ctlr = wval;
1035 mtx_unlock_spin(&vgic->dist_mtx);
1040 dist_typer_read(struct hypctx *hypctx, u_int reg, uint64_t *rval,
1045 typer = (10 - 1) << GICD_TYPER_IDBITS_SHIFT;
1048 typer |= howmany(VGIC_NIRQS + 1, 32) - 1;
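GICD_TYPER.ITLinesNumber encodes the supported INTID range as N, meaning IDs up to 32 * (N + 1) - 1, which is what the howmany() expression computes. A worked example with an assumed VGIC_NIRQS of 256:

#include <stdio.h>

#define	howmany(x, y)	(((x) + ((y) - 1)) / (y))

int
main(void)
{
	unsigned vgic_nirqs = 256;	/* illustrative, not the driver's value */
	unsigned itlines = howmany(vgic_nirqs + 1, 32) - 1;

	/* ITLinesNumber = N supports INTIDs up to 32 * (N + 1) - 1. */
	printf("ITLinesNumber %u -> max INTID %u\n",
	    itlines, 32 * (itlines + 1) - 1);
	return (0);
}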
1055 dist_iidr_read(struct hypctx *hypctx, u_int reg, uint64_t *rval, void *arg)
1062 dist_setclrspi_nsr_write(struct hypctx *hypctx, u_int reg, u_int offset,
1070 INJECT_IRQ(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu), irqid,
1071 reg == GICD_SETSPI_NSR);
1076 dist_isenabler_read(struct hypctx *hypctx, u_int reg, uint64_t *rval, void *arg)
1080 n = (reg - GICD_ISENABLER(0)) / 4;
1087 dist_isenabler_write(struct hypctx *hypctx, u_int reg, u_int offset, u_int size,
1094 n = (reg - GICD_ISENABLER(0)) / 4;
1102 dist_icenabler_read(struct hypctx *hypctx, u_int reg, uint64_t *rval, void *arg)
1106 n = (reg - GICD_ICENABLER(0)) / 4;
1113 dist_icenabler_write(struct hypctx *hypctx, u_int reg, u_int offset, u_int size,
1120 n = (reg - GICD_ICENABLER(0)) / 4;
1128 dist_ispendr_read(struct hypctx *hypctx, u_int reg, uint64_t *rval, void *arg)
1132 n = (reg - GICD_ISPENDR(0)) / 4;
1139 dist_ispendr_write(struct hypctx *hypctx, u_int reg, u_int offset, u_int size,
1146 n = (reg - GICD_ISPENDR(0)) / 4;
1154 dist_icpendr_read(struct hypctx *hypctx, u_int reg, uint64_t *rval, void *arg)
1158 n = (reg - GICD_ICPENDR(0)) / 4;
1165 dist_icpendr_write(struct hypctx *hypctx, u_int reg, u_int offset, u_int size,
1172 n = (reg - GICD_ICPENDR(0)) / 4;
1181 dist_isactiver_read(struct hypctx *hypctx, u_int reg, uint64_t *rval, void *arg)
1185 n = (reg - GICD_ISACTIVER(0)) / 4;
1192 dist_isactiver_write(struct hypctx *hypctx, u_int reg, u_int offset, u_int size,
1199 n = (reg - GICD_ISACTIVER(0)) / 4;
1207 dist_icactiver_read(struct hypctx *hypctx, u_int reg, uint64_t *rval,
1212 n = (reg - GICD_ICACTIVER(0)) / 4;
1219 dist_icactiver_write(struct hypctx *hypctx, u_int reg, u_int offset, u_int size,
1226 n = (reg - GICD_ICACTIVER(0)) / 4;
1233 /* Affinity routing is enabled so GICD_IPRIORITYR0-7 are RAZ/WI */
1235 dist_ipriorityr_read(struct hypctx *hypctx, u_int reg, uint64_t *rval,
1240 n = (reg - GICD_IPRIORITYR(0)) / 4;
1241 /* GICD_IPRIORITYR0-7 are RAZ/WI so handled separately */
1247 dist_ipriorityr_write(struct hypctx *hypctx, u_int reg, u_int offset,
1252 irq_base = (reg - GICD_IPRIORITYR(0)) + offset;
1253 /* GICD_IPRIORITYR0-7 are RAZ/WI so handled separately */
1260 dist_icfgr_read(struct hypctx *hypctx, u_int reg, uint64_t *rval, void *arg)
1264 n = (reg - GICD_ICFGR(0)) / 4;
1265 /* GICD_ICFGR0-1 are RAZ/WI so handled separately */
1271 dist_icfgr_write(struct hypctx *hypctx, u_int reg, u_int offset, u_int size,
1278 n = (reg - GICD_ICFGR(0)) / 4;
1279 /* GICD_ICFGR0-1 are RAZ/WI so handled separately */
1286 dist_irouter_read(struct hypctx *hypctx, u_int reg, uint64_t *rval, void *arg)
1290 n = (reg - GICD_IROUTER(0)) / 8;
1291 /* GICD_IROUTER0-31 don't exist */
1297 dist_irouter_write(struct hypctx *hypctx, u_int reg, u_int offset, u_int size,
1302 n = (reg - GICD_IROUTER(0)) / 8;
1303 /* GICD_IROUTER0-31 don't exist */
1310 u_int reg_list_size, u_int reg, u_int size, uint64_t *rval, void *arg)
1315 if (reg_list[i].start <= reg && reg_list[i].end >= reg + size) {
1316 offset = reg & (reg_list[i].size - 1);
1317 reg -= offset;
1319 reg_list[i].read(hypctx, reg, rval, NULL);
1324 *rval &= (1ul << (size * 8)) - 1;
1328 * The access is an invalid size. Section
1329 * 12.1.3 "GIC memory-mapped register access"
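The dispatch above scans a table of {start, end, native size} entries, rebases a narrower access to the register's natural start, and truncates the result to the access size. A cut-down model; the shift down to the accessed byte lane is inferred, since those lines did not match this search:

#include <stdint.h>
#include <stdio.h>

struct reg_ent {
	unsigned start, end, size;		/* size: native register width */
	uint64_t (*read)(unsigned reg);
};

static uint64_t
demo_read(unsigned reg)
{
	return (0x1122334455667788ull + reg);	/* stand-in register file */
}

int
main(void)
{
	struct reg_ent ent = { 0x100, 0x180, 8, demo_read };
	unsigned reg = 0x10c, size = 4;		/* 32-bit read of an upper half */

	if (ent.start <= reg && ent.end >= reg + size) {
		unsigned offset = reg & (ent.size - 1);
		uint64_t rval = ent.read(reg - offset) >> (offset * 8);

		if (size < 8)
			rval &= ((uint64_t)1 << (size * 8)) - 1;
		printf("%#llx\n", (unsigned long long)rval);
	}
	return (0);
}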
1345 u_int reg_list_size, u_int reg, u_int size, uint64_t wval, void *arg)
1350 if (reg_list[i].start <= reg && reg_list[i].end >= reg + size) {
1351 offset = reg & (reg_list[i].size - 1);
1352 reg -= offset;
1354 reg_list[i].write(hypctx, reg, offset,
1376 uint64_t reg;
1379 hyp = hypctx->hyp;
1380 vgic = hyp->vgic;
1383 if (fault_ipa < vgic->dist_start || fault_ipa + size > vgic->dist_end) {
1387 reg = fault_ipa - vgic->dist_start;
1389 * As described in vgic_register_read an access with an invalid
1392 if ((reg & (size - 1)) != 0) {
1398 reg, size, rval, NULL))
1414 uint64_t reg;
1417 hyp = hypctx->hyp;
1418 vgic = hyp->vgic;
1421 if (fault_ipa < vgic->dist_start || fault_ipa + size > vgic->dist_end) {
1425 reg = fault_ipa - vgic->dist_start;
1427 * As described in vgic_register_read an access with an invalid
1430 if ((reg & (size - 1)) != 0)
1434 reg, size, wval, NULL))
1448 redist_ctlr_read(struct hypctx *hypctx, u_int reg, uint64_t *rval, void *arg)
1456 redist_iidr_read(struct hypctx *hypctx, u_int reg, uint64_t *rval, void *arg)
1463 redist_typer_read(struct hypctx *hypctx, u_int reg, uint64_t *rval, void *arg)
1469 if (vcpu_vcpuid(hypctx->vcpu) == (vgic_max_cpu_count(hypctx->hyp) - 1))
1472 vmpidr_el2 = hypctx->vmpidr_el2;
1486 (uint64_t)vcpu_vcpuid(hypctx->vcpu) << GICR_TYPER_CPUNUM_SHIFT;
1500 redist_ienabler0_read(struct hypctx *hypctx, u_int reg, uint64_t *rval,
1507 redist_isenabler0_write(struct hypctx *hypctx, u_int reg, u_int offset,
1517 redist_icenabler0_write(struct hypctx *hypctx, u_int reg, u_int offset,
1527 redist_ipendr0_read(struct hypctx *hypctx, u_int reg, uint64_t *rval,
1534 redist_ispendr0_write(struct hypctx *hypctx, u_int reg, u_int offset,
1544 redist_icpendr0_write(struct hypctx *hypctx, u_int reg, u_int offset,
1554 redist_iactiver0_read(struct hypctx *hypctx, u_int reg, uint64_t *rval,
1561 redist_isactiver0_write(struct hypctx *hypctx, u_int reg, u_int offset,
1569 redist_icactiver0_write(struct hypctx *hypctx, u_int reg, u_int offset,
1577 redist_ipriorityr_read(struct hypctx *hypctx, u_int reg, uint64_t *rval,
1582 n = (reg - GICR_IPRIORITYR(0)) / 4;
1587 redist_ipriorityr_write(struct hypctx *hypctx, u_int reg, u_int offset,
1592 irq_base = (reg - GICR_IPRIORITYR(0)) + offset;
1598 redist_icfgr1_read(struct hypctx *hypctx, u_int reg, uint64_t *rval, void *arg)
1604 redist_icfgr1_write(struct hypctx *hypctx, u_int reg, u_int offset, u_int size,
1619 uint64_t reg;
1624 hyp = hypctx->hyp;
1625 vgic = hyp->vgic;
1628 if (fault_ipa < vgic->redist_start ||
1629 fault_ipa + size > vgic->redist_end) {
1633 vcpuid = (fault_ipa - vgic->redist_start) /
1635 if (vcpuid >= vm_get_maxcpus(hyp->vm)) {
1638 * does we don't panic a non-INVARIANTS kernel.
1648 /* Find the target vcpu ctx for the access */
1649 target_hypctx = hyp->ctx[vcpuid];
1654 * also be powered down so any access will raise an external
1662 reg = (fault_ipa - vgic->redist_start) %
1666 * As described in vgic_register_read an access with an invalid
1669 if ((reg & (size - 1)) != 0) {
1674 if (reg < GICR_RD_BASE_SIZE) {
1676 nitems(redist_rd_registers), reg, size, rval, NULL))
1678 } else if (reg < (GICR_SGI_BASE + GICR_SGI_BASE_SIZE)) {
1680 nitems(redist_sgi_registers), reg - GICR_SGI_BASE, size,
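The redistributor region is carved into one fixed stride per vCPU: the code above turns a faulting IPA into a vcpuid plus an offset, then routes offsets below GICR_RD_BASE_SIZE to the RD frame handlers and the rest to the SGI frame handlers. A sketch assuming the common GICv3 layout of two consecutive 64KiB frames (RD then SGI) per vCPU, with no VLPI frames:

#include <stdint.h>
#include <stdio.h>

#define	FRAME_64K	0x10000ull

int
main(void)
{
	uint64_t redist_start = 0x3fe00000;	/* illustrative base */
	uint64_t fault_ipa = redist_start +
	    3 * (2 * FRAME_64K) + FRAME_64K + 0x104;

	uint64_t vcpuid = (fault_ipa - redist_start) / (2 * FRAME_64K);
	uint64_t reg = (fault_ipa - redist_start) % (2 * FRAME_64K);

	if (reg < FRAME_64K)
		printf("vcpu %llu, RD frame, reg %#llx\n",
		    (unsigned long long)vcpuid, (unsigned long long)reg);
	else
		printf("vcpu %llu, SGI frame, reg %#llx\n",
		    (unsigned long long)vcpuid,
		    (unsigned long long)(reg - FRAME_64K));
	return (0);
}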
1697 uint64_t reg;
1702 hyp = hypctx->hyp;
1703 vgic = hyp->vgic;
1706 if (fault_ipa < vgic->redist_start ||
1707 fault_ipa + size > vgic->redist_end) {
1711 vcpuid = (fault_ipa - vgic->redist_start) /
1713 if (vcpuid >= vm_get_maxcpus(hyp->vm)) {
1716 * does we don't panic a non-INVARIANTS kernel.
1725 /* Find the target vcpu ctx for the access */
1726 target_hypctx = hyp->ctx[vcpuid];
1731 * also be powered down so any access will raise an external
1739 reg = (fault_ipa - vgic->redist_start) %
1743 * As described in vgic_register_read an access with an invalid
1746 if ((reg & (size - 1)) != 0)
1749 if (reg < GICR_RD_BASE_SIZE) {
1751 nitems(redist_rd_registers), reg, size, wval, NULL))
1753 } else if (reg < (GICR_SGI_BASE + GICR_SGI_BASE_SIZE)) {
1755 nitems(redist_sgi_registers), reg - GICR_SGI_BASE, size,
1791 /* Non-zero affinity fields match no vcpus */
1838 vgic = hyp->vgic;
1839 vgic->irqs = malloc((VGIC_NIRQS - VGIC_PRV_I_NUM) *
1840 sizeof(*vgic->irqs), M_VGIC_V3, M_WAITOK | M_ZERO);
1842 for (i = 0; i < VGIC_NIRQS - VGIC_PRV_I_NUM; i++) {
1843 irq = &vgic->irqs[i];
1845 mtx_init(&irq->irq_spinmtx, "VGIC IRQ spinlock", NULL,
1848 irq->irq = i + VGIC_PRV_I_NUM;
1859 vgic = hyp->vgic;
1860 for (i = 0; i < VGIC_NIRQS - VGIC_PRV_I_NUM; i++) {
1861 irq = &vgic->irqs[i];
1863 mtx_destroy(&irq->irq_spinmtx);
1866 free(vgic->irqs, M_VGIC_V3);
1876 if (descr->ver.version != 3)
1883 if (!__is_aligned(descr->v3_regs.dist_start, PAGE_SIZE_64K) ||
1884 !__is_aligned(descr->v3_regs.redist_start, PAGE_SIZE_64K) ||
1885 !__is_aligned(descr->v3_regs.redist_size,
1890 if (descr->v3_regs.dist_size != PAGE_SIZE_64K)
1893 vm = hyp->vm;
1899 cpu_count = descr->v3_regs.redist_size /
1904 vgic = hyp->vgic;
1906 /* Set the distributor address and size for trapping guest access. */
1907 vgic->dist_start = descr->v3_regs.dist_start;
1908 vgic->dist_end = descr->v3_regs.dist_start + descr->v3_regs.dist_size;
1910 vgic->redist_start = descr->v3_regs.redist_start;
1911 vgic->redist_end = descr->v3_regs.redist_start +
1912 descr->v3_regs.redist_size;
1914 vm_register_inst_handler(vm, descr->v3_regs.dist_start,
1915 descr->v3_regs.dist_size, dist_read, dist_write);
1916 vm_register_inst_handler(vm, descr->v3_regs.redist_start,
1917 descr->v3_regs.redist_size, redist_read, redist_write);
1925 hyp->vgic_attached = true;
1933 if (hyp->vgic_attached) {
1934 hyp->vgic_attached = false;
1947 if (vcpuid < 0 || vcpuid >= vm_get_maxcpus(hyp->vm))
1949 hypctx = hyp->ctx[vcpuid];
1952 vgic_cpu = hypctx->vgic_cpu;
1953 irq = &vgic_cpu->private_irqs[irqid];
1955 irqid -= VGIC_PRV_I_NUM;
1958 irq = &hyp->vgic->irqs[irqid];
1966 mtx_lock_spin(&irq->irq_spinmtx);
1974 mtx_unlock_spin(&irq->irq_spinmtx);
1983 vgic_cpu = hypctx->vgic_cpu;
1984 mtx_lock_spin(&vgic_cpu->lr_mtx);
1985 empty = TAILQ_EMPTY(&vgic_cpu->irq_act_pend);
1986 mtx_unlock_spin(&vgic_cpu->lr_mtx);
1996 * - Level-triggered IRQ: level changes low -> high
1997 * - Edge-triggered IRQ: level is high
1999 switch (irq->config & VGIC_CONFIG_MASK) {
2001 return (level != irq->level);
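The switch above decides whether a new level should actually assert the virtual interrupt: a level-triggered IRQ fires on a change of level, an edge-triggered one on a high level. The same rule as a tiny predicate, with the configuration encoding assumed as before:

#include <stdbool.h>
#include <stdio.h>

/* Assumed encoding: config bit 0x2 set means edge, clear means level. */
#define	CONFIG_MASK	0x2
#define	CONFIG_LEVEL	0x0

static bool
should_assert(unsigned config, bool cur_level, bool new_level)
{
	if ((config & CONFIG_MASK) == CONFIG_LEVEL)
		return (new_level != cur_level);	/* fire on a change */
	return (new_level);				/* fire on a high level */
}

int
main(void)
{
	printf("level low->high: %d\n", should_assert(0x0, false, true));
	printf("level high->high: %d\n", should_assert(0x0, true, true));
	printf("edge, level high: %d\n", should_assert(0x2, false, true));
	return (0);
}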
2021 if (!hyp->vgic_attached)
2024 KASSERT(vcpuid == -1 || irqid < VGIC_PRV_I_NUM,
2034 target_vcpu = irq->target_vcpu;
2035 KASSERT(vcpuid == -1 || vcpuid == target_vcpu,
2038 KASSERT(target_vcpu >= 0 && target_vcpu < vm_get_maxcpus(hyp->vm),
2042 if (vcpuid == -1)
2044 /* TODO: Check from 0 to vm->maxcpus */
2045 if (vcpuid < 0 || vcpuid >= vm_get_maxcpus(hyp->vm)) {
2050 hypctx = hyp->ctx[vcpuid];
2057 vgic_cpu = hypctx->vgic_cpu;
2059 mtx_lock_spin(&vgic_cpu->lr_mtx);
2065 if ((irq->config & VGIC_CONFIG_MASK) == VGIC_CONFIG_LEVEL)
2066 irq->level = level;
2068 irq->pending = true;
2073 mtx_unlock_spin(&vgic_cpu->lr_mtx);
2077 vcpu_notify_event(vm_vcpu(hyp->vm, vcpuid));
2086 uint64_t reg;
2088 vgic = hyp->vgic;
2091 if (addr < vgic->dist_start || addr + 4 > vgic->dist_end) {
2095 reg = addr - vgic->dist_start;
2096 if (reg != GICD_SETSPI_NSR)
2099 return (INJECT_IRQ(hyp, -1, msg, true));
2109 vgic_cpu = hypctx->vgic_cpu;
2118 mtx_lock_spin(&vgic_cpu->lr_mtx);
2120 hypctx->vgic_v3_regs.ich_hcr_el2 &= ~ICH_HCR_EL2_UIE;
2123 if (TAILQ_EMPTY(&vgic_cpu->irq_act_pend))
2126 KASSERT(vgic_cpu->ich_lr_used == 0, ("%s: Used LR count not zero %u",
2127 __func__, vgic_cpu->ich_lr_used));
2130 hypctx->vgic_v3_regs.ich_elrsr_el2 =
2131 (1u << hypctx->vgic_v3_regs.ich_lr_num) - 1;
2132 TAILQ_FOREACH(irq, &vgic_cpu->irq_act_pend, act_pend_list) {
2134 if (i == hypctx->vgic_v3_regs.ich_lr_num)
2137 if (!irq->enabled)
2140 hypctx->vgic_v3_regs.ich_lr_el2[i] = ICH_LR_EL2_GROUP1 |
2141 ((uint64_t)irq->priority << ICH_LR_EL2_PRIO_SHIFT) |
2142 irq->irq;
2144 if (irq->active) {
2145 hypctx->vgic_v3_regs.ich_lr_el2[i] |=
2151 if ((irq->config & VGIC_CONFIG_MASK) == VGIC_CONFIG_LEVEL)
2152 hypctx->vgic_v3_regs.ich_lr_el2[i] |= ICH_LR_EL2_EOI;
2155 if (!irq->active && vgic_v3_irq_pending(irq)) {
2156 hypctx->vgic_v3_regs.ich_lr_el2[i] |=
2164 if ((irq->config & VGIC_CONFIG_MASK) ==
2166 irq->pending = false;
2172 vgic_cpu->ich_lr_used = i;
2175 mtx_unlock_spin(&vgic_cpu->lr_mtx);
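Each list register the loop fills carries one virtual interrupt: group, priority, vINTID, the pending/active state bits, and EOI for level-triggered sources so deactivation raises a maintenance interrupt. A sketch using the architectural ICH_LR_EL2 field positions as I understand them (state [63:62], group bit 60, EOI bit 41, priority [55:48], vINTID [31:0]); verify against the Arm GIC specification before relying on them:

#include <stdint.h>
#include <stdio.h>

#define	LR_STATE_PENDING	((uint64_t)1 << 62)
#define	LR_STATE_ACTIVE		((uint64_t)2 << 62)
#define	LR_GROUP1		((uint64_t)1 << 60)
#define	LR_EOI			((uint64_t)1 << 41)
#define	LR_PRIO_SHIFT		48

int
main(void)
{
	/* A pending, level-triggered vINTID 27 at priority 0x80. */
	uint64_t lr = LR_GROUP1 | ((uint64_t)0x80 << LR_PRIO_SHIFT) | 27;

	lr |= LR_STATE_PENDING;
	lr |= LR_EOI;	/* maintenance interrupt on deactivation */
	printf("ICH_LR_EL2 = %#llx\n", (unsigned long long)lr);
	return (0);
}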
2186 vgic_cpu = hypctx->vgic_cpu;
2189 if (vgic_cpu->ich_lr_used == 0)
2195 * access unlocked.
2197 for (i = 0; i < vgic_cpu->ich_lr_used; i++) {
2198 lr = hypctx->vgic_v3_regs.ich_lr_el2[i];
2199 hypctx->vgic_v3_regs.ich_lr_el2[i] = 0;
2201 irq = vgic_v3_get_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
2206 irq->active = (lr & ICH_LR_EL2_STATE_ACTIVE) != 0;
2208 if ((irq->config & VGIC_CONFIG_MASK) == VGIC_CONFIG_EDGE) {
2214 irq->pending = true;
2224 irq->pending = false;
2229 mtx_lock_spin(&vgic_cpu->lr_mtx);
2230 if (irq->active) {
2232 TAILQ_REMOVE(&vgic_cpu->irq_act_pend, irq,
2234 TAILQ_INSERT_HEAD(&vgic_cpu->irq_act_pend, irq,
2238 TAILQ_REMOVE(&vgic_cpu->irq_act_pend, irq,
2240 irq->on_aplist = false;
2242 mtx_unlock_spin(&vgic_cpu->lr_mtx);
2246 hypctx->vgic_v3_regs.ich_hcr_el2 &= ~ICH_HCR_EL2_EOICOUNT_MASK;
2247 vgic_cpu->ich_lr_used = 0;