xref: /netbsd-src/sys/arch/aarch64/aarch64/efi_machdep.c (revision 325dc460fcb903ba21d515d6422d8abf39bc692e)
1 /* $NetBSD: efi_machdep.c,v 1.13 2022/05/03 20:10:20 skrll Exp $ */
2 
3 /*-
4  * Copyright (c) 2018 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Jared McNeill <jmcneill@invisible.ca>.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: efi_machdep.c,v 1.13 2022/05/03 20:10:20 skrll Exp $");
34 
35 #include <sys/param.h>
36 #include <uvm/uvm_extern.h>
37 
38 #include <arm/cpufunc.h>
39 
40 #include <arm/arm/efi_runtime.h>
41 
42 #include <aarch64/machdep.h>
43 
/*
 * Per-call scratch state for EFI runtime service invocations.  A single
 * shared instance is sufficient because the EFI RT framework serializes
 * calls (see the fault-handler comment in arm_efirt_md_enter()).
 */
static struct {
	struct faultbuf	faultbuf;	/* fault recovery context for calls into firmware */
	bool		fpu_used;	/* lwp had live FPU state when the call was entered */
} arm_efirt_state;

/*
 * True while EFI runtime mappings are (or may still be) established in the
 * low, user-style VA range served by the dedicated EFI pmap; set to false
 * permanently once a kernel-VA (EFI_RUNTIME_VA window) mapping is created.
 * The two styles cannot be mixed (checked in arm_efirt_md_map_range()).
 */
static bool efi_userva = true;
50 
51 void
52 arm_efirt_md_map_range(vaddr_t va, paddr_t pa, size_t sz,
53     enum arm_efirt_mem_type type)
54 {
55 	int flags = 0;
56 	int prot = 0;
57 
58 	switch (type) {
59 	case ARM_EFIRT_MEM_CODE:
60 		/* need write permission because fw devs */
61 		prot = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
62 		break;
63 	case ARM_EFIRT_MEM_DATA:
64 		prot = VM_PROT_READ | VM_PROT_WRITE;
65 		break;
66 	case ARM_EFIRT_MEM_MMIO:
67 		prot = VM_PROT_READ | VM_PROT_WRITE;
68 		flags = PMAP_DEV;
69 		break;
70 	default:
71 		panic("%s: unsupported type %d", __func__, type);
72 	}
73 
74 	/* even if TBI is disabled, AARCH64_ADDRTOP_TAG means KVA */
75 	bool kva = (va & AARCH64_ADDRTOP_TAG) != 0;
76 	if (kva) {
77 		if (va < EFI_RUNTIME_VA ||
78 		    va >= EFI_RUNTIME_VA + EFI_RUNTIME_SIZE) {
79 			printf("Incorrect EFI mapping address %" PRIxVADDR "\n", va);
80 		    return;
81 		}
82 		efi_userva = false;
83 	} else {
84 		if (!efi_userva) {
85 			printf("Can't mix EFI RT address spaces\n");
86 			return;
87 		}
88 	}
89 
90 	while (sz != 0) {
91 		if (kva) {
92 			pmap_kenter_pa(va, pa, prot, flags);
93 		} else {
94 			pmap_enter(pmap_efirt(), va, pa, prot, flags | PMAP_WIRED);
95 		}
96 		va += PAGE_SIZE;
97 		pa += PAGE_SIZE;
98 		sz -= PAGE_SIZE;
99 	}
100 	if (kva)
101 		pmap_update(pmap_kernel());
102 	else
103 		pmap_update(pmap_efirt());
104 }
105 
/*
 * Prepare the current CPU/thread for a call into EFI runtime firmware:
 * disable preemption, save and enable FPU access (the AArch64 UEFI
 * calling convention permits firmware to use SIMD/FP registers), arm a
 * fault-recovery handler, and — when the RT mappings live in the user
 * VA range — activate the dedicated EFI pmap.
 *
 * Returns 0 on success; nonzero when cpu_set_onfault() reports an error
 * (i.e. a fault was caught).  NOTE(review): on that early-return path
 * preemption stays disabled and CPACR stays enabled — presumably the
 * caller is required to invoke arm_efirt_md_exit() even on failure;
 * confirm against the callers in arm/arm/efi_runtime.c.
 */
int
arm_efirt_md_enter(void)
{
	kpreempt_disable();

	struct lwp * const l = curlwp;

	/* Save FPU state */
	arm_efirt_state.fpu_used = fpu_used_p(l) != 0;
	if (arm_efirt_state.fpu_used)
		fpu_save(l);

	/* Enable FP access (AArch64 UEFI calling convention) */
	reg_cpacr_el1_write(CPACR_FPEN_ALL);
	isb();

	/*
	 * Install custom fault handler. EFI lock is held across calls so
	 * shared faultbuf is safe here.
	 */
	int err = cpu_set_onfault(&arm_efirt_state.faultbuf);
	if (err)
		return err;

	/* Switch to the EFI pmap when RT mappings are user-VA based. */
	if (efi_userva) {
		if ((l->l_flag & LW_SYSTEM) == 0) {
			/* Only user lwps have an active user pmap to drop. */
			pmap_deactivate(l);
		}
		pmap_activate_efirt();
	}

	return 0;
}
139 
/*
 * Undo arm_efirt_md_enter() after a call into EFI runtime firmware, in
 * reverse order: restore the previous pmap, disable FP access, reload
 * saved FPU state, remove the fault handler, and re-enable preemption.
 */
void
arm_efirt_md_exit(void)
{
	struct lwp * const l = curlwp;

	/* Switch back from the EFI pmap to the lwp's own pmap (if any). */
	if (efi_userva) {
		pmap_deactivate_efirt();
		if ((l->l_flag & LW_SYSTEM) == 0) {
			pmap_activate(l);
		}
	}

	/* Disable FP access */
	reg_cpacr_el1_write(CPACR_FPEN_NONE);
	isb();

	/*
	 * Restore FPU state.  NOTE(review): fpu_load() is called after
	 * FPEN is cleared — presumably it handles (re-)enabling access or
	 * defers the reload; confirm against aarch64 fpu_load().
	 */
	if (arm_efirt_state.fpu_used)
		fpu_load(l);

	/* Remove custom fault handler */
	cpu_unset_onfault();

	kpreempt_enable();
}
165