xref: /netbsd-src/sys/arch/aarch64/aarch64/efi_machdep.c (revision 9b383adc631534b9ef7e503863f9e314ac60e151)
1 /* $NetBSD: efi_machdep.c,v 1.14 2023/07/10 07:00:11 rin Exp $ */
2 
3 /*-
4  * Copyright (c) 2018 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Jared McNeill <jmcneill@invisible.ca>.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: efi_machdep.c,v 1.14 2023/07/10 07:00:11 rin Exp $");
34 
35 #include <sys/param.h>
36 
37 #include <uvm/uvm_extern.h>
38 
39 #include <arm/cpufunc.h>
40 #include <arm/efirt.h>
41 
42 #include <arm/arm/efi_runtime.h>
43 
44 #include <aarch64/machdep.h>
45 
/*
 * Per-call MD state for EFI runtime service invocations.  A single
 * static instance suffices because the EFI lock is held across calls
 * (see the comment above cpu_set_onfault() in arm_efirt_md_enter()).
 */
static struct {
	struct faultbuf	faultbuf;	/* onfault recovery context for faults in firmware */
	bool		fpu_used;	/* lwp had live FPU state on entry */
} arm_efirt_state;

/*
 * True while EFI runtime mappings live in the dedicated user-VA pmap
 * (pmap_efirt()); cleared once a kernel-VA mapping (inside
 * EFI_RUNTIME_VA .. +EFI_RUNTIME_SIZE) is established.  Mixing the two
 * address spaces is rejected in cpu_efirt_map_range().
 */
static bool efi_userva = true;
52 
53 void
cpu_efirt_map_range(vaddr_t va,paddr_t pa,size_t sz,enum cpu_efirt_mem_type type)54 cpu_efirt_map_range(vaddr_t va, paddr_t pa, size_t sz,
55     enum cpu_efirt_mem_type type)
56 {
57 	int flags = 0;
58 	int prot = 0;
59 
60 	switch (type) {
61 	case ARM_EFIRT_MEM_CODE:
62 		/* need write permission because fw devs */
63 		prot = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
64 		break;
65 	case ARM_EFIRT_MEM_DATA:
66 		prot = VM_PROT_READ | VM_PROT_WRITE;
67 		break;
68 	case ARM_EFIRT_MEM_MMIO:
69 		prot = VM_PROT_READ | VM_PROT_WRITE;
70 		flags = PMAP_DEV;
71 		break;
72 	default:
73 		panic("%s: unsupported type %d", __func__, type);
74 	}
75 
76 	/* even if TBI is disabled, AARCH64_ADDRTOP_TAG means KVA */
77 	bool kva = (va & AARCH64_ADDRTOP_TAG) != 0;
78 	if (kva) {
79 		if (va < EFI_RUNTIME_VA ||
80 		    va >= EFI_RUNTIME_VA + EFI_RUNTIME_SIZE) {
81 			printf("Incorrect EFI mapping address %" PRIxVADDR "\n", va);
82 		    return;
83 		}
84 		efi_userva = false;
85 	} else {
86 		if (!efi_userva) {
87 			printf("Can't mix EFI RT address spaces\n");
88 			return;
89 		}
90 	}
91 
92 	while (sz != 0) {
93 		if (kva) {
94 			pmap_kenter_pa(va, pa, prot, flags);
95 		} else {
96 			pmap_enter(pmap_efirt(), va, pa, prot, flags | PMAP_WIRED);
97 		}
98 		va += PAGE_SIZE;
99 		pa += PAGE_SIZE;
100 		sz -= PAGE_SIZE;
101 	}
102 	if (kva)
103 		pmap_update(pmap_kernel());
104 	else
105 		pmap_update(pmap_efirt());
106 }
107 
/*
 * MD preparation for calling an EFI runtime service: disable
 * preemption, save the lwp's FPU state and grant FP access (the
 * AArch64 UEFI calling convention allows firmware to use SIMD/FP
 * registers), install a fault handler, and — for user-VA EFI
 * mappings — switch to the dedicated EFI pmap.
 *
 * Returns 0 on success, or a nonzero error from cpu_set_onfault().
 */
int
arm_efirt_md_enter(void)
{
	kpreempt_disable();

	struct lwp * const l = curlwp;

	/* Save FPU state */
	arm_efirt_state.fpu_used = fpu_used_p(l) != 0;
	if (arm_efirt_state.fpu_used)
		fpu_save(l);

	/* Enable FP access (AArch64 UEFI calling convention) */
	reg_cpacr_el1_write(CPACR_FPEN_ALL);
	isb();

	/*
	 * Install custom fault handler. EFI lock is held across calls so
	 * shared faultbuf is safe here.
	 */
	int err = cpu_set_onfault(&arm_efirt_state.faultbuf);
	if (err)
		return err;
		/*
		 * NOTE(review): cpu_set_onfault() is setjmp-like, so the
		 * nonzero return above is presumably the fault-unwind path;
		 * the caller appears expected to still call
		 * arm_efirt_md_exit() to undo kpreempt_disable() and the
		 * CPACR/FPU changes — confirm against arm/arm/efi_runtime.c.
		 */

	/*
	 * Switch the MMU to the EFI address space; a non-system lwp's own
	 * pmap is deactivated first (arm_efirt_md_exit() re-activates it).
	 */
	if (efi_userva) {
		if ((l->l_flag & LW_SYSTEM) == 0) {
			pmap_deactivate(l);
		}
		pmap_activate_efirt();
	}

	return 0;
}
141 
/*
 * Undo arm_efirt_md_enter() after an EFI runtime service call (or
 * after a fault unwind): restore the previous pmap, revoke FP access
 * and reload the saved FPU state, remove the fault handler, and
 * re-enable preemption.  Teardown is in reverse order of setup.
 */
void
arm_efirt_md_exit(void)
{
	struct lwp * const l = curlwp;

	/*
	 * Leave the dedicated EFI pmap; a non-system lwp gets its own
	 * pmap re-activated (it was deactivated in arm_efirt_md_enter()).
	 */
	if (efi_userva) {
		pmap_deactivate_efirt();
		if ((l->l_flag & LW_SYSTEM) == 0) {
			pmap_activate(l);
		}
	}

	/* Disable FP access */
	reg_cpacr_el1_write(CPACR_FPEN_NONE);
	isb();

	/* Restore FPU state (only if it was live on entry) */
	if (arm_efirt_state.fpu_used)
		fpu_load(l);

	/* Remove custom fault handler */
	cpu_unset_onfault();

	/* Pairs with the kpreempt_disable() in arm_efirt_md_enter(). */
	kpreempt_enable();
}
167