/* $NetBSD: vm_machdep.c,v 1.15 2023/12/20 06:13:58 thorpej Exp $ */

/*-
 * Copyright (c) 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_compat_netbsd32.h"
#include "opt_ddb.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.15 2023/12/20 06:13:58 thorpej Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/cpu.h>
#include <sys/buf.h>
#include <sys/exec.h>
#include <sys/syslog.h>

#include <uvm/uvm_extern.h>

#include <aarch64/pcb.h>
#include <aarch64/frame.h>
#include <aarch64/machdep.h>
#include <aarch64/armreg.h>

/*
 * Special compilation symbols:
 *
 * STACKCHECKS - Fill the kernel (EL1) stack with a known pattern when an
 *		 LWP is forked and check the pattern when it exits,
 *		 reporting the amount of stack used.
 */

void
cpu_proc_fork(struct proc *p1, struct proc *p2)
{
}

/*
 * Finish a fork operation, with LWP l2 nearly set up.
 *
 * Copy and update the pcb and trapframe, making the child ready to run.
 *
 * Rig the child's kernel stack so that it will start out in
 * lwp_trampoline() which will call the specified func with the argument arg.
 *
 * If an alternate user-level stack is requested (with non-zero values
 * in both the stack and stacksize args), set up the user stack pointer
 * accordingly.
 */
void
cpu_lwp_fork(struct lwp *l1, struct lwp *l2, void *stack, size_t stacksize,
    void (*func)(void *), void *arg)
{
	const struct pcb * const pcb1 = lwp_getpcb(l1);
	struct pcb * const pcb2 = lwp_getpcb(l2);

#if 0
	printf("cpu_lwp_fork: lwp1=%p, lwp2=%p, curlwp=%p, lwp0=%p\n",
	    l1, l2, curlwp, &lwp0);
#endif

	/* Copy the pcb */
	*pcb2 = *pcb1;

	/*
	 * Disable FP for a newly created LWP but remember if the
	 * FP state is valid.
	 */
	l2->l_md.md_cpacr = CPACR_FPEN_NONE;
	KASSERT(l2->l_md.md_astpending == 0);
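
	/*
	 * Note: leaving md_cpacr at CPACR_FPEN_NONE means the child's
	 * first FP/SIMD instruction traps; the FP state is then enabled
	 * and loaded lazily (md_cpacr is applied to CPACR_EL1 when we
	 * switch to the new LWP).
	 */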

#ifdef ARMV83_PAC
	/*
	 * Temporary kern PAC key, not really strong, but better than zero.
	 * The real key gets generated by the new thread in lwp_trampoline().
	 *
	 * We defer the generation because cpu_lwp_fork() can be called
	 * early at boot time, before the CPRNG subsystem has been
	 * initialized.
	 */
	l2->l_md.md_ia_kern[0] = (uintptr_t)l2;
	l2->l_md.md_ia_kern[1] = (uintptr_t)stack;

	/* inherit user PAC keys */
	memcpy(l2->l_md.md_ia_user, l1->l_md.md_ia_user,
	    sizeof(l2->l_md.md_ia_user));
	memcpy(l2->l_md.md_ib_user, l1->l_md.md_ib_user,
	    sizeof(l2->l_md.md_ib_user));
	memcpy(l2->l_md.md_da_user, l1->l_md.md_da_user,
	    sizeof(l2->l_md.md_da_user));
	memcpy(l2->l_md.md_db_user, l1->l_md.md_db_user,
	    sizeof(l2->l_md.md_db_user));
	memcpy(l2->l_md.md_ga_user, l1->l_md.md_ga_user,
	    sizeof(l2->l_md.md_ga_user));
#endif

	/*
	 * Set up the kernel stack for the process.
	 * Note: this stack is not in use if we are forking from p1
	 */
	vaddr_t uv = uvm_lwp_getuarea(l2);

#ifdef STACKCHECKS
#define PCB_END(l)	((char *)lwp_getpcb((l)) + sizeof(struct pcb))
#define UAREA_END(l)	((char *)uvm_lwp_getuarea((l)) + USPACE)
	/* fill 0xdd for STACKCHECKS */
	memset(PCB_END(l2), 0xdd, UAREA_END(l2) - PCB_END(l2));
	printf("lwp %p: pcb=%p, stack=%p-%p\n", l2, lwp_getpcb(l2),
	    PCB_END(l2), UAREA_END(l2));
#endif

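	/*
	 * Rough layout of the uarea after this point (addresses grow up):
	 *
	 *	uv + USPACE ->	+------------------------+
	 *			| user trapframe (utf)   |
	 *		ktf ->	+------------------------+
	 *			| switchframe (ktf)      |
	 *			+------------------------+
	 *			| kernel stack           |
	 *			|   (grows downward)     |
	 *			+------------------------+
	 *		uv ->	| struct pcb             |
	 *			+------------------------+
	 */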
	struct trapframe * const utf = (struct trapframe *)(uv + USPACE) - 1;
	l2->l_md.md_utf = utf;

	*utf = *l1->l_md.md_utf;
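
	/*
	 * The child starts out with a copy of the parent's user register
	 * state; the child's fork return value is patched up later
	 * (cf. md_child_return()).
	 */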

	/*
	 * If specified, give the child a different stack (make sure it's
	 * 16- or 8-byte aligned for 64- or 32-bit processes, respectively).
	 */
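	/*
	 * For example, masking with -16 (~0xf) rounds the initial sp
	 * down: stack = 0x1000 with stacksize = 0x2008 gives 0x3008,
	 * which becomes 0x3000 -- the 16-byte SP alignment AArch64
	 * requires when the stack pointer is used to access memory.
	 */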
	if (stack != NULL) {
		utf->tf_sp = (vaddr_t)(stack) + stacksize;
#ifdef COMPAT_NETBSD32
		if (__predict_false(l2->l_proc->p_flag & PK_32)) {
			utf->tf_sp &= -8;
			utf->tf_reg[13] = utf->tf_sp;
		} else
#endif
			utf->tf_sp &= -16;
	}

	/* build a new switchframe */
	struct trapframe * const ktf = utf - 1;
	ktf->tf_reg[27] = (uint64_t)func;
	ktf->tf_reg[28] = (uint64_t)arg;
	ktf->tf_lr = (uintptr_t)lwp_trampoline;
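	/*
	 * cpu_switchto() returns into lwp_trampoline() via tf_lr; the
	 * trampoline is expected to pick func and arg back out of
	 * x27/x28 (callee-saved, so they survive the switch) and
	 * call func(arg).
	 */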
#ifdef DDB
	ktf->tf_reg[29] = (uint64_t)utf;
	ktf->tf_pc = (uint64_t)&&backtrace_here;
	ktf->tf_sp = 0;		/* mark as switchframe */
 backtrace_here:
#endif
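	/*
	 * With DDB, the switchframe gets a frame pointer and a PC inside
	 * this function so that a stack trace through a not-yet-running
	 * LWP resolves somewhere sensible; tf_sp == 0 lets the unwinder
	 * tell a switchframe apart from a trapframe.
	 */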

	pcb2->pcb_tf = ktf;
}

/*
 * cpu_lwp_free is called near the end of LWP exit, while the dying LWP
 * is still running on its own stack.  There is little to do here; with
 * STACKCHECKS we scan the fill pattern written by cpu_lwp_fork() and
 * report how much of the EL1 stack was actually used.
 */
void
cpu_lwp_free(struct lwp *l, int proc)
{
#ifdef STACKCHECKS
	/* Report how much stack has been used - debugging */
	u_char *stop, *sbottom, *ptr;
	u_int cnt;

	stop = PCB_END(l);
	sbottom = UAREA_END(l);
	for (cnt = 0, ptr = stop; ptr < sbottom && *ptr == 0xdd; cnt++, ptr++)
		;
	log(LOG_INFO, "lwp %p: %u/%ld bytes are used for EL1 stack\n",
	    l, cnt, sbottom - stop);
#endif
}

void
cpu_lwp_free2(struct lwp *l)
{
}

/*
 * Map a user I/O request into kernel virtual address space.
 * Note: the pages are already locked by uvm_vslock(), so we
 * do not need to pass an access_type to pmap_enter().
 */
int
vmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t faddr, taddr, off;
	paddr_t fpa;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");

	bp->b_saveaddr = bp->b_data;
	faddr = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - faddr;
	len = round_page(off + len);
	taddr = uvm_km_alloc(phys_map, len, atop(faddr) & uvmexp.colormask,
	    UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_COLORMATCH);
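
	/*
	 * UVM_KMF_COLORMATCH allocates kernel VA whose page color matches
	 * that of the user address, so on a virtually-indexed cache the
	 * kernel alias and the user mapping index to the same cache lines
	 * and cannot alias each other.
	 */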
	bp->b_data = (void *)(taddr + off);

	/*
	 * The region is locked, so we expect that pmap_extract() will return
	 * true.
	 */
	while (len) {
		(void)pmap_extract(vm_map_pmap(&bp->b_proc->p_vmspace->vm_map),
		    faddr, &fpa);
		pmap_enter(pmap_kernel(), taddr, fpa,
		    VM_PROT_READ | VM_PROT_WRITE,
		    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
		faddr += PAGE_SIZE;
		taddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
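
	/*
	 * Flush any pmap operations that may have been deferred, so the
	 * new kernel mappings are usable before bp->b_data is handed
	 * back to the caller.
	 */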
	pmap_update(pmap_kernel());

	return 0;
}

/*
 * Unmap a previously-mapped user I/O request.
 */
void
vunmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t addr, off;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");

	/*
	 * Make sure the cache does not have dirty data for the
	 * pages we had mapped.
	 */
	addr = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - addr;
	len = round_page(off + len);

	pmap_remove(pmap_kernel(), addr, addr + len);
	pmap_update(pmap_kernel());
	uvm_km_free(phys_map, addr, len, UVM_KMF_VAONLY);
	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = 0;
}