/*	$NetBSD: vm_machdep.c,v 1.84 2023/12/20 15:34:45 thorpej Exp $	*/

/*-
 * Copyright (c) 2002 The NetBSD Foundation, Inc. All rights reserved.
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 */

/*-
 * Copyright (c) 1995 Charles M. Hannum.  All rights reserved.
 * Copyright (c) 1989, 1990 William Jolitz
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 */

/*
 *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.84 2023/12/20 15:34:45 thorpej Exp $");

#include "opt_kstack_debug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/core.h>
#include <sys/exec.h>
#include <sys/ptrace.h>
#include <sys/syscall.h>
#include <sys/kauth.h>
#include <sys/kmem.h>		/* kmem_free() in startlwp() */
#include <sys/ktrace.h>

#include <dev/mm.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_page.h>
#include <uvm/uvm_physseg.h>

#include <sh3/locore.h>
#include <sh3/cpu.h>
#include <sh3/pcb.h>
#include <sh3/mmu.h>
#include <sh3/cache.h>
#include <sh3/userret.h>

extern void lwp_trampoline(void);

static void sh3_setup_uarea(struct lwp *);

/*
 * Finish a fork operation, with lwp l2 nearly set up.  Copy and
 * update the pcb and trap frame, making the child ready to run.
 *
 * Rig the child's kernel stack so that it will start out in
 * lwp_trampoline() and call child_return() with l2 as an argument.
 * This causes the newly-created lwp to go directly to user level with
 * an apparent return value of 0 from fork(), while the parent lwp
 * returns normally.
 *
 * l1 is the lwp being forked; if l1 == &lwp0, we are creating a
 * kernel thread, and the return path and argument are specified with
 * `func' and `arg'.
 *
 * If an alternate user-level stack is requested (with non-zero values
 * in both the stack and stacksize args), set up the user stack
 * pointer accordingly.
 */
void
cpu_lwp_fork(struct lwp *l1, struct lwp *l2, void *stack,
    size_t stacksize, void (*func)(void *), void *arg)
{
	struct pcb *pcb;
	struct switchframe *sf;

#if 0 /* FIXME: probably wrong for yamt-idlelwp */
	KDASSERT(l1 == curlwp || l1 == &lwp0);
#endif

	sh3_setup_uarea(l2);

	l2->l_md.md_flags = l1->l_md.md_flags;
	KASSERT(l2->l_md.md_astpending == 0);

	/* Copy the user context; the child may be given a different stack */
	memcpy(l2->l_md.md_regs, l1->l_md.md_regs, sizeof(struct trapframe));
	if (stack != NULL)
		l2->l_md.md_regs->tf_r15 = (u_int)stack + stacksize;

	/* When l2 is switched to, jump to the trampoline */
	pcb = lwp_getpcb(l2);
	sf = &pcb->pcb_sf;
	sf->sf_pr  = (int)lwp_trampoline;
	sf->sf_r10 = (int)l2;	/* "new" lwp for lwp_startup() */
	sf->sf_r11 = (int)arg;	/* hook function/argument */
	sf->sf_r12 = (int)func;
}

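/*
 * Initialize the pcb, trapframe area and switchframe in the lwp's
 * u-area so that the lwp can be switched to.  Trapframes are pushed
 * down from the top of the pcb's page; the remaining pages of the
 * u-area serve as the kernel stack.
 */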
static void
sh3_setup_uarea(struct lwp *l)
{
	struct pcb *pcb;
	struct trapframe *tf;
	struct switchframe *sf;
	vaddr_t uv, spbase, fptop;
	/* Convert a P3 kernel VA to its P1 (cached, direct-mapped) alias */
#define	P1ADDR(x)	(SH3_PHYS_TO_P1SEG(*__pmap_kpte_lookup(x) & PG_PPN))

	pcb = lwp_getpcb(l);
	pcb->pcb_onfault = NULL;
#ifdef SH3
	/*
	 * Accessing the context store space must not cause exceptions.
	 * SH4 can make wired TLB entries, so the P3 address of the PCB
	 * is ok.  SH3 cannot, so we need to convert it to P1.  P3/P1
	 * conversion doesn't cause virtual aliasing.
	 */
	if (CPU_IS_SH3)
		pcb = (struct pcb *)P1ADDR((vaddr_t)pcb);
#endif /* SH3 */
	l->l_md.md_pcb = pcb;

	/* stack for trapframes */
	fptop = (vaddr_t)pcb + PAGE_SIZE;
	tf = (struct trapframe *)fptop - 1;
	l->l_md.md_regs = tf;

	/* set up the kernel stack pointer */
	uv = uvm_lwp_getuarea(l);
	spbase = uv + PAGE_SIZE;
#ifdef P1_STACK
	/*
	 * wbinv the u-area to avoid cache aliasing, since the kernel
	 * stack is accessed from P1 instead of P3.
	 */
	if (SH_HAS_VIRTUAL_ALIAS)
		sh_dcache_wbinv_range(uv, USPACE);
	spbase = P1ADDR(spbase);
#else /* !P1_STACK */
#ifdef SH4
	/* Prepare u-area PTEs */
	if (CPU_IS_SH4)
		sh4_switch_setup(l);
#endif
#endif /* !P1_STACK */

#ifdef KSTACK_DEBUG
	/* Fill with magic numbers for stack usage tracking */
	memset((char *)fptop - PAGE_SIZE + sizeof(struct pcb), 0x5a,
	    PAGE_SIZE - sizeof(struct pcb));
	memset((char *)spbase, 0xa5, (USPACE - PAGE_SIZE));
	memset(&pcb->pcb_sf, 0xb4, sizeof(struct switchframe));
#endif /* KSTACK_DEBUG */

	/* Set up the kernel stack and trapframe stack */
	sf = &pcb->pcb_sf;
	sf->sf_r6_bank = (vaddr_t)tf;
	sf->sf_r7_bank = spbase + USPACE - PAGE_SIZE;
	sf->sf_r15 = sf->sf_r7_bank;

	/*
	 * Enable interrupts when the switch frame is restored, since
	 * kernel threads begin to run without restoring a trapframe.
	 */
	sf->sf_sr = PSL_MD;	/* kernel mode, interrupts enabled */
}


/*
 * fork() & co. pass this routine to newlwp() to finish off child
 * creation (see cpu_lwp_fork above and lwp_trampoline for details).
 *
 * When this function returns, the new lwp returns to user mode.
 */
void
md_child_return(struct lwp *l)
{
	struct trapframe *tf = l->l_md.md_regs;

	tf->tf_r0 = 0;		/* fork(2) returns 0 in the child */
	tf->tf_ssr |= PSL_TBIT;	/* syscall succeeded */

	userret(l);
}

/*
 * Process the tail end of a posix_spawn() for the child.
 */
void
cpu_spawn_return(struct lwp *l)
{

	userret(l);
}

/*
 * struct emul e_startlwp (for _lwp_create(2))
 */
void
startlwp(void *arg)
{
	ucontext_t *uc = arg;
	lwp_t *l = curlwp;
	int error __diagused;

	error = cpu_setmcontext(l, &uc->uc_mcontext, uc->uc_flags);
	KASSERT(error == 0);

	kmem_free(uc, sizeof(ucontext_t));
	userret(l);
}

/*
 * Exit hook
 */
void
cpu_lwp_free(struct lwp *l, int proc)
{

	/* Nothing to do */
}


/*
 * lwp_free() hook
 */
void
cpu_lwp_free2(struct lwp *l)
{

	/* Nothing to do */
}

/*
 * Map an IO request into kernel virtual address space.  Requests fall into
 * one of five categories:
 *
 *	B_PHYS|B_UAREA:	User u-area swap.
 *			Address is relative to start of u-area.
 *	B_PHYS|B_PAGET:	User page table swap.
 *			Address is a kernel VA in usrpt (Usrptmap).
 *	B_PHYS|B_DIRTY:	Dirty page push.
 *			Address is a VA in proc2's address space.
 *	B_PHYS|B_PGIN:	Kernel pagein of user pages.
 *			Address is VA in user's address space.
 *	B_PHYS:		User "raw" IO request.
 *			Address is VA in user's address space.
 *
 * All requests are (re)mapped into kernel VA space via the phys_map
 * (a name with only slightly more meaning than "kernel_map").
 */

int
vmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t faddr, taddr, off;
	paddr_t fpa;
	pmap_t kpmap, upmap;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");
	bp->b_saveaddr = bp->b_data;
	faddr = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - faddr;
	len = round_page(off + len);
	taddr = uvm_km_alloc(phys_map, len, 0, UVM_KMF_VAONLY | UVM_KMF_WAITVA);
	bp->b_data = (void *)(taddr + off);
	/*
	 * The region is locked, so we expect that pmap_pte() will return
	 * non-NULL.
	 *
	 * XXX: unwise to expect this in a multithreaded environment.
	 * Anything can happen to a pmap between the time we lock a
	 * region, release the pmap lock, and then relock it for
	 * the pmap_extract().
	 *
	 * No need to flush the TLB since we expect nothing to be mapped
	 * where we just allocated (the TLB will be flushed when our
	 * mapping is removed).
	 */
	upmap = vm_map_pmap(&bp->b_proc->p_vmspace->vm_map);
	kpmap = vm_map_pmap(phys_map);
	while (len) {
		pmap_extract(upmap, faddr, &fpa);
		pmap_enter(kpmap, taddr, fpa,
		    VM_PROT_READ | VM_PROT_WRITE, PMAP_WIRED);
		faddr += PAGE_SIZE;
		taddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	pmap_update(kpmap);

	return 0;
}

/*
 * Free the io map PTEs associated with this IO operation.
 * We also invalidate the TLB entries and restore the original b_data.
 */
void
vunmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t addr, off;
	pmap_t kpmap;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");
	addr = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - addr;
	len = round_page(off + len);
	kpmap = vm_map_pmap(phys_map);
	pmap_remove(kpmap, addr, addr + len);
	pmap_update(kpmap);
	uvm_km_free(phys_map, addr, len, UVM_KMF_VAONLY);
	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = NULL;
}

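/*
 * Check whether a physical address may be accessed through /dev/mem:
 * allow addresses below the first managed page frame as well as any
 * address backed by a managed page; everything else faults.
 */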
int
mm_md_physacc(paddr_t pa, vm_prot_t prot)
{

	if (atop(pa) < uvm_physseg_get_start(uvm_physseg_get_first()) ||
	    PHYS_TO_VM_PAGE(pa) != NULL) {
		return 0;
	}
	return EFAULT;
}

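/*
 * Check whether a kernel virtual address may be accessed through
 * /dev/kmem: P1 (cached, direct-mapped) addresses are handled here,
 * P0 and P2 addresses are rejected, and P3 (page-mapped) addresses
 * are left to the generic pmap-based check.
 */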
int
mm_md_kernacc(void *ptr, vm_prot_t prot, bool *handled)
{
	const vaddr_t va = (vaddr_t)ptr;

	if (va < SH3_P1SEG_BASE) {
		return EFAULT;
	}
	if (va < SH3_P2SEG_BASE) {
		*handled = true;
		return 0;
	}
	if (va < SH3_P3SEG_BASE) {
		return EFAULT;
	}
	*handled = false;
	return 0;
}

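/*
 * Report whether a kernel VA is direct-mapped and, if so, return the
 * corresponding physical address: only P1 (cached, direct-mapped)
 * addresses qualify.
 */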
bool
mm_md_direct_mapped_io(void *ptr, paddr_t *paddr)
{
	vaddr_t va = (vaddr_t)ptr;

	if (va >= SH3_P1SEG_BASE && va < SH3_P2SEG_BASE) {
		*paddr = SH3_P1SEG_TO_PHYS(va);
		return true;
	}
	return false;
}

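/*
 * The inverse mapping always exists: every physical address has a P1
 * (cached, direct-mapped) alias.
 */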
bool
mm_md_direct_mapped_phys(paddr_t paddr, vaddr_t *vaddr)
{

	*vaddr = SH3_PHYS_TO_P1SEG(paddr);
	return true;
}