/*	$NetBSD: vm_machdep.c,v 1.108 2023/03/20 11:19:29 hannken Exp $ */

/*
 * Copyright (c) 1996
 *	The President and Fellows of Harvard College. All rights reserved.
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *	This product includes software developed by Harvard University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Harvard University.
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_machdep.c	8.2 (Berkeley) 9/23/93
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.108 2023/03/20 11:19:29 hannken Exp $");

#include "opt_multiprocessor.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/core.h>
#include <sys/kmem.h>
#include <sys/buf.h>
#include <sys/exec.h>
#include <sys/vnode.h>
#include <sys/cpu.h>

#include <uvm/uvm_extern.h>

#include <machine/cpu.h>
#include <machine/frame.h>
#include <machine/pcb.h>
#include <machine/trap.h>

#include <sparc/sparc/cpuvar.h>

/*
 * Map a user I/O request into kernel virtual address space.
 * Note: the pages are already locked by uvm_vslock(), so we
 * do not need to pass an access_type to pmap_enter().
 */
int
vmapbuf(struct buf *bp, vsize_t len)
{
	struct pmap *upmap, *kpmap;
	vaddr_t uva;	/* User VA (map from) */
	vaddr_t kva;	/* Kernel VA (new to) */
	paddr_t pa;	/* physical address */
	vsize_t off;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");

	/*
	 * XXX:  It might be better to round/trunc to a
	 * segment boundary to avoid VAC problems!
	 */
	bp->b_saveaddr = bp->b_data;
	uva = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - uva;
	len = round_page(off + len);
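	/*
	 * Allocate kernel virtual space only (UVM_KMF_VAONLY: no backing
	 * pages; the loop below enters the user pages instead), and sleep
	 * until VA is available (UVM_KMF_WAITVA), so the allocation
	 * cannot fail.
	 */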
	kva = uvm_km_alloc(kernel_map, len, 0, UVM_KMF_VAONLY | UVM_KMF_WAITVA);
	bp->b_data = (void *)(kva + off);

	/*
	 * We have to flush any write-back cache on the
	 * user-space mappings so our new mappings will
	 * have the correct contents.
	 */
	if (CACHEINFO.c_vactype != VAC_NONE)
		cache_flush((void *)uva, len);

	upmap = vm_map_pmap(&bp->b_proc->p_vmspace->vm_map);
	kpmap = vm_map_pmap(kernel_map);
	do {
		if (pmap_extract(upmap, uva, &pa) == false)
			panic("vmapbuf: null page frame");
		/* Now map the page into kernel space. */
		pmap_enter(kpmap, kva, pa,
		    VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED);
		uva += PAGE_SIZE;
		kva += PAGE_SIZE;
		len -= PAGE_SIZE;
	} while (len);
	pmap_update(kpmap);

	return 0;
}
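
/*
 * In outline, a physio(9)-style caller drives the pair of helpers in
 * this file like so (a sketch of the usual pattern, not a verbatim
 * copy of kern_physio.c):
 *
 *	error = uvm_vslock(bp->b_proc->p_vmspace, bp->b_data, todo,
 *	    VM_PROT_READ | VM_PROT_WRITE);
 *	if (error == 0) {
 *		vmapbuf(bp, todo);		remap into kernel VA
 *		(*strategy)(bp);		start the transfer
 *		error = biowait(bp);		wait for completion
 *		vunmapbuf(bp, todo);		tear the mapping down
 *		uvm_vsunlock(bp->b_proc->p_vmspace, bp->b_data, todo);
 *	}
 */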

/*
 * Unmap a previously-mapped user I/O request.
 */
void
vunmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t kva;
	vsize_t off;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");

	kva = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - kva;
	len = round_page(off + len);
	pmap_remove(vm_map_pmap(kernel_map), kva, kva + len);
	pmap_update(vm_map_pmap(kernel_map));
	uvm_km_free(kernel_map, kva, len, UVM_KMF_VAONLY);
	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = NULL;

#if 0	/* XXX: The flush above is sufficient, right? */
	if (CACHEINFO.c_vactype != VAC_NONE)
		cpuinfo.cache_flush(bp->b_data, len);
#endif
}


void
cpu_proc_fork(struct proc *p1, struct proc *p2)
{

	p2->p_md.md_flags = p1->p_md.md_flags;
}


/*
 * The offset of the topmost frame in the kernel stack.
 */
#define	TOPFRAMEOFF (USPACE-sizeof(struct trapframe)-sizeof(struct frame))
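
/*
 * A sketch of the resulting u-area layout, from the pcb at the base
 * to the trapframe at the very top (offsets are from the pcb; see the
 * uses in cpu_lwp_fork() below):
 *
 *	USPACE ->	+---------------------+
 *			| struct trapframe    |	<- l->l_md.md_tf
 *			+---------------------+
 *	TOPFRAMEOFF ->	| struct frame        |	(initial switch frame)
 *			+---------------------+
 *			|         ...         |	(kernel stack grows down)
 *			+---------------------+
 *			| struct pcb          |
 *	0 ->		+---------------------+
 */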

/*
 * Finish a fork operation, with process l2 nearly set up.
 * Copy and update the pcb and trap frame, making the child ready to run.
 *
 * Rig the child's kernel stack so that it will start out in
 * lwp_trampoline() and call child_return() with l2 as an
 * argument. This causes the newly-created child process to go
 * directly to user level with an apparent return value of 0 from
 * fork(), while the parent process returns normally.
 *
 * l1 is the process being forked; if l1 == &lwp0, we are creating
 * a kernel thread, and the return path and argument are specified with
 * `func' and `arg'.
 *
 * If an alternate user-level stack is requested (with non-zero values
 * in both the stack and stacksize args), set up the user stack pointer
 * accordingly.
 */
void
cpu_lwp_fork(struct lwp *l1, struct lwp *l2,
    void *stack, size_t stacksize,
    void (*func)(void *), void *arg)
{
	struct pcb *opcb = lwp_getpcb(l1);
	struct pcb *npcb = lwp_getpcb(l2);
	struct trapframe *tf2;
	struct rwindow *rp;

	/*
	 * Save all user registers to l1's stack or, in the case of
	 * user registers and invalid stack pointers, to opcb.
	 * We then copy the whole pcb to l2; when switch() selects l2
	 * to run, it will run at the `lwp_trampoline' stub, rather
	 * than returning at the copying code below.
	 *
	 * If process l1 has an FPU state, we must copy it.  If it is
	 * the FPU user, we must save the FPU state first.
	 */

	if (l1 == curlwp) {
		write_user_windows();
		opcb->pcb_psr = getpsr();
	}
#ifdef DIAGNOSTIC
	else if (l1 != &lwp0)	/* XXX is this valid? */
		panic("cpu_lwp_fork: curlwp");
#endif

	memcpy((void *)npcb, (void *)opcb, sizeof(struct pcb));
	if (l1->l_md.md_fpstate != NULL) {
		struct cpu_info *cpi;
		int s;

		l2->l_md.md_fpstate =
		    kmem_alloc(sizeof(struct fpstate), KM_SLEEP);

		FPU_LOCK(s);
		if ((cpi = l1->l_md.md_fpu) != NULL) {
			if (cpi->fplwp != l1)
				panic("FPU(%d): fplwp %p",
					cpi->ci_cpuid, cpi->fplwp);
			if (l1 == cpuinfo.fplwp)
				savefpstate(l1->l_md.md_fpstate);
#if defined(MULTIPROCESSOR)
			else
				XCALL1(ipi_savefpstate, l1->l_md.md_fpstate,
				       1 << cpi->ci_cpuid);
#endif
		}
		memcpy(l2->l_md.md_fpstate, l1->l_md.md_fpstate,
		    sizeof(struct fpstate));
		FPU_UNLOCK(s);
	} else
		l2->l_md.md_fpstate = NULL;

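	/* The child has not yet run, so no CPU's FPU holds its state. */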
	l2->l_md.md_fpu = NULL;

	/*
	 * Set up the (kernel) stack frame that will carry the child
	 * back out of the kernel.  (The trap frame invariably resides
	 * at the tippity-top of the u. area.)
	 */
	tf2 = l2->l_md.md_tf = (struct trapframe *)
			((int)npcb + USPACE - sizeof(*tf2));

	/* Copy parent's trapframe */
	*tf2 = *(struct trapframe *)((int)opcb + USPACE - sizeof(*tf2));

	/*
	 * If specified, give the child a different stack.
	 */
	if (stack != NULL)
		tf2->tf_out[6] = (u_int)stack + stacksize;

	/*
	 * The fork system call always uses the old system call
	 * convention; clear carry and skip the trap instruction as
	 * in syscall().
	 * Note: lwp_trampoline() sets a fresh psr when returning
	 * to user mode.
	 */
	/*tf2->tf_psr &= ~PSR_C;	-* success */

	/*
	 * Set the return values in child mode: under the old fork()
	 * convention %o0 carries the pid and %o1 distinguishes the two
	 * returns, 0 in the parent and 1 in the child.
	 */
	tf2->tf_out[0] = 0;
	tf2->tf_out[1] = 1;

	/* Construct kernel frame to return to in cpu_switch() */
	rp = (struct rwindow *)((u_int)npcb + TOPFRAMEOFF);
	/**rp = *(struct rwindow *)((u_int)opcb + TOPFRAMEOFF);*/
	rp->rw_local[0] = (int)func;	/* Function to call */
	rp->rw_local[1] = (int)arg;	/* and its argument */
	rp->rw_local[2] = (int)l2;	/* new LWP */

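	/*
	 * pcb_pc is consumed as a SPARC return address, and a return
	 * jumps to the saved address + 8 (past a call and its delay
	 * slot); bias by -8 so the child resumes exactly at
	 * lwp_trampoline().
	 */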
	npcb->pcb_pc = (int)lwp_trampoline - 8;
	npcb->pcb_sp = (int)rp;
	npcb->pcb_psr &= ~PSR_CWP;	/* Run in window #0 */
	npcb->pcb_wim = 1;		/* Fence at window #1 */
}
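
/*
 * When the switch code first selects l2 it resumes at lwp_trampoline()
 * (see locore.s), which, in outline, picks func, arg and the new lwp
 * out of %l0..%l2 as set up above, calls func(arg), and unwinds to
 * user mode through the trapframe.  For fork() func is child_return(),
 * so the child re-enters user mode with the return values stored above.
 */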

/*
 * Clean up the FPU state when an LWP is torn down: save it if it is
 * still live on some CPU and revoke that CPU's ownership.  The state
 * itself is freed later, in cpu_lwp_free2().
 */
void
cpu_lwp_free(struct lwp *l, int proc)
{
	struct fpstate *fs;

	if ((fs = l->l_md.md_fpstate) != NULL) {
		struct cpu_info *cpi;
		int s;

		FPU_LOCK(s);
		if ((cpi = l->l_md.md_fpu) != NULL) {
			if (cpi->fplwp != l)
				panic("FPU(%d): fplwp %p",
					cpi->ci_cpuid, cpi->fplwp);
			if (l == cpuinfo.fplwp)
				savefpstate(fs);
#if defined(MULTIPROCESSOR)
			else
				XCALL1(ipi_savefpstate, fs, 1 << cpi->ci_cpuid);
#endif
			cpi->fplwp = NULL;
		}
		l->l_md.md_fpu = NULL;
		FPU_UNLOCK(s);
	}
}

void
cpu_lwp_free2(struct lwp *l)
{
	struct fpstate *fs;

	if ((fs = l->l_md.md_fpstate) != NULL)
		kmem_free(fs, sizeof(struct fpstate));
}

int
cpu_lwp_setprivate(lwp_t *l, void *addr)
{
	struct trapframe *tf = l->l_md.md_tf;

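	/* %g7 is the ABI-reserved thread pointer on SPARC. */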
	tf->tf_global[7] = (uintptr_t)addr;

	return 0;
}