1 /* $OpenBSD: vm_machdep.c,v 1.53 2024/05/21 23:16:06 jsg Exp $ */
2 /* $NetBSD: vm_machdep.c,v 1.55 2000/03/29 03:49:48 simonb Exp $ */
3
4 /*
5 * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
6 * All rights reserved.
7 *
8 * Author: Chris G. Demetriou
9 *
10 * Permission to use, copy, modify and distribute this software and
11 * its documentation is hereby granted, provided that both the copyright
12 * notice and this permission notice appear in all copies of the
13 * software, derivative works or modified versions, and any portions
14 * thereof, and that both notices appear in supporting documentation.
15 *
16 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
17 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
18 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
19 *
20 * Carnegie Mellon requests users of this software to return to
21 *
22 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
23 * School of Computer Science
24 * Carnegie Mellon University
25 * Pittsburgh PA 15213-3890
26 *
27 * any improvements or extensions that they make and grant Carnegie the
28 * rights to redistribute these changes.
29 */
30
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/proc.h>
34 #include <sys/signalvar.h>
35 #include <sys/malloc.h>
36 #include <sys/buf.h>
37 #include <sys/vnode.h>
38 #include <sys/user.h>
39 #include <sys/exec.h>
40
41 #include <uvm/uvm_extern.h>
42
43 #include <machine/cpu.h>
44 #include <machine/pmap.h>
45 #include <machine/reg.h>
46
47
48 /*
49 * cpu_exit is called as the last action during exit.
50 */
51 void
cpu_exit(p)52 cpu_exit(p)
53 struct proc *p;
54 {
55
56 if (p->p_addr->u_pcb.pcb_fpcpu != NULL)
57 fpusave_proc(p, 0);
58
59 /*
60 * Deactivate the exiting address space before the vmspace
61 * is freed. Note that we will continue to run on this
62 * vmspace's context until the switch to idle in sched_exit().
63 */
64 pmap_deactivate(p);
65 sched_exit(p);
66 /* NOTREACHED */
67 }
68
69 /*
70 * Finish a fork operation, with process p2 nearly set up.
71 * Copy and update the pcb and trap frame, making the child ready to run.
72 *
73 * Rig the child's kernel stack so that it will start out in
74 * proc_trampoline() and call 'func' with 'arg' as an argument.
75 * For normal processes this is child_return(), which causes the
76 * child to go directly to user level with an apparent return value
77 * of 0 from fork(), while the parent process returns normally.
78 * For kernel threads this will be a function that never returns.
79 *
80 * An alternate user-level stack or TCB can be requested by passing
81 * a non-NULL value; these are poked into the PCB so they're in
82 * effect at the initial return to userspace.
83 */
void
cpu_fork(struct proc *p1, struct proc *p2, void *stack, void *tcb,
    void (*func)(void *), void *arg)
{
	struct user *up = p2->p_addr;

	/*
	 * Start the child with the parent's trapframe pointer; a private
	 * copy is made further below once the child's kernel stack is
	 * laid out.
	 */
	p2->p_md.md_tf = p1->p_md.md_tf;

#ifndef NO_IEEE
	/* Inherit the FPU-used flag and the IEEE FP control bits. */
	p2->p_md.md_flags = p1->p_md.md_flags & (MDP_FPUSED | MDP_FP_C);
#else
	p2->p_md.md_flags = p1->p_md.md_flags & MDP_FPUSED;
#endif

	/*
	 * Cache the physical address of the pcb, so we can
	 * swap to it easily.
	 */
	p2->p_md.md_pcbpaddr = (void *)vtophys((vaddr_t)&up->u_pcb);

	/*
	 * Copy floating point state from the FP chip to the PCB
	 * if this process has state stored there.
	 * (fpusave_proc with second argument 1 saves the live FPU
	 * contents into p1's PCB so the structure copy below sees them.)
	 */
	if (p1->p_addr->u_pcb.pcb_fpcpu != NULL)
		fpusave_proc(p1, 1);

	/*
	 * Copy pcb and stack from proc p1 to p2.
	 * If specified, give the child a different stack and/or TCB.
	 * We do this as cheaply as possible, copying only the active
	 * part of the stack.  The stack and pcb need to agree;
	 */
	up->u_pcb = p1->p_addr->u_pcb;
	if (stack != NULL)
		up->u_pcb.pcb_hw.apcb_usp = (u_long)stack;
	else
		up->u_pcb.pcb_hw.apcb_usp = alpha_pal_rdusp();
	if (tcb != NULL)
		up->u_pcb.pcb_hw.apcb_unique = (unsigned long)tcb;

	/*
	 * Arrange for a non-local goto when the new process
	 * is started, to resume here, returning nonzero from setjmp.
	 */
#ifdef DIAGNOSTIC
	/*
	 * If p1 != curproc && p1 == &proc0, we are creating a kernel
	 * thread.
	 */
	if (p1 != curproc && p1 != &proc0)
		panic("cpu_fork: curproc");
#ifdef DEBUG
	/* FEN should be clear here: p1's FPU state was saved above. */
	if ((up->u_pcb.pcb_hw.apcb_flags & ALPHA_PCB_FLAGS_FEN) != 0)
		printf("DANGER WILL ROBINSON: FEN SET IN cpu_fork!\n");
#endif
#endif

	/*
	 * create the child's kernel stack, from scratch.
	 */
	/*
	 * Pick a stack pointer, leaving room for a trapframe;
	 * copy trapframe from parent so return to user mode
	 * will be to right address, with correct registers.
	 * The trapframe sits at the very top of the child's USPACE area.
	 */
	p2->p_md.md_tf = (struct trapframe *)((char *)p2->p_addr + USPACE) - 1;
	bcopy(p1->p_md.md_tf, p2->p_md.md_tf, sizeof(struct trapframe));

	/*
	 * Arrange for continuation at child_return(), which
	 * will return to exception_return().  Note that the child
	 * process doesn't stay in the kernel for long!
	 *
	 * The pcb_context[] slots are the callee-saved registers
	 * restored by the context switch; proc_trampoline consumes
	 * them (func/ra/arg) to dispatch the child's first call.
	 */
	up->u_pcb.pcb_hw.apcb_ksp = (u_int64_t)p2->p_md.md_tf;
	up->u_pcb.pcb_context[0] = (u_int64_t)func;
	up->u_pcb.pcb_context[1] =
	    (u_int64_t)exception_return;	/* s1: ra */
	up->u_pcb.pcb_context[2] = (u_int64_t)arg;
	up->u_pcb.pcb_context[7] =
	    (u_int64_t)proc_trampoline;		/* ra: assembly magic */
	up->u_pcb.pcb_context[8] = IPL_SCHED;	/* ps: IPL */
}
167
/*
 * Allocation mode used by vmapbuf()/vunmapbuf(): kernel VA is carved
 * from phys_map, and the allocator sleeps until space is available
 * rather than failing.
 */
struct kmem_va_mode kv_physwait = {
	.kv_map = &phys_map,
	.kv_wait = 1,
};
172
173 /*
174 * Map a user I/O request into kernel virtual address space.
175 * Note: the pages are already locked by uvm_vslock(), so we
176 * do not need to pass an access_type to pmap_enter().
177 */
void
vmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t faddr, taddr, off;
	paddr_t fpa;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");
	/*
	 * Stash the original user address in b_saveaddr (restored by
	 * vunmapbuf()) and work with page-aligned bounds: faddr is the
	 * first user page, off the sub-page offset, len the rounded span.
	 */
	faddr = trunc_page((vaddr_t)(bp->b_saveaddr = bp->b_data));
	off = (vaddr_t)bp->b_data - faddr;
	len = round_page(off + len);
	/* Sleeping allocation; cannot fail (kv_wait set in kv_physwait). */
	taddr = (vaddr_t)km_alloc(len, &kv_physwait, &kp_none, &kd_waitok);
	bp->b_data = (caddr_t)(taddr + off);
	/*
	 * The region is locked, so we expect that pmap_pte() will return
	 * non-NULL.
	 * XXX: unwise to expect this in a multithreaded environment.
	 * anything can happen to a pmap between the time we lock a
	 * region, release the pmap lock, and then relock it for
	 * the pmap_extract().
	 *
	 * no need to flush TLB since we expect nothing to be mapped
	 * where we just allocated (TLB will be flushed when our
	 * mapping is removed).
	 */
	while (len) {
		/* Look up each user page and alias it into kernel VA. */
		(void) pmap_extract(vm_map_pmap(&bp->b_proc->p_vmspace->vm_map),
		    faddr, &fpa);
		pmap_kenter_pa(taddr, fpa, PROT_READ | PROT_WRITE);
		faddr += PAGE_SIZE;
		taddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	pmap_update(pmap_kernel());
}
213
214 /*
215 * Unmap a previously-mapped user I/O request.
216 */
217 void
vunmapbuf(struct buf * bp,vsize_t len)218 vunmapbuf(struct buf *bp, vsize_t len)
219 {
220 vaddr_t addr, off;
221
222 if ((bp->b_flags & B_PHYS) == 0)
223 panic("vunmapbuf");
224 addr = trunc_page((vaddr_t)bp->b_data);
225 off = (vaddr_t)bp->b_data - addr;
226 len = round_page(off + len);
227 pmap_kremove(addr, len);
228 pmap_update(pmap_kernel());
229 km_free((void *)addr, len, &kv_physwait, &kp_none);
230 bp->b_data = bp->b_saveaddr;
231 bp->b_saveaddr = NULL;
232 }
233
234 void *
tcb_get(struct proc * p)235 tcb_get(struct proc *p)
236 {
237 if (p == curproc)
238 return (void *)alpha_pal_rdunique();
239 else
240 return (void *)p->p_addr->u_pcb.pcb_hw.apcb_unique;
241 }
242
243 void
tcb_set(struct proc * p,void * newtcb)244 tcb_set(struct proc *p, void *newtcb)
245 {
246 KASSERT(p == curproc);
247
248 p->p_addr->u_pcb.pcb_hw.apcb_unique = (unsigned long)newtcb;
249 alpha_pal_wrunique((unsigned long)newtcb);
250 }
251