/*	$NetBSD: vm_machdep.c,v 1.71 2003/01/18 06:45:07 thorpej Exp $ */

/*
 * Copyright (c) 1996
 *	The President and Fellows of Harvard College. All rights reserved.
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *	This product includes software developed by Harvard University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Harvard University.
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_machdep.c	8.2 (Berkeley) 9/23/93
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/core.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/exec.h>
#include <sys/vnode.h>

#include <uvm/uvm_extern.h>

#include <machine/cpu.h>
#include <machine/frame.h>
#include <machine/trap.h>

#include <sparc/sparc/cpuvar.h>

/*
 * Move pages from one kernel virtual address to another.
 */
void
pagemove(from, to, size)
	caddr_t from, to;
	size_t size;
{
	paddr_t pa;

	if (size & PGOFSET || (int)from & PGOFSET || (int)to & PGOFSET)
		panic("pagemove 1");
	while (size > 0) {
		if (pmap_extract(pmap_kernel(), (vaddr_t)from, &pa) == FALSE)
			panic("pagemove 2");
		pmap_kremove((vaddr_t)from, PAGE_SIZE);
		pmap_kenter_pa((vaddr_t)to, pa, VM_PROT_READ | VM_PROT_WRITE);
		from += PAGE_SIZE;
		to += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	pmap_update(pmap_kernel());
}
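
/*
 * Illustrative use of pagemove() (hypothetical sketch, not compiled):
 * both addresses and the size must be page-aligned or the sanity
 * check above panics.  `oldva' and `newva' are made-up names.
 */
#if 0
	caddr_t oldva, newva;	/* page-aligned kernel VAs (assumed set up) */

	pagemove(oldva, newva, 2 * PAGE_SIZE);
	/*
	 * The physical pages that backed [oldva, oldva + 2*PAGE_SIZE)
	 * now appear at newva; the old range is left unmapped.
	 */
#endif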


/*
 * Map a user I/O request into kernel virtual address space.
 * Note: the pages are already locked by uvm_vslock(), so we
 * do not need to pass an access_type to pmap_enter().
 */
void
vmapbuf(bp, len)
	struct buf *bp;
	vsize_t len;
{
	struct pmap *upmap, *kpmap;
	vaddr_t uva;	/* User VA (map from) */
	vaddr_t kva;	/* Kernel VA (new to) */
	paddr_t pa;	/* physical address */
	vsize_t off;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");

	/*
	 * XXX:  It might be better to round/trunc to a
	 * segment boundary to avoid VAC problems!
	 */
	bp->b_saveaddr = bp->b_data;
	uva = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - uva;
	len = round_page(off + len);
	kva = uvm_km_valloc_wait(kernel_map, len);
	bp->b_data = (caddr_t)(kva + off);

	/*
	 * We have to flush any write-back cache on the
	 * user-space mappings so our new mappings will
	 * have the correct contents.
	 */
	if (CACHEINFO.c_vactype != VAC_NONE)
		cache_flush((caddr_t)uva, len);

	upmap = vm_map_pmap(&bp->b_proc->p_vmspace->vm_map);
	kpmap = vm_map_pmap(kernel_map);
	do {
		if (pmap_extract(upmap, uva, &pa) == FALSE)
			panic("vmapbuf: null page frame");
		/* Now map the page into kernel space. */
		pmap_enter(kpmap, kva, pa,
		    VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED);
		uva += PAGE_SIZE;
		kva += PAGE_SIZE;
		len -= PAGE_SIZE;
	} while (len);
	pmap_update(kpmap);
}

/*
 * Unmap a previously-mapped user I/O request.
 */
void
vunmapbuf(bp, len)
	struct buf *bp;
	vsize_t len;
{
	vaddr_t kva;
	vsize_t off;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");

	kva = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - kva;
	len = round_page(off + len);
	pmap_remove(vm_map_pmap(kernel_map), kva, kva + len);
	pmap_update(vm_map_pmap(kernel_map));
	uvm_km_free_wakeup(kernel_map, kva, len);
	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = NULL;

#if 0	/* XXX: The flush above is sufficient, right? */
	if (CACHEINFO.c_vactype != VAC_NONE)
		cpuinfo.cache_flush(bp->b_data, len);
#endif
}
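
/*
 * Example pairing (hypothetical sketch, modelled on the physio() path):
 * the buffer's pages are wired with uvm_vslock() before vmapbuf() and
 * unwired again after vunmapbuf().  `todo' stands in for the number of
 * bytes in this transfer.
 */
#if 0
	uvm_vslock(bp->b_proc, bp->b_data, todo,
	    VM_PROT_READ | VM_PROT_WRITE);
	vmapbuf(bp, todo);	/* bp->b_data now points into kernel_map */
	/* ... perform the transfer through bp->b_data ... */
	vunmapbuf(bp, todo);	/* restores the user VA in bp->b_data */
	uvm_vsunlock(bp->b_proc, bp->b_data, todo);
#endif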


/*
 * The offset of the topmost frame in the kernel stack.
 */
#define	TOPFRAMEOFF (USPACE-sizeof(struct trapframe)-sizeof(struct frame))
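
/*
 * Rough picture of the top of the u. area as arranged by
 * cpu_lwp_fork()/cpu_setfunc() below (a sketch; see <machine/frame.h>
 * for the authoritative trapframe/frame definitions):
 *
 *	pcb + USPACE		<- top of the u. area
 *	  struct trapframe	   (saved user register state)
 *	  struct frame		   (topmost kernel frame; its locals
 *				    carry `func' and `arg')
 *	pcb + TOPFRAMEOFF	<- pcb_sp of a freshly set up LWP
 */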

/*
 * Finish a fork operation, with LWP l2 nearly set up.
 * Copy and update the pcb and trap frame, making the child ready to run.
 *
 * Rig the child's kernel stack so that it will start out in
 * proc_trampoline() and call child_return() with l2 as an
 * argument. This causes the newly-created child process to go
 * directly to user level with an apparent return value of 0 from
 * fork(), while the parent process returns normally.
 *
 * l1 is the LWP being forked; if l1 == &lwp0, we are creating
 * a kernel thread, and the return path and argument are specified with
 * `func' and `arg'.
 *
 * If an alternate user-level stack is requested (with non-zero values
 * in both the stack and stacksize args), set up the user stack pointer
 * accordingly.
 */
void
cpu_lwp_fork(l1, l2, stack, stacksize, func, arg)
	struct lwp *l1, *l2;
	void *stack;
	size_t stacksize;
	void (*func) __P((void *));
	void *arg;
{
	struct pcb *opcb = &l1->l_addr->u_pcb;
	struct pcb *npcb = &l2->l_addr->u_pcb;
	struct trapframe *tf2;
	struct rwindow *rp;

	/*
	 * Save all user registers to l1's stack or, in the case of
	 * user registers and invalid stack pointers, to opcb.
	 * We then copy the whole pcb to l2; when switch() selects l2
	 * to run, it will run at the `proc_trampoline' stub, rather
	 * than returning at the copying code below.
	 *
	 * If l1 has an FPU state, we must copy it.  If it is
	 * the FPU user, we must save the FPU state first.
	 */

	if (l1 == curlwp) {
		write_user_windows();
		opcb->pcb_psr = getpsr();
	}
#ifdef DIAGNOSTIC
	else if (l1 != &lwp0)
		panic("cpu_lwp_fork: curlwp");
#endif

	bcopy((caddr_t)opcb, (caddr_t)npcb, sizeof(struct pcb));
	if (l1->l_md.md_fpstate != NULL) {
		struct cpu_info *cpi;
		int s;

		l2->l_md.md_fpstate = malloc(sizeof(struct fpstate),
		    M_SUBPROC, M_WAITOK);

		FPU_LOCK(s);
		if ((cpi = l1->l_md.md_fpu) != NULL) {
			if (cpi->fplwp != l1)
				panic("FPU(%d): fplwp %p",
					cpi->ci_cpuid, cpi->fplwp);
			if (l1 == cpuinfo.fplwp)
				savefpstate(l1->l_md.md_fpstate);
#if defined(MULTIPROCESSOR)
			else
				XCALL1(savefpstate, l1->l_md.md_fpstate,
					1 << cpi->ci_cpuid);
#endif
		}
		bcopy(l1->l_md.md_fpstate, l2->l_md.md_fpstate,
		    sizeof(struct fpstate));
		FPU_UNLOCK(s);
	} else
		l2->l_md.md_fpstate = NULL;

	l2->l_md.md_fpu = NULL;

	/*
	 * Set up the (kernel) stack frame that will take the child
	 * back out of the kernel.  (The trap frame invariably resides
	 * at the tippity-top of the u. area.)
	 */
	tf2 = l2->l_md.md_tf = (struct trapframe *)
			((int)npcb + USPACE - sizeof(*tf2));

	/* Copy parent's trapframe */
	*tf2 = *(struct trapframe *)((int)opcb + USPACE - sizeof(*tf2));

	/*
	 * If specified, give the child a different stack.
	 */
	if (stack != NULL)
		tf2->tf_out[6] = (u_int)stack + stacksize;

	/*
	 * The fork system call always uses the old system call
	 * convention; clear carry and skip the trap instruction as
	 * in syscall().
	 * Note: proc_trampoline() sets a fresh psr when returning
	 * to user mode.
	 */
	/*tf2->tf_psr &= ~PSR_C;   -* success */
	tf2->tf_pc = tf2->tf_npc;
	tf2->tf_npc = tf2->tf_pc + 4;

	/* Set the return values seen in the child */
	tf2->tf_out[0] = 0;
	tf2->tf_out[1] = 1;

	/* Construct kernel frame to return to in cpu_switch() */
	rp = (struct rwindow *)((u_int)npcb + TOPFRAMEOFF);
	rp->rw_local[0] = (int)func;		/* Function to call */
	rp->rw_local[1] = (int)arg;		/* and its argument */

	npcb->pcb_pc = (int)proc_trampoline - 8;
	npcb->pcb_sp = (int)rp;
	npcb->pcb_psr &= ~PSR_CWP;	/* Run in window #0 */
	npcb->pcb_wim = 1;		/* Fence at window #1 */
}
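
/*
 * Sketch of the child's resume path (illustration only; the
 * authoritative code is proc_trampoline in locore.s).  When
 * cpu_switch() selects the child it loads pcb_sp/pcb_pc, so control
 * reaches proc_trampoline with %sp at the frame built above, roughly
 * the equivalent of:
 */
#if 0
	void (*func)(void *) = (void (*)(void *))rp->rw_local[0]; /* %l0 */
	void *arg = (void *)rp->rw_local[1];			   /* %l1 */

	(*func)(arg);	/* e.g. child_return(l2) for a normal fork */
	/* ... then return to user mode through the trap frame. */
#endif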

/*
 * cpu_exit is called as the last action during exit.
 *
 * We clean up a little and then call switchexit() with the old lwp
 * as an argument.  switchexit() switches to the idle context, schedules
 * the old vmspace and stack to be freed, then selects a new process to
 * run.
 *
 * If proc == 0, we're an exiting lwp: we pass lwp_exit2 rather than
 * exit2 to switchexit() and do only LWP-appropriate cleanup (e.g. we
 * don't deactivate the pmap).
 */
void
cpu_exit(l, proc)
	struct lwp *l;
	int proc;
{
	struct fpstate *fs;

	if ((fs = l->l_md.md_fpstate) != NULL) {
		struct cpu_info *cpi;
		int s;

		FPU_LOCK(s);
		if ((cpi = l->l_md.md_fpu) != NULL) {
			if (cpi->fplwp != l)
				panic("FPU(%d): fplwp %p",
					cpi->ci_cpuid, cpi->fplwp);
			if (l == cpuinfo.fplwp)
				savefpstate(fs);
#if defined(MULTIPROCESSOR)
			else
				XCALL1(savefpstate, fs, 1 << cpi->ci_cpuid);
#endif
			cpi->fplwp = NULL;
		}
		FPU_UNLOCK(s);
		free((void *)fs, M_SUBPROC);
	}
	switchexit(l, proc ? exit2 : lwp_exit2);
	/* NOTREACHED */
}

void
cpu_setfunc(l, func, arg)
	struct lwp *l;
	void (*func) __P((void *));
	void *arg;
{
	struct pcb *pcb = &l->l_addr->u_pcb;
	/*struct trapframe *tf = l->l_md.md_tf;*/
	struct rwindow *rp;

	/* Construct kernel frame to return to in cpu_switch() */
	rp = (struct rwindow *)((u_int)pcb + TOPFRAMEOFF);
	rp->rw_local[0] = (int)func;		/* Function to call */
	rp->rw_local[1] = (int)arg;		/* and its argument */

	pcb->pcb_pc = (int)proc_trampoline - 8;
	pcb->pcb_sp = (int)rp;
	pcb->pcb_psr &= ~PSR_CWP;	/* Run in window #0 */
	pcb->pcb_wim = 1;		/* Fence at window #1 */
}
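
/*
 * Hypothetical use of cpu_setfunc() (sketch only; `mythread' and
 * `myarg' are made-up names): redirect LWP `l' so that the next time
 * cpu_switch() selects it, it starts out in mythread(myarg).
 */
#if 0
	cpu_setfunc(l, mythread, myarg);
#endif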

/*
 * cpu_coredump is called to write a core dump header.
 * (should this be defined elsewhere?  machdep.c?)
 */
int
cpu_coredump(l, vp, cred, chdr)
	struct lwp *l;
	struct vnode *vp;
	struct ucred *cred;
	struct core *chdr;
{
	int error;
	struct md_coredump md_core;
	struct coreseg cseg;
	struct proc *p;

	p = l->l_proc;

	CORE_SETMAGIC(*chdr, COREMAGIC, MID_MACHINE, 0);
	chdr->c_hdrsize = ALIGN(sizeof(*chdr));
	chdr->c_seghdrsize = ALIGN(sizeof(cseg));
	chdr->c_cpusize = sizeof(md_core);

	md_core.md_tf = *l->l_md.md_tf;
	if (l->l_md.md_fpstate) {
		if (l == cpuinfo.fplwp)
			savefpstate(l->l_md.md_fpstate);
		md_core.md_fpstate = *l->l_md.md_fpstate;
	} else
		bzero((caddr_t)&md_core.md_fpstate, sizeof(struct fpstate));

	CORE_SETMAGIC(cseg, CORESEGMAGIC, MID_MACHINE, CORE_CPU);
	cseg.c_addr = 0;
	cseg.c_size = chdr->c_cpusize;
	error = vn_rdwr(UIO_WRITE, vp, (caddr_t)&cseg, chdr->c_seghdrsize,
	    (off_t)chdr->c_hdrsize, UIO_SYSSPACE,
	    IO_NODELOCKED|IO_UNIT, cred, NULL, p);
	if (error)
		return error;

	error = vn_rdwr(UIO_WRITE, vp, (caddr_t)&md_core, sizeof(md_core),
	    (off_t)(chdr->c_hdrsize + chdr->c_seghdrsize), UIO_SYSSPACE,
	    IO_NODELOCKED|IO_UNIT, cred, NULL, p);
	if (!error)
		chdr->c_nseg++;

	return error;
}
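
/*
 * Resulting layout of the CPU segment in the core file (sketch; the
 * struct core header itself is written by the machine-independent
 * coredump code, not here):
 *
 *	offset 0			struct core
 *	c_hdrsize			struct coreseg (CORE_CPU)
 *	c_hdrsize + c_seghdrsize	struct md_coredump
 *					  (trap frame + FPU state)
 */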