/*	$NetBSD: vm_machdep.c,v 1.3 2003/04/02 00:00:46 thorpej Exp $	*/

/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1982, 1986, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vm_machdep.c 1.21 91/04/06$
 *
 *	@(#)vm_machdep.c	8.6 (Berkeley) 1/12/94
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.3 2003/04/02 00:00:46 thorpej Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/user.h>
#include <sys/core.h>
#include <sys/exec.h>

#include <machine/frame.h>
#include <machine/cpu.h>
#include <machine/pte.h>
#include <machine/reg.h>

#include <uvm/uvm_extern.h>

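/*
 * Machine-dependent part of process fork: copy the MD proc flags
 * from the parent process to the child.
 */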
void
cpu_proc_fork(p1, p2)
	struct proc *p1, *p2;
{

	p2->p_md.mdp_flags = p1->p_md.mdp_flags;
}

/*
 * Finish a fork operation, with LWP l2 nearly set up.
 * Copy and update the pcb and trap frame, making the child ready to run.
 *
 * Rig the child's kernel stack so that it will start out in
 * proc_trampoline() and call child_return() with l2 as an
 * argument. This causes the newly-created child process to go
 * directly to user level with an apparent return value of 0 from
 * fork(), while the parent process returns normally.
 *
 * l1 is the LWP being forked; if l1 == &lwp0, we are creating
 * a kernel thread, and the return path and argument are specified with
 * `func' and `arg'.
 *
 * If an alternate user-level stack is requested (with non-zero values
 * in both the stack and stacksize args), set up the user stack pointer
 * accordingly.
 */
void
cpu_lwp_fork(l1, l2, stack, stacksize, func, arg)
	struct lwp *l1, *l2;
	void *stack;
	size_t stacksize;
	void (*func) __P((void *));
	void *arg;
{
	struct pcb *pcb = &l2->l_addr->u_pcb;
	struct trapframe *tf;
	struct switchframe *sf;
	extern struct pcb *curpcb;

	l2->l_md.md_flags = l1->l_md.md_flags;

	/* Copy pcb from lwp l1 to l2. */
	if (l1 == curlwp) {
		/* Sync the PCB before we copy it. */
		savectx(curpcb);
	}
#ifdef DIAGNOSTIC
	else if (l1 != &lwp0)
		panic("cpu_lwp_fork: curlwp");
#endif
	*pcb = l1->l_addr->u_pcb;

	/*
	 * Copy the trap frame.
	 */
	tf = (struct trapframe *)((u_int)l2->l_addr + USPACE) - 1;
	l2->l_md.md_regs = (int *)tf;
	*tf = *(struct trapframe *)l1->l_md.md_regs;

	/*
	 * If specified, give the child a different stack.
	 */
	if (stack != NULL)
		tf->tf_regs[15] = (u_int)stack + stacksize;

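	/*
	 * Build a switch frame just below the trap frame so that
	 * cpu_switch() resumes the child in proc_trampoline(); the
	 * trampoline then calls `func' with `arg' via the A2/A3
	 * values saved in the pcb below.
	 */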
	sf = (struct switchframe *)tf - 1;
	sf->sf_pc = (u_int)proc_trampoline;
	pcb->pcb_regs[6] = (int)func;		/* A2 */
	pcb->pcb_regs[7] = (int)arg;		/* A3 */
	pcb->pcb_regs[11] = (int)sf;		/* SSP */
}

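/*
 * Reset the given LWP's kernel context so that, the next time it runs,
 * it starts out in proc_trampoline() and calls `func' with `arg'.
 */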
void
cpu_setfunc(l, func, arg)
	struct lwp *l;
	void (*func) __P((void *));
	void *arg;
{
	struct pcb *pcb = &l->l_addr->u_pcb;
	struct trapframe *tf = (struct trapframe *)l->l_md.md_regs;
	struct switchframe *sf = (struct switchframe *)tf - 1;
	extern void proc_trampoline __P((void));

	sf->sf_pc = (int)proc_trampoline;
	pcb->pcb_regs[6] = (int)func;		/* A2 */
	pcb->pcb_regs[7] = (int)arg;		/* A3 */
	pcb->pcb_regs[11] = (int)sf;		/* SSP */
}

/*
 * cpu_exit is called as the last action during exit.
 *
 * Block context switches, then call switch_exit() (or switch_lwp_exit()
 * if only this LWP is exiting), which switches to another process and
 * never returns.
 */
void
cpu_exit(l, proc)
	struct lwp *l;
	int proc;
{

	(void) splhigh();
	uvmexp.swtch++;
	if (proc)
		switch_exit(l);
	else
		switch_lwp_exit(l);
	/* NOTREACHED */
}

/*
 * Dump the machine specific header information at the start of a core dump.
 */
struct md_core {
	struct reg intreg;
	struct fpreg freg;
};
int
cpu_coredump(l, vp, cred, chdr)
	struct lwp *l;
	struct vnode *vp;
	struct ucred *cred;
	struct core *chdr;
{
	struct proc *p = l->l_proc;
	struct md_core md_core;
	struct coreseg cseg;
	int error;

	CORE_SETMAGIC(*chdr, COREMAGIC, MID_MACHINE, 0);
	chdr->c_hdrsize = ALIGN(sizeof(*chdr));
	chdr->c_seghdrsize = ALIGN(sizeof(cseg));
	chdr->c_cpusize = sizeof(md_core);

	/* Save integer registers. */
	error = process_read_regs(l, &md_core.intreg);
	if (error)
		return error;

	if (fputype) {
		/* Save floating point registers. */
		error = process_read_fpregs(l, &md_core.freg);
		if (error)
			return error;
	} else {
		/* Make sure these are clear. */
		memset((caddr_t)&md_core.freg, 0, sizeof(md_core.freg));
	}

	CORE_SETMAGIC(cseg, CORESEGMAGIC, MID_MACHINE, CORE_CPU);
	cseg.c_addr = 0;
	cseg.c_size = chdr->c_cpusize;

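	/* Write the CPU segment header immediately after the core header. */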
	error = vn_rdwr(UIO_WRITE, vp, (caddr_t)&cseg, chdr->c_seghdrsize,
	    (off_t)chdr->c_hdrsize, UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, cred,
	    NULL, p);
	if (error)
		return error;

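	/* Followed by the saved register contents themselves. */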
	error = vn_rdwr(UIO_WRITE, vp, (caddr_t)&md_core, sizeof(md_core),
	    (off_t)(chdr->c_hdrsize + chdr->c_seghdrsize), UIO_SYSSPACE,
	    IO_NODELOCKED|IO_UNIT, cred, NULL, p);
	if (error)
		return error;

	chdr->c_nseg++;
	return 0;
}

/*
 * Move pages from one kernel virtual address to another.
 * Both addresses are assumed to reside in the Sysmap,
 * and size must be a multiple of PAGE_SIZE.
 */
void
pagemove(from, to, size)
	caddr_t from, to;
	size_t size;
{
	paddr_t pa;
	boolean_t rv;

#ifdef DEBUG
	if (size & PGOFSET)
		panic("pagemove");
#endif
	while (size > 0) {
		rv = pmap_extract(pmap_kernel(), (vaddr_t)from, &pa);
#ifdef DEBUG
		if (rv == FALSE)
			panic("pagemove 2");
		if (pmap_extract(pmap_kernel(), (vaddr_t)to, NULL) == TRUE)
			panic("pagemove 3");
#endif
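		/* Move the mapping: drop the old VA, enter the page at the new one. */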
		pmap_kremove((vaddr_t)from, PAGE_SIZE);
		pmap_kenter_pa((vaddr_t)to, pa, VM_PROT_READ | VM_PROT_WRITE);
		from += PAGE_SIZE;
		to += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	pmap_update(pmap_kernel());
}

/*
 * Map a user I/O request into kernel virtual address space.
 * Note: the pages are already locked by uvm_vslock(), so we
 * do not need to pass an access_type to pmap_enter().
 */
void
vmapbuf(bp, len)
	struct buf *bp;
	vsize_t len;
{
	struct pmap *upmap, *kpmap;
	vaddr_t uva;		/* User VA (map from) */
	vaddr_t kva;		/* Kernel VA (map to) */
	paddr_t pa;		/* physical address */
	vsize_t off;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");

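	/* Remember the original user address so vunmapbuf() can restore it. */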
	uva = m68k_trunc_page(bp->b_saveaddr = bp->b_data);
	off = (vaddr_t)bp->b_data - uva;
	len = m68k_round_page(off + len);
	kva = uvm_km_valloc_wait(phys_map, len);
	bp->b_data = (caddr_t)(kva + off);

	upmap = vm_map_pmap(&bp->b_proc->p_vmspace->vm_map);
	kpmap = vm_map_pmap(phys_map);
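	/* Enter a kernel mapping for each page backing the user buffer. */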
	do {
		if (pmap_extract(upmap, uva, &pa) == FALSE)
			panic("vmapbuf: null page frame");
#ifdef M68K_VAC
		pmap_enter(kpmap, kva, pa, VM_PROT_READ | VM_PROT_WRITE,
		    PMAP_WIRED);
#else
		pmap_kenter_pa(kva, pa, VM_PROT_READ | VM_PROT_WRITE);
#endif
		uva += PAGE_SIZE;
		kva += PAGE_SIZE;
		len -= PAGE_SIZE;
	} while (len);
	pmap_update(kpmap);
}

/*
 * Unmap a previously-mapped user I/O request.
 */
void
vunmapbuf(bp, len)
	struct buf *bp;
	vsize_t len;
{
	vaddr_t kva;
	vsize_t off;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");

	kva = m68k_trunc_page(bp->b_data);
	off = (vaddr_t)bp->b_data - kva;
	len = m68k_round_page(off + len);

#ifdef M68K_VAC
	pmap_remove(vm_map_pmap(phys_map), kva, kva + len);
#else
	pmap_kremove(kva, len);
#endif
	pmap_update(pmap_kernel());
	uvm_km_free_wakeup(phys_map, kva, len);
	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = 0;
}

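/*
 * The routines below operate on kernel page-table entries directly and
 * are only built when one of the Motorola or HP MMU options is configured.
 */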
#if defined(M68K_MMU_MOTOROLA) || defined(M68K_MMU_HP)

#include <m68k/cacheops.h>

/*
 * Map `size' bytes of physical memory starting at `paddr' into
 * kernel VA space at `vaddr'.  Read/write and cache-inhibit status
 * are specified by `prot'.
 */
void
physaccess(vaddr, paddr, size, prot)
	caddr_t vaddr, paddr;
	int size, prot;
{
	pt_entry_t *pte;
	u_int page;

	pte = kvtopte(vaddr);
	page = (u_int)paddr & PG_FRAME;
	for (size = btoc(size); size; size--) {
		*pte++ = PG_V | prot | page;
		page += PAGE_SIZE;
	}
	TBIAS();
}

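/*
 * Undo a physaccess() mapping: mark the PTEs invalid and flush the TLB.
 */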
void
physunaccess(vaddr, size)
	caddr_t vaddr;
	int size;
{
	pt_entry_t *pte;

	pte = kvtopte(vaddr);
	for (size = btoc(size); size; size--)
		*pte++ = PG_NV;
	TBIAS();
}

/*
 * Convert kernel VA to physical address
 */
int
kvtop(addr)
	caddr_t addr;
{
	paddr_t pa;

	if (pmap_extract(pmap_kernel(), (vaddr_t)addr, &pa) == FALSE)
		panic("kvtop: zero page frame");
	return((int)pa);
}

#endif