xref: /netbsd-src/sys/arch/m68k/m68k/vm_machdep.c (revision 8b0f9554ff8762542c4defc4f70e1eb76fb508fa)
1 /*	$NetBSD: vm_machdep.c,v 1.26 2007/10/17 19:55:12 garbled Exp $	*/
2 
3 /*
4  * Copyright (c) 1982, 1986, 1990, 1993
5  *	The Regents of the University of California.  All rights reserved.
6  *
7  * This code is derived from software contributed to Berkeley by
8  * the Systems Programming Group of the University of Utah Computer
9  * Science Department.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. Neither the name of the University nor the names of its contributors
20  *    may be used to endorse or promote products derived from this software
21  *    without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
27  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  *
35  * from: Utah $Hdr: vm_machdep.c 1.21 91/04/06$
36  *
37  *	@(#)vm_machdep.c	8.6 (Berkeley) 1/12/94
38  */
39 /*
40  * Copyright (c) 1988 University of Utah.
41  *
42  * This code is derived from software contributed to Berkeley by
43  * the Systems Programming Group of the University of Utah Computer
44  * Science Department.
45  *
46  * Redistribution and use in source and binary forms, with or without
47  * modification, are permitted provided that the following conditions
48  * are met:
49  * 1. Redistributions of source code must retain the above copyright
50  *    notice, this list of conditions and the following disclaimer.
51  * 2. Redistributions in binary form must reproduce the above copyright
52  *    notice, this list of conditions and the following disclaimer in the
53  *    documentation and/or other materials provided with the distribution.
54  * 3. All advertising materials mentioning features or use of this software
55  *    must display the following acknowledgement:
56  *	This product includes software developed by the University of
57  *	California, Berkeley and its contributors.
58  * 4. Neither the name of the University nor the names of its contributors
59  *    may be used to endorse or promote products derived from this software
60  *    without specific prior written permission.
61  *
62  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
63  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
64  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
65  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
66  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
67  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
68  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
69  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
70  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
71  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
72  * SUCH DAMAGE.
73  *
74  * from: Utah $Hdr: vm_machdep.c 1.21 91/04/06$
75  *
76  *	@(#)vm_machdep.c	8.6 (Berkeley) 1/12/94
77  */
78 
79 #include <sys/cdefs.h>
80 __KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.26 2007/10/17 19:55:12 garbled Exp $");
81 
82 #include "opt_coredump.h"
83 
84 #include <sys/param.h>
85 #include <sys/systm.h>
86 #include <sys/proc.h>
87 #include <sys/malloc.h>
88 #include <sys/buf.h>
89 #include <sys/vnode.h>
90 #include <sys/user.h>
91 #include <sys/core.h>
92 #include <sys/exec.h>
93 
94 #include <machine/frame.h>
95 #include <machine/cpu.h>
96 #include <machine/pte.h>
97 #include <machine/reg.h>
98 
99 #include <uvm/uvm_extern.h>
100 
void
cpu_proc_fork(struct proc *p1, struct proc *p2)
{

	/*
	 * MD portion of process creation: the child inherits the
	 * parent's machine-dependent process flags.  This is the only
	 * per-process MD state that needs to be propagated here.
	 */
	p2->p_md.mdp_flags = p1->p_md.mdp_flags;
}
107 
108 /*
109  * Finish a fork operation, with process l2 nearly set up.
110  * Copy and update the pcb and trap frame, making the child ready to run.
111  *
112  * Rig the child's kernel stack so that it will start out in
113  * lwp_trampoline() and call child_return() with l2 as an
114  * argument. This causes the newly-created child process to go
115  * directly to user level with an apparent return value of 0 from
116  * fork(), while the parent process returns normally.
117  *
118  * l1 is the process being forked; if l1 == &lwp0, we are creating
119  * a kernel thread, and the return path and argument are specified with
120  * `func' and `arg'.
121  *
122  * If an alternate user-level stack is requested (with non-zero values
123  * in both the stack and stacksize args), set up the user stack pointer
124  * accordingly.
125  */
void
cpu_lwp_fork(struct lwp *l1, struct lwp *l2, void *stack, size_t stacksize,
    void (*func)(void *), void *arg)
{
	struct pcb *pcb = &l2->l_addr->u_pcb;
	struct trapframe *tf;
	struct switchframe *sf;
	extern struct pcb *curpcb;

	/* Child inherits the parent's MD LWP flags. */
	l2->l_md.md_flags = l1->l_md.md_flags;

	/* Copy pcb from lwp l1 to l2. */
	if (l1 == curlwp) {
		/* Sync the PCB before we copy it. */
		savectx(curpcb);
	}
#ifdef DIAGNOSTIC
	else if (l1 != &lwp0)
		panic("cpu_lwp_fork: curlwp");
#endif
	*pcb = l1->l_addr->u_pcb;

	/*
	 * Copy the trap frame.  The child's trap frame sits at the very
	 * top of its USPACE kernel-stack area; seed it with the parent's
	 * saved user register state.
	 */
	tf = (struct trapframe *)((u_int)l2->l_addr + USPACE) - 1;
	l2->l_md.md_regs = (int *)tf;
	*tf = *(struct trapframe *)l1->l_md.md_regs;

	/*
	 * If specified, give the child a different stack.
	 * tf_regs[15] is A7, the user stack pointer.
	 */
	if (stack != NULL)
		tf->tf_regs[15] = (u_int)stack + stacksize;

	/*
	 * Build a switch frame just below the trap frame so the first
	 * switch into the child enters lwp_trampoline; func, arg and the
	 * child lwp are stashed in callee-saved registers in the pcb for
	 * the trampoline to pick up.
	 */
	sf = (struct switchframe *)tf - 1;
	sf->sf_pc = (u_int)lwp_trampoline;
	pcb->pcb_regs[6] = (int)func;		/* A2 */
	pcb->pcb_regs[7] = (int)arg;		/* A3 */
	pcb->pcb_regs[8] = (int)l2;		/* A4 */
	pcb->pcb_regs[11] = (int)sf;		/* SSP */
	pcb->pcb_ps = PSL_LOWIPL;		/* start kthreads at IPL 0 */
}
169 
/*
 * Re-aim an already-created LWP: rebuild the switch frame below its
 * trap frame so the next switch into it enters lwp_trampoline, which
 * then calls func(arg).
 */
void
cpu_setfunc(struct lwp *l, void (*func)(void *), void *arg)
{
	struct pcb *pcb = &l->l_addr->u_pcb;
	struct trapframe *tf = (struct trapframe *)l->l_md.md_regs;
	struct switchframe *sf = (struct switchframe *)tf - 1;

	sf->sf_pc = (u_int)lwp_trampoline;
	pcb->pcb_regs[6] = (int)func;		/* A2 */
	pcb->pcb_regs[7] = (int)arg;		/* A3 */
	pcb->pcb_regs[11] = (int)sf;		/* SSP */
	/*
	 * NOTE(review): unlike cpu_lwp_fork(), pcb_regs[8] (A4, the lwp
	 * pointer) is not rewritten here -- presumably the value left
	 * over from cpu_lwp_fork() is still correct for this lwp; confirm
	 * against lwp_trampoline's register usage.
	 */
}
182 
/*
 * MD cleanup when an lwp exits (proc is nonzero when the whole process
 * is exiting).  No per-lwp MD state to tear down on m68k.
 */
void
cpu_lwp_free(struct lwp *l, int proc)
{

	/* Nothing to do */
}
189 
/*
 * Second-phase MD lwp teardown.  No per-lwp MD state to release
 * on m68k.
 */
void
cpu_lwp_free2(struct lwp *l)
{

	/* Nothing to do */
}
196 
197 #ifdef COREDUMP
198 /*
199  * Dump the machine specific header information at the start of a core dump.
200  */
struct md_core {
	struct reg intreg;	/* integer register state */
	struct fpreg freg;	/* FP register state; zeroed when no FPU */
};
205 
206 int
207 cpu_coredump(struct lwp *l, void *iocookie, struct core *chdr)
208 {
209 	struct md_core md_core;
210 	struct coreseg cseg;
211 	int error;
212 
213 	if (iocookie == NULL) {
214 		CORE_SETMAGIC(*chdr, COREMAGIC, MID_MACHINE, 0);
215 		chdr->c_hdrsize = ALIGN(sizeof(*chdr));
216 		chdr->c_seghdrsize = ALIGN(sizeof(cseg));
217 		chdr->c_cpusize = sizeof(md_core);
218 		chdr->c_nseg++;
219 		return 0;
220 	}
221 
222 	/* Save integer registers. */
223 	error = process_read_regs(l, &md_core.intreg);
224 	if (error)
225 		return error;
226 
227 	if (fputype) {
228 		/* Save floating point registers. */
229 		error = process_read_fpregs(l, &md_core.freg);
230 		if (error)
231 			return error;
232 	} else {
233 		/* Make sure these are clear. */
234 		memset((void *)&md_core.freg, 0, sizeof(md_core.freg));
235 	}
236 
237 	CORE_SETMAGIC(cseg, CORESEGMAGIC, MID_MACHINE, CORE_CPU);
238 	cseg.c_addr = 0;
239 	cseg.c_size = chdr->c_cpusize;
240 
241 	error = coredump_write(iocookie, UIO_SYSSPACE, &cseg,
242 	    chdr->c_seghdrsize);
243 	if (error)
244 		return error;
245 
246 	return coredump_write(iocookie, UIO_SYSSPACE, &md_core,
247 	    sizeof(md_core));
248 }
249 #endif
250 
251 /*
252  * Map a user I/O request into kernel virtual address space.
253  * Note: the pages are already locked by uvm_vslock(), so we
254  * do not need to pass an access_type to pmap_enter().
255  */
void
vmapbuf(struct buf *bp, vsize_t len)
{
	struct pmap *upmap, *kpmap;
	vaddr_t uva;		/* User VA (map from) */
	vaddr_t kva;		/* Kernel VA (new to) */
	paddr_t pa; 		/* physical address */
	vsize_t off;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");

	/*
	 * Stash the original user address in b_saveaddr (vunmapbuf()
	 * restores it), then compute the page-aligned base, the offset
	 * into the first page, and the page-rounded total length.
	 */
	uva = m68k_trunc_page(bp->b_saveaddr = bp->b_data);
	off = (vaddr_t)bp->b_data - uva;
	len = m68k_round_page(off + len);
	/* Reserve KVA only; pages are entered one by one below. */
	kva = uvm_km_alloc(phys_map, len, 0, UVM_KMF_VAONLY | UVM_KMF_WAITVA);
	bp->b_data = (void *)(kva + off);

	upmap = vm_map_pmap(&bp->b_proc->p_vmspace->vm_map);
	kpmap = vm_map_pmap(phys_map);
	do {
		/* Pages were wired by uvm_vslock(), so extract must win. */
		if (pmap_extract(upmap, uva, &pa) == false)
			panic("vmapbuf: null page frame");
#ifdef M68K_VAC
		/*
		 * NOTE(review): with M68K_VAC the full pmap_enter() path is
		 * used -- presumably so a virtually-addressed cache's alias
		 * handling runs; confirm against the M68K_VAC pmap.
		 */
		pmap_enter(kpmap, kva, pa, VM_PROT_READ | VM_PROT_WRITE,
		    PMAP_WIRED);
#else
		pmap_kenter_pa(kva, pa, VM_PROT_READ | VM_PROT_WRITE);
#endif
		uva += PAGE_SIZE;
		kva += PAGE_SIZE;
		len -= PAGE_SIZE;
	} while (len);
	pmap_update(kpmap);
}
291 
292 /*
293  * Unmap a previously-mapped user I/O request.
294  */
295 void
296 vunmapbuf(struct buf *bp, vsize_t len)
297 {
298 	vaddr_t kva;
299 	vsize_t off;
300 
301 	if ((bp->b_flags & B_PHYS) == 0)
302 		panic("vunmapbuf");
303 
304 	kva = m68k_trunc_page(bp->b_data);
305 	off = (vaddr_t)bp->b_data - kva;
306 	len = m68k_round_page(off + len);
307 
308 #ifdef M68K_VAC
309 	pmap_remove(vm_map_pmap(phys_map), kva, kva + len);
310 #else
311 	pmap_kremove(kva, len);
312 #endif
313 	pmap_update(pmap_kernel());
314 	uvm_km_free(phys_map, kva, len, UVM_KMF_VAONLY);
315 	bp->b_data = bp->b_saveaddr;
316 	bp->b_saveaddr = 0;
317 }
318 
319 
320 #if defined(M68K_MMU_MOTOROLA) || defined(M68K_MMU_HP)
321 
322 #include <m68k/cacheops.h>
323 
324 /*
325  * Map `size' bytes of physical memory starting at `paddr' into
326  * kernel VA space at `vaddr'.  Read/write and cache-inhibit status
327  * are specified by `prot'.
328  */
329 void
330 physaccess(void *vaddr, void *paddr, int size, int prot)
331 {
332 	pt_entry_t *pte;
333 	u_int page;
334 
335 	pte = kvtopte(vaddr);
336 	page = (u_int)paddr & PG_FRAME;
337 	for (size = btoc(size); size; size--) {
338 		*pte++ = PG_V | prot | page;
339 		page += PAGE_SIZE;
340 	}
341 	TBIAS();
342 }
343 
344 void
345 physunaccess(void *vaddr, int size)
346 {
347 	pt_entry_t *pte;
348 
349 	pte = kvtopte(vaddr);
350 	for (size = btoc(size); size; size--)
351 		*pte++ = PG_NV;
352 	TBIAS();
353 }
354 
355 /*
356  * Convert kernel VA to physical address
357  */
358 int
359 kvtop(void *addr)
360 {
361 	paddr_t pa;
362 
363 	if (pmap_extract(pmap_kernel(), (vaddr_t)addr, &pa) == false)
364 		panic("kvtop: zero page frame");
365 	return (int)pa;
366 }
367 
368 #endif
369