/*	$NetBSD: vm_machdep.c,v 1.39 2013/10/25 20:53:02 martin Exp $	*/

/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1982, 1986, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vm_machdep.c 1.21 91/04/06$
 *
 *	@(#)vm_machdep.c	8.6 (Berkeley) 1/12/94
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.39 2013/10/25 20:53:02 martin Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/cpu.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/core.h>
#include <sys/exec.h>

#include <machine/frame.h>
#include <machine/pte.h>
#include <machine/pcb.h>

#include <uvm/uvm_extern.h>

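/*
 * Finish the machine-dependent part of a fork: copy the
 * per-process MD flags from the parent proc p1 to the child p2.
 */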
void
cpu_proc_fork(struct proc *p1, struct proc *p2)
{

	p2->p_md.mdp_flags = p1->p_md.mdp_flags;
}

/*
 * Finish a fork operation, with lwp l2 nearly set up.
 * Copy and update the pcb and trap frame, making the child ready to run.
 *
 * Rig the child's kernel stack so that it will start out in
 * lwp_trampoline() and call child_return() with l2 as an
 * argument. This causes the newly-created child process to go
 * directly to user level with an apparent return value of 0 from
 * fork(), while the parent process returns normally.
 *
 * l1 is the lwp being forked; if l1 == &lwp0, we are creating
 * a kernel thread, and the return path and argument are specified with
 * `func' and `arg'.
 *
 * If an alternate user-level stack is requested (with non-zero values
 * in both the stack and stacksize arguments), set up the user stack
 * pointer accordingly.
 */
void
cpu_lwp_fork(struct lwp *l1, struct lwp *l2, void *stack, size_t stacksize,
    void (*func)(void *), void *arg)
{
	struct pcb *pcb1, *pcb2;
	struct trapframe *tf;
	struct switchframe *sf;

	pcb1 = lwp_getpcb(l1);
	pcb2 = lwp_getpcb(l2);

	l2->l_md.md_flags = l1->l_md.md_flags;

	/* Copy pcb from lwp l1 to l2. */
	if (l1 == curlwp) {
		/* Sync the PCB before we copy it. */
		savectx(curpcb);
	} else {
		KASSERT(l1 == &lwp0);
	}

	*pcb2 = *pcb1;

	/*
	 * Set up the child's trap frame at the top of its kernel
	 * stack (u-area) and copy the parent's trap frame into it.
	 */
	tf = (struct trapframe *)(uvm_lwp_getuarea(l2) + USPACE) - 1;
	l2->l_md.md_regs = (int *)tf;
	*tf = *(struct trapframe *)l1->l_md.md_regs;

	/*
	 * If specified, give the child a different user stack
	 * (tf_regs[15] is the user stack pointer).
	 */
	if (stack != NULL)
		tf->tf_regs[15] = (u_int)stack + stacksize;

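	/*
	 * Build a switchframe just below the trap frame.  When the
	 * child is first switched to, it resumes at sf_pc, i.e. in
	 * lwp_trampoline(), which completes startup and then calls
	 * func(arg) using the values preloaded into the callee-saved
	 * registers below.
	 */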
	sf = (struct switchframe *)tf - 1;
	sf->sf_pc = (u_int)lwp_trampoline;
	pcb2->pcb_regs[6] = (int)func;		/* A2 */
	pcb2->pcb_regs[7] = (int)arg;		/* A3 */
	pcb2->pcb_regs[8] = (int)l2;		/* A4 */
	pcb2->pcb_regs[11] = (int)sf;		/* SSP */
	pcb2->pcb_ps = PSL_LOWIPL;		/* start kthreads at IPL 0 */
}

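/*
 * Machine-dependent teardown hooks run when an lwp exits; the
 * `proc' argument to cpu_lwp_free() is nonzero when the whole
 * process (not just the lwp) is exiting.  m68k keeps no extra
 * per-lwp machine-dependent state, so there is nothing to release.
 */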
void
cpu_lwp_free(struct lwp *l, int proc)
{

	/* Nothing to do */
}

void
cpu_lwp_free2(struct lwp *l)
{

	/* Nothing to do */
}

/*
 * Map a user I/O request into kernel virtual address space.
 * Note: the pages are already locked by uvm_vslock(), so we
 * do not need to pass an access_type to pmap_enter().
 */
int
vmapbuf(struct buf *bp, vsize_t len)
{
	struct pmap *upmap, *kpmap __unused;
	vaddr_t uva;		/* User VA (map from) */
	vaddr_t kva;		/* Kernel VA (map to) */
	paddr_t pa;		/* physical address */
	vsize_t off;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");

	uva = m68k_trunc_page(bp->b_saveaddr = bp->b_data);
	off = (vaddr_t)bp->b_data - uva;
	len = m68k_round_page(off + len);
	kva = uvm_km_alloc(phys_map, len, 0, UVM_KMF_VAONLY | UVM_KMF_WAITVA);
	bp->b_data = (void *)(kva + off);

	upmap = vm_map_pmap(&bp->b_proc->p_vmspace->vm_map);
	kpmap = vm_map_pmap(phys_map);
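	/*
	 * Enter the user pages into the kernel pmap one at a time.
	 * On configurations with a virtually addressed cache
	 * (M68K_VAC) the full pmap_enter() is used so the pmap can
	 * detect and handle cache aliases; elsewhere the cheaper,
	 * unmanaged pmap_kenter_pa() is sufficient.
	 */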
	do {
		if (pmap_extract(upmap, uva, &pa) == false)
			panic("vmapbuf: null page frame");
#ifdef M68K_VAC
		pmap_enter(kpmap, kva, pa, VM_PROT_READ | VM_PROT_WRITE,
		    PMAP_WIRED);
#else
		pmap_kenter_pa(kva, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
#endif
		uva += PAGE_SIZE;
		kva += PAGE_SIZE;
		len -= PAGE_SIZE;
	} while (len);
	pmap_update(kpmap);

	return 0;
}

/*
 * Unmap a previously-mapped user I/O request.
 */
void
vunmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t kva;
	vsize_t off;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");

	kva = m68k_trunc_page(bp->b_data);
	off = (vaddr_t)bp->b_data - kva;
	len = m68k_round_page(off + len);

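	/*
	 * Tear the mappings down with the counterpart of whichever
	 * interface vmapbuf() used to enter them.
	 */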
#ifdef M68K_VAC
	pmap_remove(vm_map_pmap(phys_map), kva, kva + len);
#else
	pmap_kremove(kva, len);
#endif
	pmap_update(pmap_kernel());
	uvm_km_free(phys_map, kva, len, UVM_KMF_VAONLY);
	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = NULL;
}

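/*
 * The routines below manipulate page-table entries directly and are
 * built only for the Motorola and HP MMU variants.
 */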
#if defined(M68K_MMU_MOTOROLA) || defined(M68K_MMU_HP)

#include <m68k/cacheops.h>

/*
 * Map `size' bytes of physical memory starting at `paddr' into
 * kernel VA space at `vaddr'.  Read/write and cache-inhibit status
 * are specified by `prot'.
 */
void
physaccess(void *vaddr, void *paddr, int size, int prot)
{
	pt_entry_t *pte;
	u_int page;

	pte = kvtopte(vaddr);
	page = (u_int)paddr & PG_FRAME;
	for (size = btoc(size); size; size--) {
		*pte++ = PG_V | prot | page;
		page += PAGE_SIZE;
	}
	TBIAS();		/* invalidate supervisor side of TLB */
}

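/*
 * Reverse the effect of physaccess(): invalidate the PTEs mapping
 * `size' bytes at `vaddr' and flush the stale translations from
 * the TLB.
 */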
void
physunaccess(void *vaddr, int size)
{
	pt_entry_t *pte;

	pte = kvtopte(vaddr);
	for (size = btoc(size); size; size--)
		*pte++ = PG_NV;
	TBIAS();
}

/*
 * Convert a kernel virtual address to its physical address;
 * panics if the address is not mapped.
 */
int
kvtop(void *addr)
{
	paddr_t pa;

	if (pmap_extract(pmap_kernel(), (vaddr_t)addr, &pa) == false)
		panic("kvtop: zero page frame");
	return (int)pa;
}

#endif