/*	$NetBSD: vm_machdep.c,v 1.49 2009/03/14 21:04:05 dsl Exp $	*/

/*
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Brini.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * vm_machdep.c
 *
 * vm machine specific bits
 *
 * Created      : 08/10/94
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.49 2009/03/14 21:04:05 dsl Exp $");

#include "opt_armfpe.h"
#include "opt_pmap_debug.h"
#include "opt_perfctrs.h"
#include "opt_cputypes.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/pmc.h>
#include <sys/user.h>
#include <sys/exec.h>
#include <sys/syslog.h>

#include <uvm/uvm_extern.h>

#include <machine/cpu.h>
#include <machine/pmap.h>
#include <machine/reg.h>
#include <machine/vmparam.h>

#ifdef ARMFPE
#include <arm/fpe-arm/armfpe.h>
#endif

extern pv_addr_t systempage;

int process_read_regs(struct proc *p, struct reg *regs);
int process_read_fpregs(struct proc *p, struct fpreg *regs);

void lwp_trampoline(void);

/*
 * Special compilation symbols:
 *
 * STACKCHECKS - Fill undefined and supervisor stacks with a known pattern
 *		 on forking and check the pattern on exit, reporting
 *		 the amount of stack used.
 */

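/*
 * cpu_proc_fork: machine-dependent side of fork(2).  The child either
 * inherits the parent's performance-counter (PMC) state or starts with
 * PMCs disabled.
 */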
void
cpu_proc_fork(struct proc *p1, struct proc *p2)
{

#if defined(PERFCTRS)
	if (PMC_ENABLED(p1))
		pmc_md_fork(p1, p2);
	else {
		p2->p_md.pmc_enabled = 0;
		p2->p_md.pmc_state = NULL;
	}
#endif
}

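/*
 * cpu_setfunc: arrange for the LWP to begin life in func(arg).
 *
 * A switchframe is pushed immediately below the trapframe on the
 * kernel stack; when the LWP is first switched to, cpu_switchto()
 * restores r4, r5 and sp from it and jumps to lwp_trampoline(),
 * which in turn calls func(arg).
 */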
void
cpu_setfunc(struct lwp *l, void (*func)(void *), void *arg)
{
	struct pcb *pcb = &l->l_addr->u_pcb;
	struct trapframe *tf = pcb->pcb_tf;
	struct switchframe *sf = (struct switchframe *)tf - 1;

	sf->sf_r4 = (u_int)func;
	sf->sf_r5 = (u_int)arg;
	sf->sf_sp = (u_int)tf;
	sf->sf_pc = (u_int)lwp_trampoline;
	pcb->pcb_un.un_32.pcb32_sp = (u_int)sf;
}

/*
 * Finish a fork operation, with LWP l2 nearly set up.
 *
 * Copy and update the pcb and trapframe, making the child ready to run.
 *
 * Rig the child's kernel stack so that it will start out in
 * lwp_trampoline() which will call the specified func with the argument arg.
 *
 * If an alternate user-level stack is requested (with non-zero values
 * in both the stack and stacksize args), set up the user stack pointer
 * accordingly.
 */
void
cpu_lwp_fork(struct lwp *l1, struct lwp *l2, void *stack, size_t stacksize,
    void (*func)(void *), void *arg)
{
	struct pcb *pcb = &l2->l_addr->u_pcb;
	struct trapframe *tf;

#ifdef PMAP_DEBUG
	if (pmap_debug_level >= 0)
		printf("cpu_lwp_fork: %p %p %p %p\n", l1, l2, curlwp, &lwp0);
#endif	/* PMAP_DEBUG */

#if 0 /* XXX */
	if (l1 == curlwp) {
		/* Sync the PCB before we copy it. */
		savectx(curpcb);
	}
#endif

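	/* The child inherits only the parent's `VFP in use' flag. */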
	l2->l_md.md_flags = l1->l_md.md_flags & MDP_VFPUSED;

#ifdef FPU_VFP
	/*
	 * Copy the floating point state from the VFP to the PCB
	 * if this process has state stored there.
	 */
	if (l1->l_addr->u_pcb.pcb_vfpcpu != NULL)
		vfp_saveregs_lwp(l1, 1);
#endif

	/* Copy the pcb */
	*pcb = l1->l_addr->u_pcb;

	/*
	 * Set up the kernel stack for the LWP.
	 * Note: this stack is not in use if we are forking from l1.
	 */
	pcb->pcb_un.un_32.pcb32_sp = (u_int)l2->l_addr + USPACE_SVC_STACK_TOP;

#ifdef STACKCHECKS
	/* Fill the kernel stack with a known pattern */
	memset(((u_char *)l2->l_addr) + USPACE_SVC_STACK_BOTTOM, 0xdd,
	    (USPACE_SVC_STACK_TOP - USPACE_SVC_STACK_BOTTOM));
#endif	/* STACKCHECKS */

#ifdef PMAP_DEBUG
	if (pmap_debug_level >= 0) {
		printf("l1->l_addr=%p l1->l_addr->u_pcb=%p lwpid=%d pmap=%p\n",
		    l1->l_addr, &l1->l_addr->u_pcb, l1->l_lid,
		    l1->l_proc->p_vmspace->vm_map.pmap);
		printf("l2->l_addr=%p l2->l_addr->u_pcb=%p lwpid=%d pmap=%p\n",
		    l2->l_addr, &l2->l_addr->u_pcb, l2->l_lid,
		    l2->l_proc->p_vmspace->vm_map.pmap);
	}
#endif	/* PMAP_DEBUG */

#ifdef ARMFPE
	/* Initialise a new FP context for l2 and copy the context from l1 */
	arm_fpe_core_initcontext(FP_CONTEXT(l2));
	arm_fpe_copycontext(FP_CONTEXT(l1), FP_CONTEXT(l2));
#endif	/* ARMFPE */

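	/*
	 * Carve the child's trapframe out of the top of its kernel stack
	 * and seed it with a copy of the parent's, so the child returns
	 * to userland where the parent entered the kernel.
	 */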
	l2->l_addr->u_pcb.pcb_tf = tf =
	    (struct trapframe *)pcb->pcb_un.un_32.pcb32_sp - 1;
	*tf = *l1->l_addr->u_pcb.pcb_tf;

	/*
	 * If specified, give the child a different stack.
	 */
	if (stack != NULL)
		tf->tf_usr_sp = (u_int)stack + stacksize;

	cpu_setfunc(l2, func, arg);
}

/*
 * cpu_lwp_free is called as the last machine-dependent action when an
 * LWP exits.
 *
 * We release any floating point state still held on behalf of the LWP
 * and, under STACKCHECKS, report how much of the kernel stack was
 * actually used.
 */

void
cpu_lwp_free(struct lwp *l, int proc)
{
#ifdef ARMFPE
	/* Abort any active FP operation and deactivate the context */
	arm_fpe_core_abort(FP_CONTEXT(l), NULL, NULL);
	arm_fpe_core_changecontext(0);
#endif	/* ARMFPE */

#ifdef FPU_VFP
	if (l->l_addr->u_pcb.pcb_vfpcpu != NULL)
		vfp_saveregs_lwp(l, 0);
#endif

#ifdef STACKCHECKS
	/* Report how much stack has been used - debugging */
	if (l) {
		u_char *ptr;
		int loop;

		ptr = ((u_char *)l->l_addr) + USPACE_SVC_STACK_BOTTOM;
		for (loop = 0; loop < (USPACE_SVC_STACK_TOP - USPACE_SVC_STACK_BOTTOM)
		    && *ptr == 0xdd; ++loop, ++ptr) ;
		log(LOG_INFO, "%d bytes of svc stack fill pattern\n", loop);
	}
#endif	/* STACKCHECKS */
}

void
cpu_lwp_free2(struct lwp *l)
{
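	/* Nothing more to do on arm32. */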
}

void
cpu_swapin(struct lwp *l)
{
#if 0
	struct proc *p = l->l_proc;

	/* Don't do this.  See the comment in cpu_swapout().  */
#ifdef PMAP_DEBUG
	if (pmap_debug_level >= 0)
		printf("cpu_swapin(%p, %d, %s, %p)\n", l, l->l_lid,
		    p->p_comm, p->p_vmspace->vm_map.pmap);
#endif	/* PMAP_DEBUG */

	if (vector_page < KERNEL_BASE) {
		/* Map the vector page */
		pmap_enter(p->p_vmspace->vm_map.pmap, vector_page,
		    systempage.pv_pa, VM_PROT_READ, VM_PROT_READ|PMAP_WIRED);
		pmap_update(p->p_vmspace->vm_map.pmap);
	}
#endif
}

void
cpu_swapout(struct lwp *l)
{
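	/*
	 * Spill any VFP state the LWP may still hold in a CPU's registers
	 * back into the PCB before the LWP is swapped out.
	 */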
#ifdef FPU_VFP
	if (l->l_addr->u_pcb.pcb_vfpcpu != NULL)
		vfp_saveregs_lwp(l, 1);
#endif

#if 0
	struct proc *p = l->l_proc;

	/*
	 * Don't do this!  If the pmap is shared with another process,
	 * it will lose its page0 entry.  That's bad news indeed.
	 */
#ifdef PMAP_DEBUG
	if (pmap_debug_level >= 0)
		printf("cpu_swapout(%p, %d, %s, %p)\n", l, l->l_lid,
		    p->p_comm, &p->p_vmspace->vm_map.pmap);
#endif	/* PMAP_DEBUG */

	if (vector_page < KERNEL_BASE) {
		/* Free the system page mapping */
		pmap_remove(p->p_vmspace->vm_map.pmap, vector_page,
		    vector_page + PAGE_SIZE);
		pmap_update(p->p_vmspace->vm_map.pmap);
	}
#endif
}

/*
 * Map a user I/O request into kernel virtual address space.
 * Note: the pages are already locked by uvm_vslock(), so we
 * do not need to pass an access_type to pmap_enter().
 */
void
vmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t faddr, taddr, off;
	paddr_t fpa;

#ifdef PMAP_DEBUG
	if (pmap_debug_level >= 0)
		printf("vmapbuf: bp=%08x buf=%08x len=%08x\n", (u_int)bp,
		    (u_int)bp->b_data, (u_int)len);
#endif	/* PMAP_DEBUG */

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");

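	/*
	 * Allocate kernel VA for the transfer, preserving the buffer's
	 * offset within its first page so b_data stays byte-accurate.
	 */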
	bp->b_saveaddr = bp->b_data;
	faddr = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - faddr;
	len = round_page(off + len);
	taddr = uvm_km_alloc(phys_map, len, 0, UVM_KMF_VAONLY | UVM_KMF_WAITVA);
	bp->b_data = (void *)(taddr + off);

	/*
	 * The region is locked, so we expect that pmap_extract() will
	 * always succeed.
	 */
	while (len) {
		(void) pmap_extract(vm_map_pmap(&bp->b_proc->p_vmspace->vm_map),
		    faddr, &fpa);
		pmap_enter(pmap_kernel(), taddr, fpa,
		    VM_PROT_READ|VM_PROT_WRITE, VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
		faddr += PAGE_SIZE;
		taddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	pmap_update(pmap_kernel());
}

/*
 * Unmap a previously-mapped user I/O request.
 */
void
vunmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t addr, off;

#ifdef PMAP_DEBUG
	if (pmap_debug_level >= 0)
		printf("vunmapbuf: bp=%08x buf=%08x len=%08x\n",
		    (u_int)bp, (u_int)bp->b_data, (u_int)len);
#endif	/* PMAP_DEBUG */

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");

	/*
	 * Make sure the cache does not have dirty data for the
	 * pages we had mapped.
	 */
	addr = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - addr;
	len = round_page(off + len);

	pmap_remove(pmap_kernel(), addr, addr + len);
	pmap_update(pmap_kernel());
	uvm_km_free(phys_map, addr, len, UVM_KMF_VAONLY);
	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = 0;
}

/* End of vm_machdep.c */