/*	$NetBSD: vm_machdep.c,v 1.40 2007/10/17 19:53:31 garbled Exp $	*/

/*
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Brini.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * vm_machdep.c
 *
 * vm machine specific bits
 *
 * Created      : 08/10/94
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.40 2007/10/17 19:53:31 garbled Exp $");

#include "opt_armfpe.h"
#include "opt_pmap_debug.h"
#include "opt_perfctrs.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/pmc.h>
#include <sys/user.h>
#include <sys/exec.h>
#include <sys/syslog.h>

#include <uvm/uvm_extern.h>

#include <machine/cpu.h>
#include <machine/pmap.h>
#include <machine/reg.h>
#include <machine/vmparam.h>

#ifdef ARMFPE
#include <arm/fpe-arm/armfpe.h>
#endif

extern pv_addr_t systempage;

int process_read_regs(struct proc *p, struct reg *regs);
int process_read_fpregs(struct proc *p, struct fpreg *regs);

void lwp_trampoline(void);

/*
 * Special compilation symbols:
 *
 * STACKCHECKS - Fill undefined and supervisor stacks with a known pattern
 *		 on forking and check the pattern on exit, reporting
 *		 the amount of stack used.
 */

/*
 * Machine-dependent process fork hook: hand the child its initial
 * performance-counter (PMC) state when PERFCTRS is configured.
 */
void
cpu_proc_fork(struct proc *p1, struct proc *p2)
{

#if defined(PERFCTRS)
	if (PMC_ENABLED(p1))
		pmc_md_fork(p1, p2);
	else {
		p2->p_md.pmc_enabled = 0;
		p2->p_md.pmc_state = NULL;
	}
#endif
}

/*
 * Finish a fork operation, with LWP l2 nearly set up.
 * Copy and update the pcb and trap frame, making the child ready to run.
 *
 * Rig the child's kernel stack so that it will start out in
 * lwp_trampoline() and call child_return() with l2 as an
 * argument. This causes the newly-created child process to go
 * directly to user level with an apparent return value of 0 from
 * fork(), while the parent process returns normally.
 *
 * l1 is the lwp being forked; if l1 == &lwp0, we are creating
 * a kernel thread, and the return path and argument are specified with
 * `func' and `arg'.
 *
 * If an alternate user-level stack is requested (with non-zero values
 * in both the stack and stacksize args), set up the user stack pointer
 * accordingly.
 */
void
cpu_lwp_fork(struct lwp *l1, struct lwp *l2, void *stack, size_t stacksize,
    void (*func)(void *), void *arg)
{
	struct pcb *pcb = (struct pcb *)&l2->l_addr->u_pcb;
	struct trapframe *tf;
	struct switchframe *sf;

#ifdef PMAP_DEBUG
	if (pmap_debug_level >= 0)
		printf("cpu_lwp_fork: %p %p %p %p\n", l1, l2, curlwp, &lwp0);
#endif	/* PMAP_DEBUG */

#if 0 /* XXX */
	if (l1 == curlwp) {
		/* Sync the PCB before we copy it. */
		savectx(curpcb);
	}
#endif

	/* Copy the pcb */
	*pcb = l1->l_addr->u_pcb;

	/*
	 * Set up the undefined stack for the process.
	 * Note: this stack is not in use if we are forking from l1.
	 */
	pcb->pcb_un.un_32.pcb32_und_sp = (u_int)l2->l_addr +
	    USPACE_UNDEF_STACK_TOP;
	pcb->pcb_un.un_32.pcb32_sp = (u_int)l2->l_addr + USPACE_SVC_STACK_TOP;

#ifdef STACKCHECKS
	/* Fill the undefined stack with a known pattern */
	memset(((u_char *)l2->l_addr) + USPACE_UNDEF_STACK_BOTTOM, 0xdd,
	    (USPACE_UNDEF_STACK_TOP - USPACE_UNDEF_STACK_BOTTOM));
	/* Fill the kernel stack with a known pattern */
	memset(((u_char *)l2->l_addr) + USPACE_SVC_STACK_BOTTOM, 0xdd,
	    (USPACE_SVC_STACK_TOP - USPACE_SVC_STACK_BOTTOM));
#endif	/* STACKCHECKS */

#ifdef PMAP_DEBUG
	if (pmap_debug_level >= 0) {
		printf("l1->procaddr=%p l1->procaddr->u_pcb=%p pid=%d pmap=%p\n",
		    l1->l_addr, &l1->l_addr->u_pcb, l1->l_lid,
		    l1->l_proc->p_vmspace->vm_map.pmap);
		printf("l2->procaddr=%p l2->procaddr->u_pcb=%p pid=%d pmap=%p\n",
		    l2->l_addr, &l2->l_addr->u_pcb, l2->l_lid,
		    l2->l_proc->p_vmspace->vm_map.pmap);
	}
#endif	/* PMAP_DEBUG */

#ifdef ARMFPE
	/* Initialise a new FP context for l2 and copy the context from l1 */
	arm_fpe_core_initcontext(FP_CONTEXT(l2));
	arm_fpe_copycontext(FP_CONTEXT(l1), FP_CONTEXT(l2));
#endif	/* ARMFPE */

	/* Build the child's trap frame at the top of its SVC stack,
	 * as a copy of the parent's. */
	l2->l_addr->u_pcb.pcb_tf = tf =
	    (struct trapframe *)pcb->pcb_un.un_32.pcb32_sp - 1;
	*tf = *l1->l_addr->u_pcb.pcb_tf;

	/*
	 * If specified, give the child a different stack.
	 */
	if (stack != NULL)
		tf->tf_usr_sp = (u_int)stack + stacksize;

	/* Build a switch frame below the trap frame so the child, when
	 * first switched to, resumes in lwp_trampoline() and calls
	 * func(arg). */
	sf = (struct switchframe *)tf - 1;
	sf->sf_r4 = (u_int)func;
	sf->sf_r5 = (u_int)arg;
	sf->sf_pc = (u_int)lwp_trampoline;
	pcb->pcb_un.un_32.pcb32_sp = (u_int)sf;
}
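
/*
 * A rough sketch (illustration only, not compiled code) of the child's
 * kernel stack as left by cpu_lwp_fork() above.  pcb32_sp is left
 * pointing at the switch frame, so the first switch to the child
 * "returns" into lwp_trampoline(), which picks up func and arg from
 * r4/r5:
 *
 *	l2->l_addr + USPACE_SVC_STACK_TOP
 *	+----------------------+
 *	| struct trapframe     |  <- pcb_tf: copy of l1's frame; tf_usr_sp
 *	|                      |     reset if an alternate stack was given
 *	+----------------------+
 *	| struct switchframe   |  <- pcb32_sp: sf_pc = lwp_trampoline,
 *	|                      |     sf_r4 = func, sf_r5 = arg
 *	+----------------------+
 *	| (unused SVC stack)   |
 *	          ...
 */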

/*
 * cpu_lwp_free is called as the last machine-dependent action when an
 * lwp exits.
 *
 * We clean up the remaining machine-dependent state (abort any active
 * FP operation, and report stack usage when STACKCHECKS is enabled)
 * before the lwp's resources are released.
 */

void
cpu_lwp_free(struct lwp *l, int proc)
{
#ifdef ARMFPE
	/* Abort any active FP operation and deactivate the context */
	arm_fpe_core_abort(FP_CONTEXT(l), NULL, NULL);
	arm_fpe_core_changecontext(0);
#endif	/* ARMFPE */

#ifdef STACKCHECKS
	/* Report how much stack has been used - debugging */
	if (l) {
		u_char *ptr;
		int loop;

		ptr = ((u_char *)l->l_addr) + USPACE_UNDEF_STACK_BOTTOM;
		for (loop = 0; loop < (USPACE_UNDEF_STACK_TOP -
		    USPACE_UNDEF_STACK_BOTTOM) && *ptr == 0xdd; ++loop, ++ptr)
			;
		log(LOG_INFO, "%d bytes of undefined stack fill pattern\n",
		    loop);
		ptr = ((u_char *)l->l_addr) + USPACE_SVC_STACK_BOTTOM;
		for (loop = 0; loop < (USPACE_SVC_STACK_TOP -
		    USPACE_SVC_STACK_BOTTOM) && *ptr == 0xdd; ++loop, ++ptr)
			;
		log(LOG_INFO, "%d bytes of svc stack fill pattern\n", loop);
	}
#endif	/* STACKCHECKS */
}

void
cpu_lwp_free2(struct lwp *l)
{
}

void
cpu_swapin(struct lwp *l)
{
#if 0
	struct proc *p = l->l_proc;

	/* Don't do this. See the comment in cpu_swapout(). */
#ifdef PMAP_DEBUG
	if (pmap_debug_level >= 0)
		printf("cpu_swapin(%p, %d, %s, %p)\n", l, l->l_lid,
		    p->p_comm, p->p_vmspace->vm_map.pmap);
#endif	/* PMAP_DEBUG */

	if (vector_page < KERNEL_BASE) {
		/* Map the vector page */
		pmap_enter(p->p_vmspace->vm_map.pmap, vector_page,
		    systempage.pv_pa, VM_PROT_READ, VM_PROT_READ|PMAP_WIRED);
		pmap_update(p->p_vmspace->vm_map.pmap);
	}
#endif
}


void
cpu_swapout(struct lwp *l)
{
#if 0
	struct proc *p = l->l_proc;

	/*
	 * Don't do this!  If the pmap is shared with another process,
	 * it will lose its page0 entry.  That's bad news indeed.
	 */
#ifdef PMAP_DEBUG
	if (pmap_debug_level >= 0)
		printf("cpu_swapout(%p, %d, %s, %p)\n", l, l->l_lid,
		    p->p_comm, &p->p_vmspace->vm_map.pmap);
#endif	/* PMAP_DEBUG */

	if (vector_page < KERNEL_BASE) {
		/* Free the system page mapping */
		pmap_remove(p->p_vmspace->vm_map.pmap, vector_page,
		    vector_page + PAGE_SIZE);
		pmap_update(p->p_vmspace->vm_map.pmap);
	}
#endif
}
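
/*
 * The two helpers below double-map a user buffer into kernel virtual
 * address space around a raw, physio-style transfer.  A hedged usage
 * sketch, with "driver_strategy" as a stand-in for whatever strategy
 * routine the caller actually uses (it is not a function in this file):
 *
 *	vmapbuf(bp, len);		map bp->b_data into phys_map
 *	(*driver_strategy)(bp);		device sees a kernel VA in b_data
 *	...wait for the I/O to complete...
 *	vunmapbuf(bp, len);		tear the mapping down, restore b_data
 */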

/*
 * Map a user I/O request into kernel virtual address space.
 * Note: the pages are already locked by uvm_vslock(), so we
 * do not need to pass an access_type to pmap_enter().
 */
void
vmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t faddr, taddr, off;
	paddr_t fpa;

#ifdef PMAP_DEBUG
	if (pmap_debug_level >= 0)
		printf("vmapbuf: bp=%08x buf=%08x len=%08x\n", (u_int)bp,
		    (u_int)bp->b_data, (u_int)len);
#endif	/* PMAP_DEBUG */

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");

	/* Remember the user address and carve out kernel VA for the alias */
	bp->b_saveaddr = bp->b_data;
	faddr = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - faddr;
	len = round_page(off + len);
	taddr = uvm_km_alloc(phys_map, len, 0, UVM_KMF_VAONLY | UVM_KMF_WAITVA);
	bp->b_data = (void *)(taddr + off);

	/*
	 * The region is locked, so we expect pmap_extract() to succeed
	 * for every page.
	 */
	while (len) {
		(void) pmap_extract(vm_map_pmap(&bp->b_proc->p_vmspace->vm_map),
		    faddr, &fpa);
		pmap_enter(pmap_kernel(), taddr, fpa,
		    VM_PROT_READ|VM_PROT_WRITE,
		    VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
		faddr += PAGE_SIZE;
		taddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	pmap_update(pmap_kernel());
}

/*
 * Unmap a previously-mapped user I/O request.
 */
void
vunmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t addr, off;

#ifdef PMAP_DEBUG
	if (pmap_debug_level >= 0)
		printf("vunmapbuf: bp=%08x buf=%08x len=%08x\n",
		    (u_int)bp, (u_int)bp->b_data, (u_int)len);
#endif	/* PMAP_DEBUG */

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");

	/*
	 * Make sure the cache does not have dirty data for the
	 * pages we had mapped.
	 */
	addr = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - addr;
	len = round_page(off + len);

	pmap_remove(pmap_kernel(), addr, addr + len);
	pmap_update(pmap_kernel());
	uvm_km_free(phys_map, addr, len, UVM_KMF_VAONLY);
	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = 0;
}

/* End of vm_machdep.c */