/*	$NetBSD: vm_machdep.c,v 1.74 2003/06/23 11:01:42 martin Exp $ */

/*
 * Copyright (c) 1996
 *	The President and Fellows of Harvard College. All rights reserved.
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *	This product includes software developed by Harvard University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Harvard University.
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_machdep.c	8.2 (Berkeley) 9/23/93
 */

#include "opt_multiprocessor.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/core.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/exec.h>
#include <sys/vnode.h>

#include <uvm/uvm_extern.h>

#include <machine/cpu.h>
#include <machine/frame.h>
#include <machine/trap.h>

#include <sparc/sparc/cpuvar.h>

/*
 * Move pages from one kernel virtual address to another.
 */
void
pagemove(from, to, size)
        caddr_t from, to;
        size_t size;
{
        paddr_t pa;

        if (size & PGOFSET || (int)from & PGOFSET || (int)to & PGOFSET)
                panic("pagemove 1");
        while (size > 0) {
                if (pmap_extract(pmap_kernel(), (vaddr_t)from, &pa) == FALSE)
                        panic("pagemove 2");
                pmap_kremove((vaddr_t)from, PAGE_SIZE);
                pmap_kenter_pa((vaddr_t)to, pa, VM_PROT_READ | VM_PROT_WRITE);
                from += PAGE_SIZE;
                to += PAGE_SIZE;
                size -= PAGE_SIZE;
        }
        pmap_update(pmap_kernel());
}


/*
 * Map a user I/O request into kernel virtual address space.
 * Note: the pages are already locked by uvm_vslock(), so we
 * do not need to pass an access_type to pmap_enter().
 */
void
vmapbuf(bp, len)
        struct buf *bp;
        vsize_t len;
{
        struct pmap *upmap, *kpmap;
        vaddr_t uva;    /* User VA (map from) */
        vaddr_t kva;    /* Kernel VA (new to) */
        paddr_t pa;     /* physical address */
        vsize_t off;

        if ((bp->b_flags & B_PHYS) == 0)
                panic("vmapbuf");

        /*
         * XXX:  It might be better to round/trunc to a
         * segment boundary to avoid VAC problems!
         */
        bp->b_saveaddr = bp->b_data;
        uva = trunc_page((vaddr_t)bp->b_data);
        off = (vaddr_t)bp->b_data - uva;
        len = round_page(off + len);
        kva = uvm_km_valloc_wait(kernel_map, len);
        bp->b_data = (caddr_t)(kva + off);

        /*
         * We have to flush any write-back cache on the
         * user-space mappings so our new mappings will
         * have the correct contents.
         */
        if (CACHEINFO.c_vactype != VAC_NONE)
                cache_flush((caddr_t)uva, len);

        upmap = vm_map_pmap(&bp->b_proc->p_vmspace->vm_map);
        kpmap = vm_map_pmap(kernel_map);
        do {
                if (pmap_extract(upmap, uva, &pa) == FALSE)
                        panic("vmapbuf: null page frame");
                /* Now map the page into kernel space. */
                pmap_enter(kpmap, kva, pa,
                    VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED);
                uva += PAGE_SIZE;
                kva += PAGE_SIZE;
                len -= PAGE_SIZE;
        } while (len);
        pmap_update(kpmap);
}

/*
 * Unmap a previously-mapped user I/O request.
 */
void
vunmapbuf(bp, len)
        struct buf *bp;
        vsize_t len;
{
        vaddr_t kva;
        vsize_t off;

        if ((bp->b_flags & B_PHYS) == 0)
                panic("vunmapbuf");

        kva = trunc_page((vaddr_t)bp->b_data);
        off = (vaddr_t)bp->b_data - kva;
        len = round_page(off + len);
        pmap_remove(vm_map_pmap(kernel_map), kva, kva + len);
        pmap_update(vm_map_pmap(kernel_map));
        uvm_km_free_wakeup(kernel_map, kva, len);
        bp->b_data = bp->b_saveaddr;
        bp->b_saveaddr = NULL;

#if 0   /* XXX: The flush above is sufficient, right? */
        if (CACHEINFO.c_vactype != VAC_NONE)
                cpuinfo.cache_flush(bp->b_data, len);
#endif
}


/*
 * The offset of the topmost frame in the kernel stack.
 */
#define TOPFRAMEOFF (USPACE-sizeof(struct trapframe)-sizeof(struct frame))

/*
 * Finish a fork operation, with process l2 nearly set up.
 * Copy and update the pcb and trap frame, making the child ready to run.
 *
 * Rig the child's kernel stack so that it will start out in
 * proc_trampoline() and call child_return() with l2 as an
 * argument. This causes the newly-created child process to go
 * directly to user level with an apparent return value of 0 from
 * fork(), while the parent process returns normally.
 *
 * l1 is the process being forked; if l1 == &lwp0, we are creating
 * a kernel thread, and the return path and argument are specified with
 * `func' and `arg'.
 *
 * If an alternate user-level stack is requested (with non-zero values
 * in both the stack and stacksize args), set up the user stack pointer
 * accordingly.
 */
void
cpu_lwp_fork(l1, l2, stack, stacksize, func, arg)
        struct lwp *l1, *l2;
        void *stack;
        size_t stacksize;
        void (*func) __P((void *));
        void *arg;
{
        struct pcb *opcb = &l1->l_addr->u_pcb;
        struct pcb *npcb = &l2->l_addr->u_pcb;
        struct trapframe *tf2;
        struct rwindow *rp;

        /*
         * Save all user registers to l1's stack or, in the case of
         * user registers and invalid stack pointers, to opcb.
         * We then copy the whole pcb to l2; when switch() selects l2
         * to run, it will run at the `proc_trampoline' stub, rather
         * than returning at the copying code below.
         *
         * If process l1 has an FPU state, we must copy it.  If it is
         * the FPU user, we must save the FPU state first.
         */

        if (l1 == curlwp) {
                write_user_windows();
                opcb->pcb_psr = getpsr();
        }
#ifdef DIAGNOSTIC
        else if (l1 != &lwp0)
                panic("cpu_lwp_fork: curlwp");
#endif

        bcopy((caddr_t)opcb, (caddr_t)npcb, sizeof(struct pcb));
        if (l1->l_md.md_fpstate != NULL) {
                struct cpu_info *cpi;
                int s;

                l2->l_md.md_fpstate = malloc(sizeof(struct fpstate),
                    M_SUBPROC, M_WAITOK);

                FPU_LOCK(s);
                if ((cpi = l1->l_md.md_fpu) != NULL) {
                        if (cpi->fplwp != l1)
                                panic("FPU(%d): fplwp %p",
                                        cpi->ci_cpuid, cpi->fplwp);
                        if (l1 == cpuinfo.fplwp)
                                savefpstate(l1->l_md.md_fpstate);
#if defined(MULTIPROCESSOR)
                        else
                                XCALL1(savefpstate, l1->l_md.md_fpstate,
                                       1 << cpi->ci_cpuid);
#endif
                }
                bcopy(l1->l_md.md_fpstate, l2->l_md.md_fpstate,
                    sizeof(struct fpstate));
                FPU_UNLOCK(s);
        } else
                l2->l_md.md_fpstate = NULL;

        l2->l_md.md_fpu = NULL;

        /*
         * Setup (kernel) stack frame that will by-pass the child
         * out of the kernel. (The trap frame invariably resides at
         * the tippity-top of the u. area.)
         */
        tf2 = l2->l_md.md_tf = (struct trapframe *)
                        ((int)npcb + USPACE - sizeof(*tf2));

        /* Copy parent's trapframe */
        *tf2 = *(struct trapframe *)((int)opcb + USPACE - sizeof(*tf2));

        /*
         * If specified, give the child a different stack.
         */
        if (stack != NULL)
                tf2->tf_out[6] = (u_int)stack + stacksize;

        /*
         * The fork system call always uses the old system call
         * convention; clear carry and skip trap instruction as
         * in syscall().
         * note: proc_trampoline() sets a fresh psr when returning
         * to user mode.
         */
        /*tf2->tf_psr &= ~PSR_C;   -* success */
        tf2->tf_pc = tf2->tf_npc;
        tf2->tf_npc = tf2->tf_pc + 4;

        /* Set return values in child mode */
        tf2->tf_out[0] = 0;
        tf2->tf_out[1] = 1;

        /* Construct kernel frame to return to in cpu_switch() */
        rp = (struct rwindow *)((u_int)npcb + TOPFRAMEOFF);
        rp->rw_local[0] = (int)func;            /* Function to call */
        rp->rw_local[1] = (int)arg;             /* and its argument */

        /* pcb_pc is a return address; the switch code resumes at pcb_pc + 8 */
        npcb->pcb_pc = (int)proc_trampoline - 8;
        npcb->pcb_sp = (int)rp;
        npcb->pcb_psr &= ~PSR_CWP;      /* Run in window #0 */
        npcb->pcb_wim = 1;              /* Fence at window #1 */
}

/*
 * cpu_exit is called as the last action during exit.
 *
 * We clean up the FPU state and then call switchexit() with the old lwp
 * as an argument.  switchexit() switches to the idle context, schedules
 * the old vmspace and stack to be freed, then selects a new process to
 * run.
 *
 * If `proc' is zero, we're exiting an lwp and arrange to call lwp_exit2()
 * instead of exit2().
 */
void
cpu_exit(l, proc)
        struct lwp *l;
        int proc;
{
        struct fpstate *fs;

        if ((fs = l->l_md.md_fpstate) != NULL) {
                struct cpu_info *cpi;
                int s;

                FPU_LOCK(s);
                if ((cpi = l->l_md.md_fpu) != NULL) {
                        if (cpi->fplwp != l)
                                panic("FPU(%d): fplwp %p",
                                        cpi->ci_cpuid, cpi->fplwp);
                        if (l == cpuinfo.fplwp)
                                savefpstate(fs);
#if defined(MULTIPROCESSOR)
                        else
                                XCALL1(savefpstate, fs, 1 << cpi->ci_cpuid);
#endif
                        cpi->fplwp = NULL;
                }
                l->l_md.md_fpu = NULL;
                FPU_UNLOCK(s);
                l->l_md.md_fpstate = NULL;
                free((void *)fs, M_SUBPROC);
        }
        switchexit(l, proc ? exit2 : lwp_exit2);
        /* NOTREACHED */
}

void
cpu_setfunc(l, func, arg)
        struct lwp *l;
        void (*func) __P((void *));
        void *arg;
{
        struct pcb *pcb = &l->l_addr->u_pcb;
        /*struct trapframe *tf = l->l_md.md_tf;*/
        struct rwindow *rp;

        /* Construct kernel frame to return to in cpu_switch() */
        rp = (struct rwindow *)((u_int)pcb + TOPFRAMEOFF);
        rp->rw_local[0] = (int)func;            /* Function to call */
        rp->rw_local[1] = (int)arg;             /* and its argument */

        pcb->pcb_pc = (int)proc_trampoline - 8;
        pcb->pcb_sp = (int)rp;
        pcb->pcb_psr &= ~PSR_CWP;       /* Run in window #0 */
        pcb->pcb_wim = 1;               /* Fence at window #1 */
}

/*
 * cpu_coredump is called to write a core dump header.
 * (should this be defined elsewhere? machdep.c?)
 */
int
cpu_coredump(l, vp, cred, chdr)
        struct lwp *l;
        struct vnode *vp;
        struct ucred *cred;
        struct core *chdr;
{
        int error;
        struct md_coredump md_core;
        struct coreseg cseg;
        struct proc *p;

        p = l->l_proc;

        CORE_SETMAGIC(*chdr, COREMAGIC, MID_MACHINE, 0);
        chdr->c_hdrsize = ALIGN(sizeof(*chdr));
        chdr->c_seghdrsize = ALIGN(sizeof(cseg));
        chdr->c_cpusize = sizeof(md_core);

        md_core.md_tf = *l->l_md.md_tf;
        if (l->l_md.md_fpstate) {
                if (l == cpuinfo.fplwp)
                        savefpstate(l->l_md.md_fpstate);
                md_core.md_fpstate = *l->l_md.md_fpstate;
        } else
                bzero((caddr_t)&md_core.md_fpstate, sizeof(struct fpstate));

        CORE_SETMAGIC(cseg, CORESEGMAGIC, MID_MACHINE, CORE_CPU);
        cseg.c_addr = 0;
        cseg.c_size = chdr->c_cpusize;
        error = vn_rdwr(UIO_WRITE, vp, (caddr_t)&cseg, chdr->c_seghdrsize,
            (off_t)chdr->c_hdrsize, UIO_SYSSPACE,
            IO_NODELOCKED|IO_UNIT, cred, NULL, p);
        if (error)
                return error;

        error = vn_rdwr(UIO_WRITE, vp, (caddr_t)&md_core, sizeof(md_core),
            (off_t)(chdr->c_hdrsize + chdr->c_seghdrsize), UIO_SYSSPACE,
            IO_NODELOCKED|IO_UNIT, cred, NULL, p);
        if (!error)
                chdr->c_nseg++;

        return error;
}