1 /* $NetBSD: vm_machdep.c,v 1.56 2000/06/29 07:40:12 mrg Exp $ */ 2 3 /* 4 * Copyright (c) 1996 5 * The President and Fellows of Harvard College. All rights reserved. 6 * Copyright (c) 1992, 1993 7 * The Regents of the University of California. All rights reserved. 8 * 9 * This software was developed by the Computer Systems Engineering group 10 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and 11 * contributed to Berkeley. 12 * 13 * All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by the University of 16 * California, Lawrence Berkeley Laboratory. 17 * This product includes software developed by Harvard University. 18 * 19 * Redistribution and use in source and binary forms, with or without 20 * modification, are permitted provided that the following conditions 21 * are met: 22 * 1. Redistributions of source code must retain the above copyright 23 * notice, this list of conditions and the following disclaimer. 24 * 2. Redistributions in binary form must reproduce the above copyright 25 * notice, this list of conditions and the following disclaimer in the 26 * documentation and/or other materials provided with the distribution. 27 * 3. All advertising materials mentioning features or use of this software 28 * must display the following acknowledgement: 29 * This product includes software developed by Harvard University. 30 * This product includes software developed by the University of 31 * California, Berkeley and its contributors. 32 * 4. Neither the name of the University nor the names of its contributors 33 * may be used to endorse or promote products derived from this software 34 * without specific prior written permission. 
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_machdep.c	8.2 (Berkeley) 9/23/93
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/core.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/exec.h>
#include <sys/vnode.h>
#include <sys/map.h>

#include <uvm/uvm_extern.h>

#include <machine/cpu.h>
#include <machine/frame.h>
#include <machine/trap.h>

#include <sparc/sparc/cpuvar.h>

/*
 * Move pages from one kernel virtual address to another.
 *
 * `from', `to' and `size' must all be page-aligned.  For each page,
 * the physical page currently mapped at `from' is unmapped there and
 * re-entered (wired) at `to'; `from' is left unmapped.
 */
void
pagemove(from, to, size)
	caddr_t from, to;
	size_t size;
{
	paddr_t pa;

	/* All three arguments must be page-aligned. */
	if (size & PGOFSET || (int)from & PGOFSET || (int)to & PGOFSET)
		panic("pagemove 1");
	while (size > 0) {
		/* The source address must have a valid kernel mapping. */
		if (pmap_extract(pmap_kernel(), (vaddr_t)from, &pa) == FALSE)
			panic("pagemove 2");
		/* Drop the old mapping ... */
		pmap_remove(pmap_kernel(),
		    (vaddr_t)from, (vaddr_t)from + PAGE_SIZE);
		/* ... and wire the same physical page at the new address. */
		pmap_enter(pmap_kernel(),
		    (vaddr_t)to, pa, VM_PROT_READ|VM_PROT_WRITE,
		    VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
		from += PAGE_SIZE;
		to += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
}


/*
 * Map a user I/O request into kernel virtual address space.
 * Note: the pages are already locked by uvm_vslock(), so we
 * do not need to pass an access_type to pmap_enter().
 */
void
vmapbuf(bp, len)
	struct buf *bp;
	vsize_t len;
{
	struct pmap *upmap, *kpmap;
	vaddr_t uva;	/* User VA (map from) */
	vaddr_t kva;	/* Kernel VA (new to) */
	paddr_t pa;	/* physical address */
	vsize_t off;

	/* Only physical-I/O buffers may be mapped this way. */
	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");

	/*
	 * XXX: It might be better to round/trunc to a
	 * segment boundary to avoid VAC problems!
	 */
	/* Remember the user address so vunmapbuf() can restore it. */
	bp->b_saveaddr = bp->b_data;
	uva = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - uva;
	len = round_page(off + len);
	/* Allocate kernel VA for the transfer; may sleep for space. */
	kva = uvm_km_valloc_wait(kernel_map, len);
	bp->b_data = (caddr_t)(kva + off);

	/*
	 * We have to flush any write-back cache on the
	 * user-space mappings so our new mappings will
	 * have the correct contents.
	 */
	if (CACHEINFO.c_vactype != VAC_NONE)
		cpuinfo.cache_flush((caddr_t)uva, len);

	upmap = vm_map_pmap(&bp->b_proc->p_vmspace->vm_map);
	kpmap = vm_map_pmap(kernel_map);
	do {
		/* Pages are wired by uvm_vslock(), so they must be present. */
		if (pmap_extract(upmap, uva, &pa) == FALSE)
			panic("vmapbuf: null page frame");
		/* Now map the page into kernel space. */
		pmap_enter(kpmap, kva, pa,
		    VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED);
		uva += PAGE_SIZE;
		kva += PAGE_SIZE;
		len -= PAGE_SIZE;
	} while (len);
}

/*
 * Unmap a previously-mapped user I/O request.
 */
void
vunmapbuf(bp, len)
	struct buf *bp;
	vsize_t len;
{
	vaddr_t kva;
	vsize_t off;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");

	/* Recover the page-aligned base and length used by vmapbuf(). */
	kva = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - kva;
	len = round_page(off + len);

	/* This will call pmap_remove() for us. */
	uvm_km_free_wakeup(kernel_map, kva, len);
	/* Restore the user address saved by vmapbuf(). */
	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = NULL;

#if 0	/* XXX: The flush above is sufficient, right? */
	if (CACHEINFO.c_vactype != VAC_NONE)
		cpuinfo.cache_flush(bp->b_data, len);
#endif
}


/*
 * The offset of the topmost frame in the kernel stack.
 */
#define	TOPFRAMEOFF (USPACE-sizeof(struct trapframe)-sizeof(struct frame))

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb and trap frame, making the child ready to run.
 *
 * Rig the child's kernel stack so that it will start out in
 * proc_trampoline() and call child_return() with p2 as an
 * argument. This causes the newly-created child process to go
 * directly to user level with an apparent return value of 0 from
 * fork(), while the parent process returns normally.
 *
 * p1 is the process being forked; if p1 == &proc0, we are creating
 * a kernel thread, and the return path and argument are specified with
 * `func' and `arg'.
 *
 * If an alternate user-level stack is requested (with non-zero values
 * in both the stack and stacksize args), set up the user stack pointer
 * accordingly.
 */
void
cpu_fork(p1, p2, stack, stacksize, func, arg)
	struct proc *p1, *p2;
	void *stack;
	size_t stacksize;
	void (*func) __P((void *));
	void *arg;
{
	struct pcb *opcb = &p1->p_addr->u_pcb;
	struct pcb *npcb = &p2->p_addr->u_pcb;
	struct trapframe *tf2;
	struct rwindow *rp;

	/*
	 * Save all user registers to p1's stack or, in the case of
	 * user registers and invalid stack pointers, to opcb.
	 * We then copy the whole pcb to p2; when switch() selects p2
	 * to run, it will run at the `proc_trampoline' stub, rather
	 * than returning at the copying code below.
	 *
	 * If process p1 has an FPU state, we must copy it.
	 * If it is
	 * the FPU user, we must save the FPU state first.
	 */

	if (p1 == curproc) {
		/* Flush register windows to memory and snapshot %psr. */
		write_user_windows();
		opcb->pcb_psr = getpsr();
	}
#ifdef DIAGNOSTIC
	else if (p1 != &proc0)
		panic("cpu_fork: curproc");
#endif

	/* Start the child's pcb as a copy of the parent's. */
	bcopy((caddr_t)opcb, (caddr_t)npcb, sizeof(struct pcb));
	if (p1->p_md.md_fpstate) {
		/* Write live FPU registers back before copying the state. */
		if (p1 == cpuinfo.fpproc)
			savefpstate(p1->p_md.md_fpstate);
		else if (p1->p_md.md_fpumid != -1)
			panic("FPU on module %d; fix this", p1->p_md.md_fpumid);
		p2->p_md.md_fpstate = malloc(sizeof(struct fpstate),
		    M_SUBPROC, M_WAITOK);
		bcopy(p1->p_md.md_fpstate, p2->p_md.md_fpstate,
		    sizeof(struct fpstate));
	} else
		p2->p_md.md_fpstate = NULL;

	p2->p_md.md_fpumid = -1;

	/*
	 * Setup (kernel) stack frame that will by-pass the child
	 * out of the kernel. (The trap frame invariably resides at
	 * the tippity-top of the u. area.)
	 */
	tf2 = p2->p_md.md_tf = (struct trapframe *)
	    ((int)npcb + USPACE - sizeof(*tf2));

	/* Copy parent's trapframe */
	*tf2 = *(struct trapframe *)((int)opcb + USPACE - sizeof(*tf2));

	/*
	 * If specified, give the child a different stack.
	 */
	if (stack != NULL)
		tf2->tf_out[6] = (u_int)stack + stacksize;

	/* Duplicate efforts of syscall(), but slightly differently */
	if (tf2->tf_global[1] & SYSCALL_G2RFLAG) {
		/* jmp %g2 (or %g7, deprecated) on success */
		tf2->tf_npc = tf2->tf_global[2];
	} else {
		/*
		 * old system call convention: clear C on success
		 * note: proc_trampoline() sets a fresh psr when
		 * returning to user mode.
		 */
		/*tf2->tf_psr &= ~PSR_C; -* success */
	}

	/* Set return values in child mode */
	tf2->tf_out[0] = 0;
	tf2->tf_out[1] = 1;

	/* Construct kernel frame to return to in cpu_switch() */
	rp = (struct rwindow *)((u_int)npcb + TOPFRAMEOFF);
	rp->rw_local[0] = (int)func;	/* Function to call */
	rp->rw_local[1] = (int)arg;	/* and its argument */

	/* -8 so the trampoline's return sequence lands on proc_trampoline. */
	npcb->pcb_pc = (int)proc_trampoline - 8;
	npcb->pcb_sp = (int)rp;
	npcb->pcb_psr &= ~PSR_CWP;	/* Run in window #0 */
	npcb->pcb_wim = 1;		/* Fence at window #1 */

}

/*
 * cpu_exit is called as the last action during exit.
 *
 * We clean up a little and then call switchexit() with the old proc
 * as an argument. switchexit() switches to the idle context, schedules
 * the old vmspace and stack to be freed, then selects a new process to
 * run.
 */
void
cpu_exit(p)
	struct proc *p;
{
	struct fpstate *fs;

	/* Release FPU state, saving registers first if we still own the FPU. */
	if ((fs = p->p_md.md_fpstate) != NULL) {
		if (p == cpuinfo.fpproc) {
			savefpstate(fs);
			cpuinfo.fpproc = NULL;
		}
		free((void *)fs, M_SUBPROC);
	}
	switchexit(p);
	/* NOTREACHED */
}

/*
 * cpu_coredump is called to write a core dump header.
 * (should this be defined elsewhere? machdep.c?)
 */
int
cpu_coredump(p, vp, cred, chdr)
	struct proc *p;
	struct vnode *vp;
	struct ucred *cred;
	struct core *chdr;
{
	int error;
	struct md_coredump md_core;
	struct coreseg cseg;

	/* Fill in the core header: magic plus header/segment/cpu sizes. */
	CORE_SETMAGIC(*chdr, COREMAGIC, MID_MACHINE, 0);
	chdr->c_hdrsize = ALIGN(sizeof(*chdr));
	chdr->c_seghdrsize = ALIGN(sizeof(cseg));
	chdr->c_cpusize = sizeof(md_core);

	/* Snapshot the trapframe and (if present) the FPU state. */
	md_core.md_tf = *p->p_md.md_tf;
	if (p->p_md.md_fpstate) {
		/* If we still own the FPU, write its registers back first. */
		if (p == cpuinfo.fpproc)
			savefpstate(p->p_md.md_fpstate);
		md_core.md_fpstate = *p->p_md.md_fpstate;
	} else
		bzero((caddr_t)&md_core.md_fpstate, sizeof(struct fpstate));

	CORE_SETMAGIC(cseg, CORESEGMAGIC, MID_MACHINE, CORE_CPU);
	cseg.c_addr = 0;
	cseg.c_size = chdr->c_cpusize;

	/* Write the CPU segment header just past the core header... */
	error = vn_rdwr(UIO_WRITE, vp, (caddr_t)&cseg, chdr->c_seghdrsize,
	    (off_t)chdr->c_hdrsize, UIO_SYSSPACE,
	    IO_NODELOCKED|IO_UNIT, cred, NULL, p);
	if (error)
		return error;

	/* ...followed by the machine-dependent register data. */
	error = vn_rdwr(UIO_WRITE, vp, (caddr_t)&md_core, sizeof(md_core),
	    (off_t)(chdr->c_hdrsize + chdr->c_seghdrsize), UIO_SYSSPACE,
	    IO_NODELOCKED|IO_UNIT, cred, NULL, p);
	if (!error)
		chdr->c_nseg++;	/* account for the segment we just wrote */

	return error;
}