/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_machdep.c	8.1 (Berkeley) 6/11/93
 *
 * from: Header: vm_machdep.c,v 1.10 92/11/26 03:05:11 torek Exp  (LBL)
 * $Id: vm_machdep.c,v 1.1 1993/10/02 10:24:32 deraadt Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/exec.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>

#include <machine/cpu.h>
#include <machine/frame.h>

/*
 * Move pages from one kernel virtual address to another.
 */
pagemove(from, to, size)
	register caddr_t from, to;
	int size;
{
	register vm_offset_t pa;

	if (size & CLOFSET || (int)from & CLOFSET || (int)to & CLOFSET)
		panic("pagemove 1");
	while (size > 0) {
		pa = pmap_extract(kernel_pmap, (vm_offset_t)from);
		if (pa == 0)
			panic("pagemove 2");
		pmap_remove(kernel_pmap,
		    (vm_offset_t)from, (vm_offset_t)from + PAGE_SIZE);
		pmap_enter(kernel_pmap,
		    (vm_offset_t)to, pa, VM_PROT_READ|VM_PROT_WRITE, 1);
		from += PAGE_SIZE;
		to += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
}
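
/*
 * Rough usage sketch (not part of this file): pagemove() is intended
 * for callers such as the MI buffer cache, which shifts wired pages
 * between buffers using cluster-aligned arguments, along the lines of
 *
 *	pagemove(frombp->b_un.b_addr + frombp->b_bufsize - CLBYTES,
 *	    tobp->b_un.b_addr + tobp->b_bufsize, CLBYTES);
 *
 * "frombp" and "tobp" are hypothetical names; the CLOFSET checks above
 * panic if any argument is not cluster-aligned.
 */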

/*
 * Map an I/O request into kernel virtual address space.
 *
 * ### pmap_enter distributes this mapping to all contexts ... maybe
 * we should avoid this extra work
 *
 * THIS IS NOT IDEAL -- WE NEED ONLY VIRTUAL SPACE BUT kmem_alloc_wait
 * DOES WORK DESIGNED TO SUPPLY PHYSICAL SPACE ON DEMAND LATER
 */
vmapbuf(bp)
	register struct buf *bp;
{
	register int npf;
	register caddr_t addr;
	struct proc *p;
	int off;
	vm_offset_t kva;
	register vm_offset_t pa;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");
	addr = bp->b_saveaddr = bp->b_un.b_addr;
	off = (int)addr & PGOFSET;
	p = bp->b_proc;
	npf = btoc(round_page(bp->b_bcount + off));
	kva = kmem_alloc_wait(phys_map, ctob(npf));
	bp->b_un.b_addr = (caddr_t)(kva + off);
	while (npf--) {
		pa = pmap_extract(vm_map_pmap(&p->p_vmspace->vm_map),
		    (vm_offset_t)addr);
		if (pa == 0)
			panic("vmapbuf: null page frame");
		pmap_enter(vm_map_pmap(phys_map), kva,
		    trunc_page(pa) | PMAP_NC,
		    VM_PROT_READ|VM_PROT_WRITE, 1);
		addr += PAGE_SIZE;
		kva += PAGE_SIZE;
	}
}

/*
 * Free the I/O map addresses associated with this I/O operation.
 */
vunmapbuf(bp)
	register struct buf *bp;
{
	register vm_offset_t kva = (vm_offset_t)bp->b_un.b_addr;
	register int off, npf;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");
	off = (int)kva & PGOFSET;
	kva -= off;
	npf = btoc(round_page(bp->b_bcount + off));
	kmem_free_wakeup(phys_map, kva, ctob(npf));
	bp->b_un.b_addr = bp->b_saveaddr;
	bp->b_saveaddr = NULL;
	cache_flush(bp->b_un.b_addr, bp->b_bcount - bp->b_resid);
}

/*
 * Allocate physical memory in the DVMA virtual address range;
 * the resulting mapping is made uncached.
 */
caddr_t
dvma_malloc(size)
	size_t size;
{
	vm_size_t vsize;
	caddr_t va;

	vsize = round_page(size);
	va = (caddr_t)kmem_alloc(phys_map, vsize);
	if (va == NULL)
		panic("dvma_malloc");
	kvm_uncache(va, vsize >> PGSHIFT);
	return (va);
}

/*
 * The offset of the topmost frame in the kernel stack.
 */
#define	TOPFRAMEOFF (UPAGES*NBPG-sizeof(struct trapframe)-sizeof(struct frame))
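
/*
 * Informal sketch of the u. area layout assumed by TOPFRAMEOFF and by
 * the kernel stack copy in cpu_fork() below (addresses increase upward):
 *
 *   p_addr + UPAGES*NBPG -> +---------------------+
 *                           | struct trapframe    |  saved user state
 *   p_addr + TOPFRAMEOFF -> +---------------------+
 *                           | struct frame        |  topmost kernel frame
 *                           +---------------------+
 *                           | ... kernel stack    |  grows downward
 *                           +---------------------+
 *   p_addr (struct user) -> | struct pcb (u_pcb)  |  first element of struct user
 *                           +---------------------+
 */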

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the kernel stack and pcb, making the child
 * ready to run, and marking it so that it can return differently
 * than the parent.  Returns 1 in the child process, 0 in the parent.
 *
 * This function relies on the fact that the pcb is
 * the first element in struct user.
 */
cpu_fork(p1, p2)
	register struct proc *p1, *p2;
{
	register struct pcb *opcb = &p1->p_addr->u_pcb;
	register struct pcb *npcb = &p2->p_addr->u_pcb;
	register u_int sp, topframe, off, ssize;

	/*
	 * Save all the registers to p1's stack or, in the case of
	 * user registers and invalid stack pointers, to opcb.
	 * snapshot() also sets the given pcb's pcb_sp and pcb_psr
	 * to the current %sp and %psr, and sets pcb_pc to a stub
	 * which returns 1.  We then copy the whole pcb to p2;
	 * when swtch() selects p2 to run, it will run at the stub,
	 * rather than at the copying code below, and cpu_fork
	 * will return 1.
	 *
	 * Note that the order `*npcb = *opcb, snapshot(npcb)' is wrong,
	 * as user registers might then wind up only in opcb.
	 * We could call save_user_windows first,
	 * but that would only save 3 stores anyway.
	 *
	 * If process p1 has an FPU state, we must copy it.  If it is
	 * the FPU user, we must save the FPU state first.
	 */
	snapshot(opcb);
	bcopy((caddr_t)opcb, (caddr_t)npcb, sizeof(struct pcb));
	if (p1->p_md.md_fpstate) {
		if (p1 == fpproc)
			savefpstate(p1->p_md.md_fpstate);
		p2->p_md.md_fpstate = malloc(sizeof(struct fpstate),
		    M_SUBPROC, M_WAITOK);
		bcopy(p1->p_md.md_fpstate, p2->p_md.md_fpstate,
		    sizeof(struct fpstate));
	} else
		p2->p_md.md_fpstate = NULL;

	/*
	 * Copy the active part of the kernel stack,
	 * then adjust each kernel sp -- the frame pointer
	 * in the top frame is a user sp -- in the child's copy,
	 * including the initial one in the child's pcb.
	 */
	sp = npcb->pcb_sp;		/* points to old kernel stack */
	ssize = (u_int)opcb + UPAGES * NBPG - sp;
	if (ssize >= UPAGES * NBPG - sizeof(struct pcb))
		panic("cpu_fork 1");
	off = (u_int)npcb - (u_int)opcb;
	qcopy((caddr_t)sp, (caddr_t)sp + off, ssize);
	sp += off;
	npcb->pcb_sp = sp;
	topframe = (u_int)npcb + TOPFRAMEOFF;
	while (sp < topframe)
		sp = ((struct rwindow *)sp)->rw_in[6] += off;
	if (sp != topframe)
		panic("cpu_fork 2");
	/*
	 * This might be unnecessary, but it may be possible for the child
	 * to run in ptrace or sendsig before it returns from fork.
	 */
	p2->p_md.md_tf = (struct trapframe *)((int)p1->p_md.md_tf + off);
	return (0);
}

/*
 * cpu_exit is called as the last action during exit.
 * We release the address space and machine-dependent resources,
 * including the memory for the user structure and kernel stack.
 * Since the latter is also the interrupt stack, we release it
 * from assembly code after switching to a temporary pcb+stack.
 */
cpu_exit(p)
	struct proc *p;
{
	register struct fpstate *fs;

	if ((fs = p->p_md.md_fpstate) != NULL) {
		if (p == fpproc) {
			savefpstate(fs);
			fpproc = NULL;
		}
		free((void *)fs, M_SUBPROC);
	}
	vmspace_free(p->p_vmspace);
	swtchexit(kernel_map, p->p_addr, round_page(ctob(UPAGES)));
	/* NOTREACHED */
}

/*
 * cpu_coredump is called to write a core dump header.
 * (should this be defined elsewhere?  machdep.c?)
 */
int
cpu_coredump(p, vp, cred)
	struct proc *p;
	struct vnode *vp;
	struct ucred *cred;
{
	register struct user *up = p->p_addr;

	up->u_md.md_tf = *p->p_md.md_tf;
	if (p->p_md.md_fpstate)
		up->u_md.md_fpstate = *p->p_md.md_fpstate;
	else
		bzero((caddr_t)&up->u_md.md_fpstate, sizeof(struct fpstate));
	return (vn_rdwr(UIO_WRITE, vp, (caddr_t)up, ctob(UPAGES), (off_t)0,
	    UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, cred, (int *)NULL, p));
}
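
/*
 * Rough sketch of how this is used (an assumption about the MI side,
 * not a definition of its interface): the machine-independent
 * coredump() code is expected to call cpu_coredump() first, so the
 * u. area written above lands at offset 0 of the core file, and then
 * to append the data and stack segments itself, e.g.
 *
 *	error = cpu_coredump(p, vp, cred);
 *	if (error == 0)
 *		error = vn_rdwr(UIO_WRITE, vp, vm->vm_daddr, ...);
 */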