/*	$NetBSD: vm_machdep.c,v 1.13 2005/12/11 12:17:59 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vm_machdep.c 1.21 91/04/06$
 *
 *	@(#)vm_machdep.c	8.6 (Berkeley) 1/12/94
 */
/*
 * Copyright (c) 1988 University of Utah.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vm_machdep.c 1.21 91/04/06$
 *
 *	@(#)vm_machdep.c	8.6 (Berkeley) 1/12/94
 */

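/*
 * Machine-dependent VM glue for m68k: lwp/process context setup on
 * fork and exit, core dump support, kernel mappings for physical I/O
 * buffers, and (for Motorola/HP MMUs) raw physical-access helpers.
 */
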
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.13 2005/12/11 12:17:59 christos Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/user.h>
#include <sys/core.h>
#include <sys/exec.h>

#include <machine/frame.h>
#include <machine/cpu.h>
#include <machine/pte.h>
#include <machine/reg.h>

#include <uvm/uvm_extern.h>

void
cpu_proc_fork(struct proc *p1, struct proc *p2)
{

	p2->p_md.mdp_flags = p1->p_md.mdp_flags;
}

/*
 * Finish a fork operation, with lwp l2 nearly set up.
 * Copy and update the pcb and trap frame, making the child ready to run.
 *
 * Rig the child's kernel stack so that it will start out in
 * proc_trampoline() and call child_return() with l2 as an
 * argument.  This causes the newly-created child process to go
 * directly to user level with an apparent return value of 0 from
 * fork(), while the parent process returns normally.
 *
 * l1 is the lwp being forked; if l1 == &lwp0, we are creating
 * a kernel thread, and the return path and argument are specified with
 * `func' and `arg'.
 *
 * If an alternate user-level stack is requested (with non-zero values
 * in both the stack and stacksize args), set up the user stack pointer
 * accordingly.
 */
void
cpu_lwp_fork(struct lwp *l1, struct lwp *l2, void *stack, size_t stacksize,
    void (*func)(void *), void *arg)
{
	struct pcb *pcb = &l2->l_addr->u_pcb;
	struct trapframe *tf;
	struct switchframe *sf;
	extern struct pcb *curpcb;

	l2->l_md.md_flags = l1->l_md.md_flags;

	/* Copy pcb from lwp l1 to l2. */
	if (l1 == curlwp) {
		/* Sync the PCB before we copy it. */
		savectx(curpcb);
	}
#ifdef DIAGNOSTIC
	else if (l1 != &lwp0)
		panic("cpu_lwp_fork: curlwp");
#endif
	*pcb = l1->l_addr->u_pcb;

	/*
	 * Copy the trap frame.
	 */
	tf = (struct trapframe *)((u_int)l2->l_addr + USPACE) - 1;
	l2->l_md.md_regs = (int *)tf;
	*tf = *(struct trapframe *)l1->l_md.md_regs;

	/*
	 * If specified, give the child a different stack.
	 */
	if (stack != NULL)
		tf->tf_regs[15] = (u_int)stack + stacksize;

	sf = (struct switchframe *)tf - 1;
	sf->sf_pc = (u_int)proc_trampoline;
	pcb->pcb_regs[6] = (int)func;	/* A2 */
	pcb->pcb_regs[7] = (int)arg;	/* A3 */
	pcb->pcb_regs[11] = (int)sf;	/* SSP */
}

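/*
 * Arrange for an existing lwp to call `func' with `arg' the next time
 * it is scheduled, using the same switchframe/proc_trampoline rigging
 * as the tail of cpu_lwp_fork() above.
 */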
void
cpu_setfunc(struct lwp *l, void (*func)(void *), void *arg)
{
	struct pcb *pcb = &l->l_addr->u_pcb;
	struct trapframe *tf = (struct trapframe *)l->l_md.md_regs;
	struct switchframe *sf = (struct switchframe *)tf - 1;
	extern void proc_trampoline __P((void));

	sf->sf_pc = (int)proc_trampoline;
	pcb->pcb_regs[6] = (int)func;	/* A2 */
	pcb->pcb_regs[7] = (int)arg;	/* A3 */
	pcb->pcb_regs[11] = (int)sf;	/* SSP */
}

void
cpu_lwp_free(struct lwp *l, int proc)
{

	/* Nothing to do */
}

/*
 * cpu_exit is called as the last action during exit.
 *
 * Block context switches and then call switch_lwp_exit(), which
 * switches to another lwp, so we never return.
 */
void
cpu_exit(struct lwp *l)
{

	(void) splhigh();
	switch_lwp_exit(l);
	/* NOTREACHED */
}

/*
 * Dump the machine specific header information at the start of a core dump.
 */
struct md_core {
	struct reg intreg;
	struct fpreg freg;
};

int
cpu_coredump(struct lwp *l, void *iocookie, struct core *chdr)
{
	struct md_core md_core;
	struct coreseg cseg;
	int error;

	if (iocookie == NULL) {
		CORE_SETMAGIC(*chdr, COREMAGIC, MID_MACHINE, 0);
		chdr->c_hdrsize = ALIGN(sizeof(*chdr));
		chdr->c_seghdrsize = ALIGN(sizeof(cseg));
		chdr->c_cpusize = sizeof(md_core);
		chdr->c_nseg++;
		return 0;
	}

	/* Save integer registers. */
	error = process_read_regs(l, &md_core.intreg);
	if (error)
		return error;

	if (fputype) {
		/* Save floating point registers. */
		error = process_read_fpregs(l, &md_core.freg);
		if (error)
			return error;
	} else {
		/* Make sure these are clear. */
		memset((caddr_t)&md_core.freg, 0, sizeof(md_core.freg));
	}

	CORE_SETMAGIC(cseg, CORESEGMAGIC, MID_MACHINE, CORE_CPU);
	cseg.c_addr = 0;
	cseg.c_size = chdr->c_cpusize;

	error = coredump_write(iocookie, UIO_SYSSPACE, &cseg,
	    chdr->c_seghdrsize);
	if (error)
		return error;

	return coredump_write(iocookie, UIO_SYSSPACE, &md_core,
	    sizeof(md_core));
}

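/*
 * The routines below support physical I/O on B_PHYS buffers (typically
 * via physio(9)): vmapbuf() doubly maps a user buffer into the kernel's
 * phys_map so drivers can address it, and vunmapbuf() tears the mapping
 * down again.
 */
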
/*
 * Map a user I/O request into kernel virtual address space.
 * Note: the pages are already locked by uvm_vslock(), so we
 * do not need to pass an access_type to pmap_enter().
 */
void
vmapbuf(struct buf *bp, vsize_t len)
{
	struct pmap *upmap, *kpmap;
	vaddr_t uva;	/* User VA (map from) */
	vaddr_t kva;	/* Kernel VA (new to) */
	paddr_t pa;	/* physical address */
	vsize_t off;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");

	uva = m68k_trunc_page(bp->b_saveaddr = bp->b_data);
	off = (vaddr_t)bp->b_data - uva;
	len = m68k_round_page(off + len);
	kva = uvm_km_alloc(phys_map, len, 0, UVM_KMF_VAONLY | UVM_KMF_WAITVA);
	bp->b_data = (caddr_t)(kva + off);

	upmap = vm_map_pmap(&bp->b_proc->p_vmspace->vm_map);
	kpmap = vm_map_pmap(phys_map);
	do {
		if (pmap_extract(upmap, uva, &pa) == FALSE)
			panic("vmapbuf: null page frame");
#ifdef M68K_VAC
		pmap_enter(kpmap, kva, pa, VM_PROT_READ | VM_PROT_WRITE,
		    PMAP_WIRED);
#else
		pmap_kenter_pa(kva, pa, VM_PROT_READ | VM_PROT_WRITE);
#endif
		uva += PAGE_SIZE;
		kva += PAGE_SIZE;
		len -= PAGE_SIZE;
	} while (len);
	pmap_update(kpmap);
}

/*
 * Unmap a previously-mapped user I/O request.
 */
void
vunmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t kva;
	vsize_t off;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");

	kva = m68k_trunc_page(bp->b_data);
	off = (vaddr_t)bp->b_data - kva;
	len = m68k_round_page(off + len);

#ifdef M68K_VAC
	pmap_remove(vm_map_pmap(phys_map), kva, kva + len);
#else
	pmap_kremove(kva, len);
#endif
	pmap_update(pmap_kernel());
	uvm_km_free(phys_map, kva, len, UVM_KMF_VAONLY);
	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = 0;
}


#if defined(M68K_MMU_MOTOROLA) || defined(M68K_MMU_HP)

#include <m68k/cacheops.h>

/*
 * Map `size' bytes of physical memory starting at `paddr' into
 * kernel VA space at `vaddr'.  Read/write and cache-inhibit status
 * are specified by `prot'.
 */
void
physaccess(caddr_t vaddr, caddr_t paddr, int size, int prot)
{
	pt_entry_t *pte;
	u_int page;

	pte = kvtopte(vaddr);
	page = (u_int)paddr & PG_FRAME;
	for (size = btoc(size); size; size--) {
		*pte++ = PG_V | prot | page;
		page += PAGE_SIZE;
	}
	TBIAS();
}

void
physunaccess(caddr_t vaddr, int size)
{
	pt_entry_t *pte;

	pte = kvtopte(vaddr);
	for (size = btoc(size); size; size--)
		*pte++ = PG_NV;
	TBIAS();
}

/*
 * Convert kernel VA to physical address
 */
int
kvtop(caddr_t addr)
{
	paddr_t pa;

	if (pmap_extract(pmap_kernel(), (vaddr_t)addr, &pa) == FALSE)
		panic("kvtop: zero page frame");
	return((int)pa);
}

#endif