1 /* $OpenBSD: vm_machdep.c,v 1.29 2023/04/11 00:45:07 jsg Exp $ */
2 /* $NetBSD: vm_machdep.c,v 1.31 2004/01/04 11:33:29 jdolecek Exp $ */
3
4 /*
5 * Copyright (c) 1994-1998 Mark Brinicombe.
6 * Copyright (c) 1994 Brini.
7 * All rights reserved.
8 *
9 * This code is derived from software written for Brini by Mark Brinicombe
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by Brini.
22 * 4. The name of the company nor the name of the author may be used to
23 * endorse or promote products derived from this software without specific
24 * prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
27 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
28 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
29 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
30 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
31 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
32 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
37 *
38 * RiscBSD kernel project
39 *
 * vm_machdep.c
41 *
42 * vm machine specific bits
43 *
44 * Created : 08/10/94
45 */
46
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/proc.h>
50 #include <sys/buf.h>
51 #include <sys/user.h>
52
53 #include <uvm/uvm_extern.h>
54
55 #include <machine/cpu.h>
56 #include <machine/pmap.h>
57
58 #include <arm/vfp.h>
59
60 extern pv_addr_t systempage;
61
62 int process_read_regs (struct proc *p, struct reg *regs);
63 int process_read_fpregs (struct proc *p, struct fpreg *regs);
64
65 extern void proc_trampoline (void);
66
67 /*
68 * Finish a fork operation, with process p2 nearly set up.
69 * Copy and update the pcb and trap frame, making the child ready to run.
70 *
71 * Rig the child's kernel stack so that it will start out in
72 * proc_trampoline() and call 'func' with 'arg' as an argument.
73 * For normal processes this is child_return(), which causes the
74 * child to go directly to user level with an apparent return value
75 * of 0 from fork(), while the parent process returns normally.
76 * For kernel threads this will be a function that never returns.
77 *
78 * An alternate user-level stack or TCB can be requested by passing
79 * a non-NULL value; these are poked into the PCB so they're in
80 * effect at the initial return to userspace.
81 */
void
cpu_fork(struct proc *p1, struct proc *p2, void *stack, void *tcb,
    void (*func)(void *), void *arg)
{
	struct pcb *pcb = &p2->p_addr->u_pcb;
	struct trapframe *tf;
	struct switchframe *sf;

	if (p1 == curproc) {
		/*
		 * Sync the PCB before we copy it: flush the live register
		 * state of the current process into curpcb so the child's
		 * copy below is up to date.
		 */
		savectx(curpcb);
	}

	/* Copy the pcb: the child starts as a clone of the parent's PCB. */
	*pcb = p1->p_addr->u_pcb;

	/*
	 * Set up the undefined-mode and SVC stacks inside the child's
	 * USPACE.  They must point at p2's u-area, not the parent's copy.
	 * Note: the undefined stack is not in use if we are forking from p1.
	 */
	pcb->pcb_un.un_32.pcb32_und_sp = (u_int)p2->p_addr +
	    USPACE_UNDEF_STACK_TOP;
	pcb->pcb_un.un_32.pcb32_sp = (u_int)p2->p_addr + USPACE_SVC_STACK_TOP;

	/* Make the child's pmap current before we touch its stacks. */
	pmap_activate(p2);

	/*
	 * Carve a trapframe off the top of the child's SVC stack and seed
	 * it with a copy of the parent's trapframe, so the child returns
	 * to userspace looking like the parent did at the fork.
	 */
	pcb->pcb_tf = tf = (struct trapframe *)pcb->pcb_un.un_32.pcb32_sp - 1;
	*tf = *p1->p_addr->u_pcb.pcb_tf;

	/*
	 * If specified, give the child a different stack and/or TCB.
	 * Enforce 8-byte alignment on the stack (ARM AAPCS requirement).
	 */
	if (stack != NULL)
		tf->tf_usr_sp = (vaddr_t)stack & -8;
	if (tcb != NULL)
		p2->p_addr->u_pcb.pcb_tcb = tcb;

	/*
	 * Build a switchframe below the trapframe so the first switch to
	 * the child resumes in proc_trampoline, which invokes func(arg)
	 * from sf_r4/sf_r5 (see the function header comment above).
	 */
	sf = (struct switchframe *)tf - 1;
	sf->sf_r4 = (u_int)func;
	sf->sf_r5 = (u_int)arg;
	sf->sf_pc = (u_int)proc_trampoline;
	pcb->pcb_un.un_32.pcb32_sp = (u_int)sf;
}
126
127 void
cpu_exit(struct proc * p)128 cpu_exit(struct proc *p)
129 {
130 /* If we were using the FPU, forget about it. */
131 if (p->p_addr->u_pcb.pcb_fpcpu != NULL)
132 vfp_discard(p);
133
134 pmap_deactivate(p);
135 sched_exit(p);
136 }
137
/*
 * km_alloc(9) VA mode for physio transfer mappings: allocate from
 * phys_map and sleep (kv_wait) until space is available rather than
 * failing, since vmapbuf() has no error return.
 */
struct kmem_va_mode kv_physwait = {
	.kv_map = &phys_map,
	.kv_wait = 1,
};
142
143 /*
144 * Map a user I/O request into kernel virtual address space.
145 * Note: the pages are already locked by uvm_vslock(), so we
146 * do not need to pass an access_type to pmap_enter().
147 */
void
vmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t faddr, taddr, off;
	paddr_t fpa;

	/* Only physio (raw I/O) buffers may be mapped here. */
	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");

	/*
	 * Save the caller's user address in b_saveaddr (restored by
	 * vunmapbuf) and compute the page-aligned base and the offset
	 * of the data within its first page.
	 */
	faddr = trunc_page((vaddr_t)(bp->b_saveaddr = bp->b_data));
	off = (vaddr_t)bp->b_data - faddr;
	len = round_page(off + len);

	/* Reserve kernel VA; kv_physwait sleeps until space is free. */
	taddr = (vaddr_t)km_alloc(len, &kv_physwait, &kp_none, &kd_waitok);
	bp->b_data = (caddr_t)(taddr + off);
	/*
	 * The region is locked, so we expect that pmap_pte() will return
	 * non-NULL.
	 * XXX: unwise to expect this in a multithreaded environment.
	 * anything can happen to a pmap between the time we lock a
	 * region, release the pmap lock, and then relock it for
	 * the pmap_extract().
	 *
	 * no need to flush TLB since we expect nothing to be mapped
	 * where we just allocated (TLB will be flushed when our
	 * mapping is removed).
	 */
	while (len) {
		/* Look up each user page and alias it in kernel space. */
		(void) pmap_extract(vm_map_pmap(&bp->b_proc->p_vmspace->vm_map),
		    faddr, &fpa);
		pmap_kenter_pa(taddr, fpa, PROT_READ | PROT_WRITE);
		faddr += PAGE_SIZE;
		taddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	pmap_update(pmap_kernel());
}
183
184 /*
185 * Unmap a previously-mapped user I/O request.
186 */
187 void
vunmapbuf(struct buf * bp,vsize_t len)188 vunmapbuf(struct buf *bp, vsize_t len)
189 {
190 vaddr_t addr, off;
191
192 if ((bp->b_flags & B_PHYS) == 0)
193 panic("vunmapbuf");
194 addr = trunc_page((vaddr_t)bp->b_data);
195 off = (vaddr_t)bp->b_data - addr;
196 len = round_page(off + len);
197 pmap_kremove(addr, len);
198 pmap_update(pmap_kernel());
199 km_free((void *)addr, len, &kv_physwait, &kp_none);
200 bp->b_data = bp->b_saveaddr;
201 bp->b_saveaddr = NULL;
202 }
203