/*	$NetBSD: vm_machdep.c,v 1.58 2023/02/25 08:30:31 skrll Exp $	*/

/*	$OpenBSD: vm_machdep.c,v 1.64 2008/09/30 18:54:26 miod Exp $	*/

/*
 * Copyright (c) 1999-2004 Michael Shalayeff
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.58 2023/02/25 08:30:31 skrll Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/ptrace.h>
#include <sys/exec.h>
#include <sys/core.h>
#include <sys/pool.h>
#include <sys/cpu.h>

#include <machine/cpufunc.h>
#include <machine/pmap.h>
#include <machine/pcb.h>

#include <uvm/uvm.h>

extern struct pool hppa_fppl;

#include <hppa/hppa/machdep.h>

static inline void
cpu_activate_pcb(struct lwp *l)
{
	struct trapframe *tf = l->l_md.md_regs;
	struct pcb *pcb = lwp_getpcb(l);
#ifdef DIAGNOSTIC
	vaddr_t uarea = (vaddr_t)pcb;
	vaddr_t maxsp = uarea + USPACE;
	KASSERT(tf == (void *)(uarea + PAGE_SIZE));
#endif
	/*
	 * Stash the physical address of FP regs for later perusal
	 */
	tf->tf_cr30 = (u_int)pcb->pcb_fpregs;

#ifdef DIAGNOSTIC
	/* Create the kernel stack red zone. */
	pmap_remove(pmap_kernel(), maxsp - PAGE_SIZE, maxsp);
	pmap_update(pmap_kernel());
#endif
}

void
cpu_proc_fork(struct proc *p1, struct proc *p2)
{

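	/* The child inherits the parent's machine-dependent process flags. */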
	p2->p_md.md_flags = p1->p_md.md_flags;
}

void
cpu_lwp_fork(struct lwp *l1, struct lwp *l2, void *stack, size_t stacksize,
    void (*func)(void *), void *arg)
{
	struct pcb *pcb1, *pcb2;
	struct trapframe *tf;
	register_t sp, osp;
	vaddr_t uv;

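	/* The PCB must fit in the first page of the uarea, below the trapframe. */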
	KASSERT(round_page(sizeof(struct pcb)) <= PAGE_SIZE);

	pcb1 = lwp_getpcb(l1);
	pcb2 = lwp_getpcb(l2);

	KASSERT(l2->l_md.md_astpending == 0);
	KASSERT(l2->l_md.md_flags == 0);

	/* Flush the parent LWP out of the FPU. */
	hppa_fpu_flush(l1);

	/* Now copy the parent PCB into the child. */
	memcpy(pcb2, pcb1, sizeof(struct pcb));

	pcb2->pcb_fpregs = pool_get(&hppa_fppl, PR_WAITOK);
	*pcb2->pcb_fpregs = *pcb1->pcb_fpregs;

	/* reset any of the pending FPU exceptions from parent */
	pcb2->pcb_fpregs->fpr_regs[0] =
	    HPPA_FPU_FORK(pcb2->pcb_fpregs->fpr_regs[0]);
	pcb2->pcb_fpregs->fpr_regs[1] = 0;
	pcb2->pcb_fpregs->fpr_regs[2] = 0;
	pcb2->pcb_fpregs->fpr_regs[3] = 0;

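	/* Inherit the parent's single-step breakpoint state. */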
	l2->l_md.md_bpva = l1->l_md.md_bpva;
	l2->l_md.md_bpsave[0] = l1->l_md.md_bpsave[0];
	l2->l_md.md_bpsave[1] = l1->l_md.md_bpsave[1];

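	/*
	 * The child's trapframe sits one page into its uarea; its kernel
	 * stack is built upwards from just above the trapframe.
	 */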
	uv = uvm_lwp_getuarea(l2);
	sp = (register_t)uv + PAGE_SIZE;
	tf = l2->l_md.md_regs = (struct trapframe *)sp;
	sp += sizeof(struct trapframe);

	/* Copy l1's trapframe to l2. */
	memcpy(tf, l1->l_md.md_regs, sizeof(*tf));

	/* Fill out all the PAs we are going to need in locore. */
	cpu_activate_pcb(l2);

	if (__predict_true(l2->l_proc->p_vmspace != NULL)) {
		hppa_setvmspace(l2);
		/*
		 * These could in theory be inherited from the parent,
		 * but set them explicitly just in case.
		 */
		mfctl(CR_EIEM, tf->tf_eiem);
		tf->tf_ipsw = PSW_C | PSW_Q | PSW_P | PSW_D | PSW_I /* | PSW_L */ |
		    (curcpu()->ci_psw & PSW_O);
	}

	/*
	 * Set up return value registers as libc:fork() expects
	 */
	tf->tf_ret0 = l1->l_proc->p_pid;
	tf->tf_ret1 = 1;	/* ischild */
	tf->tf_t1 = 0;		/* errno */

	/*
	 * If specified, give the child a different stack.
	 */
	if (stack != NULL)
		tf->tf_sp = (register_t)stack;

	/*
	 * Build stack frames for the cpu_switchto & co.
	 */
	osp = sp;

	/* lwp_trampoline's frame */
	sp += HPPA_FRAME_SIZE;

	*(register_t *)(sp) = 0;	/* previous frame pointer */
	*(register_t *)(sp + HPPA_FRAME_PSP) = osp;
	*(register_t *)(sp + HPPA_FRAME_CRP) = (register_t)lwp_trampoline;

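	/* Arguments for lwp_trampoline: the function to call and its cookie. */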
	*HPPA_FRAME_CARG(2, sp) = KERNMODE(func);
	*HPPA_FRAME_CARG(3, sp) = (register_t)arg;

	/*
	 * cpu_switchto's frame
	 * 	stack usage is std frame + callee-save registers
	 */
	sp += HPPA_FRAME_SIZE + 16*4;
	pcb2->pcb_ksp = sp;
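	/* Push the newly built PCB and stack frames out to memory. */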
	fdcache(HPPA_SID_KERNEL, uv, sp - uv);
}

void
cpu_lwp_free(struct lwp *l, int proc)
{
	struct pcb *pcb = lwp_getpcb(l);

	/*
	 * If this thread was using the FPU, disable the FPU and record
	 * that it's unused.
	 */

	hppa_fpu_flush(l);
	pool_put(&hppa_fppl, pcb->pcb_fpregs);
}

void
cpu_lwp_free2(struct lwp *l)
{

	(void)l;
}

/*
 * Map an IO request into kernel virtual address space.
 */
int
vmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t uva, kva;
	paddr_t pa;
	vsize_t size, off;
	int npf;
	struct pmap *upmap, *kpmap;

#ifdef DIAGNOSTIC
	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");
#endif
	upmap = vm_map_pmap(&bp->b_proc->p_vmspace->vm_map);
	kpmap = vm_map_pmap(phys_map);
	bp->b_saveaddr = bp->b_data;
	uva = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - uva;
	size = round_page(off + len);
	kva = uvm_km_alloc(phys_map, size, 0, UVM_KMF_VAONLY | UVM_KMF_WAITVA);
	bp->b_data = (void *)(kva + off);
	npf = btoc(size);
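	/* Enter a wired kernel mapping for each page of the user buffer. */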
	while (npf--) {
		if (pmap_extract(upmap, uva, &pa) == false)
			panic("vmapbuf: null page frame");
		pmap_enter(kpmap, kva, pa,
		    VM_PROT_READ | VM_PROT_WRITE, PMAP_WIRED);
		uva += PAGE_SIZE;
		kva += PAGE_SIZE;
	}
	pmap_update(kpmap);

	return 0;
}

/*
 * Unmap IO request from the kernel virtual address space.
 */
void
vunmapbuf(struct buf *bp, vsize_t len)
{
	struct pmap *pmap;
	vaddr_t kva;
	vsize_t off;

#ifdef DIAGNOSTIC
	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");
#endif
	kva = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - kva;
	len = round_page(off + len);
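	/* Remove the wired mappings and release the kernel virtual space. */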
	pmap = vm_map_pmap(phys_map);
	pmap_remove(pmap, kva, kva + len);
	pmap_update(pmap);
	uvm_km_free(phys_map, kva, len, UVM_KMF_VAONLY);
	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = NULL;
}

int
cpu_lwp_setprivate(lwp_t *l, void *addr)
{

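	/*
	 * The LWP private (TLS) pointer is delivered to userland in %cr27;
	 * update the live control register if this is the running LWP.
	 */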
	l->l_md.md_regs->tf_cr27 = (u_int)addr;
	if (l == curlwp)
		mtctl(addr, CR_TLS);
	return 0;
}