/* $NetBSD: sig_machdep.c,v 1.55 2023/12/20 15:29:06 thorpej Exp $ */

/*
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sig_machdep.c,v 1.55 2023/12/20 15:29:06 thorpej Exp $");

#ifdef _KERNEL_OPT
#include "opt_altivec.h"
#include "opt_ppcarch.h"
#endif

#include <sys/param.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/syscallargs.h>
#include <sys/systm.h>
#include <sys/ucontext.h>
#include <sys/cpu.h>

#include <uvm/uvm_extern.h>

#include <powerpc/fpu.h>
#include <powerpc/altivec.h>
#include <powerpc/pcb.h>
#include <powerpc/psl.h>

/* Assert that the sizes of these two structures are multiples of CALLFRAMELEN. */
CTASSERT((sizeof(siginfo_t) & (CALLFRAMELEN-1)) == 0);
CTASSERT((sizeof(ucontext_t) & (CALLFRAMELEN-1)) == 0);

/*
 * Send a signal to the process.
 */
void
sendsig_siginfo(const ksiginfo_t *ksi, const sigset_t *mask)
{
        struct lwp * const l = curlwp;
        struct proc * const p = l->l_proc;
        struct trapframe * const tf = l->l_md.md_utf;
        stack_t * const ss = &l->l_sigstk;
        const struct sigact_sigdesc * const sd =
            &p->p_sigacts->sa_sigdesc[ksi->ksi_signo];
        /* save handler before sendsig_reset trashes it! */
        const void * const handler = sd->sd_sigact.sa_handler;
        ucontext_t uc;
        vaddr_t sp, sip, ucp;
        int onstack, error;

        /* Do we need to jump onto the signal stack? */
        onstack = (ss->ss_flags & (SS_DISABLE | SS_ONSTACK)) == 0 &&
            (sd->sd_sigact.sa_flags & SA_ONSTACK) != 0;

        /* Find top of stack. */
        sp = (onstack ? (vaddr_t)ss->ss_sp + ss->ss_size : tf->tf_fixreg[1]);

        /* Ensure it is aligned. */
        sp &= ~(CALLFRAMELEN-1);

        /* Allocate space for the ucontext. */
        sp -= sizeof(ucontext_t);

        /* Allocate space for the siginfo. */
        sp -= sizeof(siginfo_t);

#if 0 /* Not needed; see CTASSERTs above. */
        /* Align it again. */
        sp &= ~(CALLFRAMELEN-1);
#endif

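        /*
         * The siginfo sits at the new top of the stack, with the
         * ucontext placed immediately above it.
         */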
        sip = sp;
        ucp = sp + sizeof(siginfo_t);

        KASSERT((sip & (CALLFRAMELEN-1)) == 0);
        KASSERT((ucp & (CALLFRAMELEN-1)) == 0);

        /*
         * Now allocate space for a call frame, so that there's
         * space for the ABI-mandated stack linkage area in the
         * event the signal handler calls another function.
         */
        sp -= CALLFRAMELEN;

        /* Save register context. */
        memset(&uc, 0, sizeof(uc));
        uc.uc_flags = _UC_SIGMASK;
        uc.uc_flags |= (ss->ss_flags & SS_ONSTACK) ?
            _UC_SETSTACK : _UC_CLRSTACK;
        uc.uc_sigmask = *mask;
        uc.uc_link = l->l_ctxlink;
        sendsig_reset(l, ksi->ksi_signo);
        mutex_exit(p->p_lock);
        cpu_getmcontext(l, &uc.uc_mcontext, &uc.uc_flags);

        /*
         * Copy the siginfo and ucontext onto the user's stack.
         */
        error = (copyout(&ksi->ksi_info, (void *)sip, sizeof(ksi->ksi_info)) != 0 ||
            copyout(&uc, (void *)ucp, sizeof(uc)) != 0);
        mutex_enter(p->p_lock);

        if (error) {
                /*
                 * Process has trashed its stack; give it an illegal
                 * instruction to halt it in its tracks.
                 */
                sigexit(l, SIGILL);
                /* NOTREACHED */
        }

        /*
         * Build context to run handler in.  Note the trampoline version
         * numbers are coordinated with machine-dependent code in libc.
         */
        switch (sd->sd_vers) {
        case __SIGTRAMP_SIGINFO_VERSION:        /* siginfo sigtramp */
                tf->tf_fixreg[1] = (register_t)sp;
                tf->tf_fixreg[3] = (register_t)ksi->ksi_signo;
                tf->tf_fixreg[4] = (register_t)sip;
                tf->tf_fixreg[5] = (register_t)ucp;
                /* Preserve ucp across call to signal function */
                tf->tf_fixreg[30] = (register_t)ucp;
                tf->tf_lr = (register_t)sd->sd_tramp;
                tf->tf_srr0 = (register_t)handler;
                break;

        default:
                goto nosupport;
        }

        /* Remember that we're now on the signal stack. */
        if (onstack)
                ss->ss_flags |= SS_ONSTACK;
        return;

 nosupport:
        /* Don't know what trampoline version; kill it. */
        printf("sendsig_siginfo(sig %d): bad version %d\n",
            ksi->ksi_signo, sd->sd_vers);
        sigexit(l, SIGILL);
        /* NOTREACHED */
}

void
cpu_getmcontext(struct lwp *l, mcontext_t *mcp, unsigned int *flagp)
{
        const struct trapframe * const tf = l->l_md.md_utf;
        __greg_t * const gr = mcp->__gregs;
#if defined(PPC_HAVE_FPU)
        struct pcb * const pcb = lwp_getpcb(l);
#endif

        /* Save GPR context. */
        (void)memcpy(gr, &tf->tf_fixreg, 32 * sizeof (gr[0]));  /* GR0-31 */
        gr[_REG_CR]  = tf->tf_cr;
        gr[_REG_LR]  = tf->tf_lr;
        gr[_REG_PC]  = tf->tf_srr0;
        gr[_REG_MSR] = tf->tf_srr1 & PSL_USERSRR1;
#ifdef PPC_HAVE_FPU
        gr[_REG_MSR] |= pcb->pcb_flags & (PCB_FE0|PCB_FE1);
#endif
        gr[_REG_CTR] = tf->tf_ctr;
        gr[_REG_XER] = tf->tf_xer;
#ifdef PPC_OEA
        gr[_REG_MQ]  = tf->tf_mq;
#else
        gr[_REG_MQ]  = 0;
#endif

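        /* The GPR context (including R2, the TLS base) is always saved. */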
        *flagp |= _UC_CPU;
        *flagp |= _UC_TLSBASE;

#ifdef PPC_HAVE_FPU
        /* Save FPU context, if any. */
        if (!fpu_save_to_mcontext(l, mcp, flagp))
#endif
                memset(&mcp->__fpregs, 0, sizeof(mcp->__fpregs));

#if defined(ALTIVEC) || defined(PPC_HAVE_SPE)
        /* Save vector context, if any. */
        if (!vec_save_to_mcontext(l, mcp, flagp))
#endif
                memset(&mcp->__vrf, 0, sizeof (mcp->__vrf));
}

int
cpu_mcontext_validate(struct lwp *l, const mcontext_t *mcp)
{
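        /* No machine-dependent validation is performed on powerpc. */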
        return 0;
}

int
cpu_setmcontext(struct lwp *l, const mcontext_t *mcp, unsigned int flags)
{
        struct trapframe * const tf = l->l_md.md_utf;
        const __greg_t * const gr = mcp->__gregs;
        struct proc * const p = l->l_proc;
        int error;

        /* Restore GPR context, if any. */
        if (flags & _UC_CPU) {
                error = cpu_mcontext_validate(l, mcp);
                if (error)
                        return error;

#ifdef PPC_HAVE_FPU
                /*
                 * Always save the FP exception mode in the PCB.
                 */
                struct pcb * const pcb = lwp_getpcb(l);
                pcb->pcb_flags &= ~(PCB_FE0|PCB_FE1);
                pcb->pcb_flags |= gr[_REG_MSR] & (PCB_FE0|PCB_FE1);
#endif

                /*
                 * R2 is the TLS register so avoid updating it here.
                 */

                __greg_t save_r2 = tf->tf_fixreg[_REG_R2];
                (void)memcpy(&tf->tf_fixreg, gr, 32 * sizeof (gr[0]));
                tf->tf_fixreg[_REG_R2] = save_r2;
                tf->tf_cr = gr[_REG_CR];
                tf->tf_lr = gr[_REG_LR];
                tf->tf_srr0 = gr[_REG_PC];

                /*
                 * Accept all user-settable bits without complaint;
                 * userland should not need to know the machine-specific
                 * MSR value.
                 */
                tf->tf_srr1 = (gr[_REG_MSR] & PSL_USERMOD) | PSL_USERSET;
                tf->tf_ctr = gr[_REG_CTR];
                tf->tf_xer = gr[_REG_XER];
#ifdef PPC_OEA
                tf->tf_mq = gr[_REG_MQ];
#endif
        }

        if (flags & _UC_TLSBASE)
                lwp_setprivate(l, (void *)(uintptr_t)gr[_REG_R2]);

#ifdef PPC_HAVE_FPU
        /* Restore FPU context, if any. */
        if (flags & _UC_FPU)
                fpu_restore_from_mcontext(l, mcp);
#endif

#ifdef ALTIVEC
        /* Restore AltiVec context, if any. */
        if (flags & _UC_POWERPC_VEC)
                vec_restore_from_mcontext(l, mcp);
#endif

#ifdef PPC_HAVE_SPE
        /* Restore SPE context, if any. */
        if (flags & _UC_POWERPC_SPE)
                vec_restore_from_mcontext(l, mcp);
#endif

        mutex_enter(p->p_lock);
        if (flags & _UC_SETSTACK)
                l->l_sigstk.ss_flags |= SS_ONSTACK;
        if (flags & _UC_CLRSTACK)
                l->l_sigstk.ss_flags &= ~SS_ONSTACK;
        mutex_exit(p->p_lock);

        return (0);
}

int
cpu_lwp_setprivate(lwp_t *l, void *addr)
{
        struct trapframe * const tf = l->l_md.md_utf;

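        /* R2 holds the TLS base in the powerpc ABI; point it at addr. */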
        tf->tf_fixreg[_REG_R2] = (register_t)addr;

        return 0;
}