xref: /netbsd-src/sys/arch/powerpc/booke/spe.c (revision c2f76ff004a2cb67efe5b12d97bd3ef7fe89e18d)
1 /*	$NetBSD: spe.c,v 1.2 2011/01/18 01:02:52 matt Exp $	*/
2 
3 /*-
4  * Copyright (c) 2011 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Matt Thomas of 3am Software Foundry.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: spe.c,v 1.2 2011/01/18 01:02:52 matt Exp $");
34 
35 #include "opt_altivec.h"
36 
37 #ifdef PPC_HAVE_SPE
38 
39 #include <sys/param.h>
40 #include <sys/proc.h>
41 #include <sys/systm.h>
42 #include <sys/atomic.h>
43 #include <sys/siginfo.h>
44 
45 #include <powerpc/altivec.h>
46 #include <powerpc/spr.h>
47 #include <powerpc/booke/spr.h>
48 #include <powerpc/psl.h>
49 #include <powerpc/pcb.h>
50 
/*
 * Claim the SPE unit for curlwp: evict any other lwp's live state,
 * load curlwp's saved state from its PCB, and record ownership.
 * (The vec_* names are reused for SPE; this port overloads the
 * AltiVec hooks and MDLWP_*VEC flags for the SPE unit.)
 */
void
vec_enable(void)
{
	struct cpu_info * const ci = curcpu();
	lwp_t * const l = curlwp;

	/* Remember that this lwp has used SPE at least once. */
	l->l_md.md_flags |= MDLWP_USEDVEC;

	/*
	 * Enable SPE temporarily (and disable interrupts).
	 */
	const register_t msr = mfmsr();
	mtmsr((msr & ~PSL_EE) | PSL_SPV);
	__asm volatile ("isync");

	if (ci->ci_veclwp != l) {
		struct pcb * const pcb = lwp_getpcb(l);
		/*
		 * Save the existing state (if any) and release the unit
		 * so it can be handed to this lwp.
		 */
		vec_save_cpu(VEC_SAVE_AND_RELEASE);

		/*
		 * Call an assembly routine to load everything from the PCB.
		 */
		vec_load_from_vreg(&pcb->pcb_vr);

		/*
		 * Enable SPE when we return to user-mode (we overload the
		 * ALTIVEC flags).  Record the new ownership of the SPE unit.
		 */
		ci->ci_veclwp = l;
		l->l_md.md_veccpu = ci;
	}
	__asm volatile ("sync");
	l->l_md.md_flags |= MDLWP_OWNVEC;

	/*
	 * Restore MSR (turn off SPE)
	 */
	mtmsr(msr);
}
93 
/*
 * Save the SPE register state of the lwp that currently owns this
 * CPU's SPE unit into that lwp's PCB.  With op == VEC_SAVE_AND_RELEASE
 * the unit is additionally handed back to the idle lwp so that no user
 * lwp owns it afterwards.
 */
void
vec_save_cpu(enum vec_op op)
{
	/*
	 * Turn on SPE, turn off interrupts.
	 */
	const register_t msr = mfmsr();
	mtmsr((msr & ~PSL_EE) | PSL_SPV);
	__asm volatile ("isync");

	struct cpu_info * const ci = curcpu();
	lwp_t * const l = ci->ci_veclwp;

	/* The owning lwp must agree that its SPE state lives on this CPU. */
	KASSERTMSG(l->l_md.md_veccpu == ci,
	    ("%s: veccpu (%p) != ci (%p)\n", __func__, l->l_md.md_veccpu, ci));
	if (l->l_md.md_flags & MDLWP_OWNVEC) {
		struct pcb * const pcb = lwp_getpcb(l);

		/*
		 * Save the vector state which is best done in assembly.
		 */
		vec_unload_to_vreg(&pcb->pcb_vr);

		/*
		 * Indicate that VEC unit is unloaded
		 */
		l->l_md.md_flags &= ~MDLWP_OWNVEC;

		/*
		 * If asked to, give up the VEC unit.
		 */
		if (op == VEC_SAVE_AND_RELEASE)
			ci->ci_veclwp = ci->ci_data.cpu_idlelwp;
	}

	/*
	 * Restore MSR (turn off SPE)
	 */
	mtmsr(msr);
}
134 
135 /*
136  * Save a lwp's SPE state to its PCB.  The lwp must either be curlwp or traced
137  * by curlwp (and stopped).  (The point being that the lwp must not be onproc
138  * on another CPU during this function).
139  */
void
vec_save_lwp(lwp_t *l, enum vec_op op)
{
	struct cpu_info * const ci = curcpu();

	/*
	 * If it's already in the PCB, there's nothing to do.
	 */
	if ((l->l_md.md_flags & MDLWP_OWNVEC) == 0)
		return;

	/*
	 * If we simply need to discard the information, then we don't need
	 * to save anything: just drop ownership of the unit.
	 */
	if (op == VEC_DISCARD) {
		struct cpu_info * const veccpu = l->l_md.md_veccpu;
#ifndef MULTIPROCESSOR
		/* On a uniprocessor the state can only be on this CPU. */
		KASSERT(ci == veccpu);
#endif
		KASSERT(l == veccpu->ci_veclwp);
		KASSERT(l == curlwp || ci == veccpu);
		/* Hand the unit to the idle lwp and mark the state unloaded. */
		ci->ci_veclwp = ci->ci_data.cpu_idlelwp;
		atomic_and_uint(&l->l_md.md_flags, ~MDLWP_OWNVEC);
		return;
	}

	/* Saving for real: the live state must be on this CPU. */
	KASSERT(l == ci->ci_veclwp);
	vec_save_cpu(op);
}
170 
171 void
172 vec_restore_from_mcontext(lwp_t *l, const mcontext_t *mcp)
173 {
174 	struct pcb * const pcb = lwp_getpcb(l);
175 	const union __vr *vr = mcp->__vrf.__vrs;
176 
177 	vec_save_lwp(l, VEC_DISCARD);
178 
179 	/* grab the accumulator */
180 	pcb->pcb_vr.vreg[8][0] = vr->__vr32[2];
181 	pcb->pcb_vr.vreg[8][1] = vr->__vr32[3];
182 
183 	/*
184 	 * We store the high parts of each register in the first 8 vectors.
185 	 */
186 	for (u_int i = 0; i < 8; i++, vr += 4) {
187 		pcb->pcb_vr.vreg[i][0] = vr[0].__vr32[0];
188 		pcb->pcb_vr.vreg[i][1] = vr[1].__vr32[0];
189 		pcb->pcb_vr.vreg[i][2] = vr[2].__vr32[0];
190 		pcb->pcb_vr.vreg[i][3] = vr[3].__vr32[0];
191 	}
192 	l->l_md.md_utf->tf_spefscr = pcb->pcb_vr.vscr = mcp->__vrf.__vscr;
193 	pcb->pcb_vr.vrsave = mcp->__vrf.__vrsave;
194 }
195 
196 bool
197 vec_save_to_mcontext(lwp_t *l, mcontext_t *mcp, unsigned int *flagp)
198 {
199 	struct pcb * const pcb = lwp_getpcb(l);
200 
201 	if ((l->l_md.md_flags & MDLWP_USEDVEC) == 0)
202 		return false;
203 
204 	vec_save_lwp(l, VEC_SAVE);
205 
206 	mcp->__gregs[_REG_MSR] |= PSL_SPV;
207 
208 	union __vr *vr = mcp->__vrf.__vrs;
209 	const register_t *fixreg = l->l_md.md_utf->tf_fixreg;
210 	for (u_int i = 0; i < 32; i++, vr += 4, fixreg += 4) {
211 		vr[0].__vr32[0] = pcb->pcb_vr.vreg[i][0];
212 		vr[0].__vr32[1] = fixreg[0];
213 		vr[0].__vr32[2] = 0;
214 		vr[0].__vr32[3] = 0;
215 		vr[1].__vr32[0] = pcb->pcb_vr.vreg[i][1];
216 		vr[1].__vr32[1] = fixreg[1];
217 		vr[1].__vr32[2] = 0;
218 		vr[1].__vr32[3] = 0;
219 		vr[2].__vr32[0] = pcb->pcb_vr.vreg[i][2];
220 		vr[2].__vr32[1] = fixreg[2];
221 		vr[2].__vr32[2] = 0;
222 		vr[2].__vr32[3] = 0;
223 		vr[3].__vr32[0] = pcb->pcb_vr.vreg[i][3];
224 		vr[3].__vr32[1] = fixreg[3];
225 		vr[3].__vr32[2] = 0;
226 		vr[3].__vr32[3] = 0;
227 	}
228 
229 	mcp->__vrf.__vrs[0].__vr32[2] = pcb->pcb_vr.vreg[8][0];
230 	mcp->__vrf.__vrs[0].__vr32[3] = pcb->pcb_vr.vreg[8][1];
231 
232 	mcp->__vrf.__vrsave = pcb->pcb_vr.vrsave;
233 	mcp->__vrf.__vscr = l->l_md.md_utf->tf_spefscr;
234 
235 	*flagp |= _UC_POWERPC_SPE;
236 
237 	return true;
238 }
239 
/*
 * Map SPEFSCR exception status bits (both the low-element and
 * high-element variant of each condition) to the siginfo FPE_* code
 * to deliver.  vec_siginfo_code() returns the code of the first entry
 * whose mask matches, so table order sets the reporting priority.
 */
static const struct {
	uint32_t mask;	/* SPEFSCR status bits selecting this condition */
	int code;	/* siginfo si_code to report for it */
} spefscr_siginfo_map[] = {
	{ SPEFSCR_FINV|SPEFSCR_FINVH, FPE_FLTINV },	/* invalid operand */
	{ SPEFSCR_FOVF|SPEFSCR_FOVFH, FPE_FLTOVF },	/* FP overflow */
	{ SPEFSCR_FUNF|SPEFSCR_FUNFH, FPE_FLTUND },	/* FP underflow */
	{ SPEFSCR_FX  |SPEFSCR_FXH,   FPE_FLTRES },	/* inexact result */
	{ SPEFSCR_FDBZ|SPEFSCR_FDBZH, FPE_FLTDIV },	/* FP divide by zero */
	{ SPEFSCR_OV  |SPEFSCR_OVH,   FPE_INTOVF },	/* integer overflow */
};
251 
252 int
253 vec_siginfo_code(const struct trapframe *tf)
254 {
255 	for (u_int i = 0; i < __arraycount(spefscr_siginfo_map); i++) {
256 		if (tf->tf_spefscr & spefscr_siginfo_map[i].mask)
257 			return spefscr_siginfo_map[i].code;
258 	}
259 	return 0;
260 }
261 
262 #endif /* PPC_HAVE_SPE */
263