/*	$NetBSD: altivec.c,v 1.29 2014/05/16 00:48:41 rmind Exp $	*/

/*
 * Copyright (C) 1996 Wolfgang Solfrank.
 * Copyright (C) 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: altivec.c,v 1.29 2014/05/16 00:48:41 rmind Exp $");

#include "opt_multiprocessor.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/atomic.h>

#include <uvm/uvm_extern.h>		/*  for vcopypage/vzeropage */

#include <powerpc/pcb.h>
#include <powerpc/altivec.h>
#include <powerpc/spr.h>
#include <powerpc/oea/spr.h>
#include <powerpc/psl.h>

static void vec_state_load(lwp_t *, u_int);
static void vec_state_save(lwp_t *);
static void vec_state_release(lwp_t *);

const pcu_ops_t vec_ops = {
	.pcu_id = PCU_VEC,
	.pcu_state_load = vec_state_load,
	.pcu_state_save = vec_state_save,
	.pcu_state_release = vec_state_release,
};
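
/*
 * The callbacks above are invoked by the machine-independent PCU
 * framework rather than called directly.  A rough sketch of how the
 * rest of the port is expected to drive them (the vec_load() and
 * vec_save() wrapper names are assumed from <powerpc/altivec.h> and
 * are not defined in this file):
 *
 *	vec_load();	-- pcu_load(&vec_ops): bind the AltiVec unit to
 *			   curlwp, calling vec_state_load()
 *	vec_save();	-- pcu_save(&vec_ops): write the live registers
 *			   back to the PCB via vec_state_save()
 *
 * vec_state_release() is invoked by the framework when ownership of
 * the unit is taken away from an LWP.
 */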

bool
vec_used_p(lwp_t *l)
{
	return pcu_valid_p(&vec_ops);
}

void
vec_mark_used(lwp_t *l)
{
	pcu_discard(&vec_ops, true);
}
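
/*
 * vec_used_p() reports whether the LWP has valid AltiVec state as
 * tracked by the PCU framework, and vec_mark_used() forces that state
 * to be treated as valid (as done by vec_state_load() after it
 * zero-fills a fresh register set).  Note that both go through
 * pcu_valid_p()/pcu_discard(), which act on curlwp; the "l" argument
 * is not consulted here.
 */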

void
vec_state_load(lwp_t *l, u_int flags)
{
	struct pcb * const pcb = lwp_getpcb(l);

	if ((flags & PCU_VALID) == 0) {
		memset(&pcb->pcb_vr, 0, sizeof(pcb->pcb_vr));
		vec_mark_used(l);
	}

	/*
	 * Enable AltiVec temporarily (and disable interrupts).
	 */
	const register_t msr = mfmsr();
	mtmsr((msr & ~PSL_EE) | PSL_VEC);
	__asm volatile ("isync");

	/*
	 * Load the vector unit from the vreg save area; this is best
	 * done in assembly.
	 */
	vec_load_from_vreg(&pcb->pcb_vr);

	/*
	 * VRSAVE will be restored when the trap frame is restored.
	 */
	l->l_md.md_utf->tf_vrsave = pcb->pcb_vr.vrsave;

	/*
	 * Restore MSR (turn off AltiVec)
	 */
	mtmsr(msr);
	__asm volatile ("isync");

	/*
	 * Mark vector registers as modified.
	 */
	l->l_md.md_flags |= PSL_VEC;
	l->l_md.md_utf->tf_srr1 |= PSL_VEC;
}
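
/*
 * Note on the two PSL_VEC updates above: setting PSL_VEC in tf_srr1
 * causes MSR[VEC] to be set when the trap frame is restored, so the
 * LWP can execute AltiVec instructions without faulting again, while
 * the md_flags bit records that this LWP has used the vector unit
 * (interpretation of the md_flags bit is an assumption; its consumers
 * live outside this file).
 */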

void
vec_state_save(lwp_t *l)
{
	struct pcb * const pcb = lwp_getpcb(l);

	/*
	 * Turn on AltiVec, turn off interrupts.
	 */
	const register_t msr = mfmsr();
	mtmsr((msr & ~PSL_EE) | PSL_VEC);
	__asm volatile ("isync");

	/*
	 * Grab contents of vector unit.
	 */
	vec_unload_to_vreg(&pcb->pcb_vr);

	/*
	 * Save VRSAVE
	 */
	pcb->pcb_vr.vrsave = l->l_md.md_utf->tf_vrsave;

	/*
	 * Stop any data streams that are in progress; we are done
	 * using the vector unit's resources for now.
	 */
	__asm volatile ("dssall; sync");

	/*
	 * Restore MSR (turn off AltiVec)
	 */
	mtmsr(msr);
	__asm volatile ("isync");
}
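
/*
 * vec_state_save() is invoked by the PCU framework whenever the live
 * register contents must be written back to the PCB, e.g. before the
 * unit is handed to another LWP or when the saved state is needed in
 * memory, as in vec_save_to_mcontext() below.
 */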

void
vec_state_release(lwp_t *l)
{
	__asm volatile("dssall;sync");
	l->l_md.md_utf->tf_srr1 &= ~PSL_VEC;
	l->l_md.md_flags &= ~PSL_VEC;
}
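
/*
 * Clearing PSL_VEC from the user trap frame's SRR1 disables the vector
 * unit for this LWP, so its next AltiVec instruction takes a
 * vector-unavailable exception and the PCU framework reloads the state
 * through vec_state_load().
 */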

void
vec_restore_from_mcontext(struct lwp *l, const mcontext_t *mcp)
{
	struct pcb * const pcb = lwp_getpcb(l);

	KASSERT(l == curlwp);

	/* we don't need to save the state, just drop it */
	pcu_discard(&vec_ops, true);
	memcpy(pcb->pcb_vr.vreg, &mcp->__vrf.__vrs, sizeof (pcb->pcb_vr.vreg));
	pcb->pcb_vr.vscr = mcp->__vrf.__vscr;
	pcb->pcb_vr.vrsave = mcp->__vrf.__vrsave;
	l->l_md.md_utf->tf_vrsave = pcb->pcb_vr.vrsave;
}

bool
vec_save_to_mcontext(struct lwp *l, mcontext_t *mcp, unsigned int *flagp)
{
	struct pcb * const pcb = lwp_getpcb(l);

	KASSERT(l == curlwp);

	/* Save AltiVec context, if any. */
	if (!vec_used_p(l))
		return false;

	/*
	 * If we're the AltiVec owner, dump its context to the PCB first.
	 */
	pcu_save(&vec_ops);

	mcp->__gregs[_REG_MSR] |= PSL_VEC;
	mcp->__vrf.__vscr = pcb->pcb_vr.vscr;
	mcp->__vrf.__vrsave = l->l_md.md_utf->tf_vrsave;
	memcpy(mcp->__vrf.__vrs, pcb->pcb_vr.vreg, sizeof (mcp->__vrf.__vrs));
	*flagp |= _UC_POWERPC_VEC;
	return true;
}
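
/*
 * A minimal sketch of how the two mcontext helpers above are expected
 * to be used by the MD signal/ucontext code (the cpu_getmcontext() and
 * cpu_setmcontext() call sites are assumptions; they live elsewhere):
 *
 *	// building a ucontext, e.g. for signal delivery
 *	if (vec_save_to_mcontext(l, mcp, &flags))
 *		;	// _UC_POWERPC_VEC is now set in flags
 *
 *	// installing a ucontext
 *	if (flags & _UC_POWERPC_VEC)
 *		vec_restore_from_mcontext(l, mcp);
 */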

#define ZERO_VEC	19
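/*
 * ZERO_VEC names the vector register (v19) used as the all-zeroes
 * source in vzeropage()'s inline assembly; its previous contents are
 * saved on the stack around the operation.
 */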

void
vzeropage(paddr_t pa)
{
	const paddr_t ea = pa + PAGE_SIZE;
	uint32_t vec[7], *vp = (void *) roundup((uintptr_t) vec, 16);
	register_t omsr, msr;

	__asm volatile("mfmsr %0" : "=r"(omsr) :);

	/*
	 * Turn on AltiVec, turn off interrupts.
	 */
	msr = (omsr & ~PSL_EE) | PSL_VEC;
	__asm volatile("sync; mtmsr %0; isync" :: "r"(msr));

	/*
	 * Save the VEC register we are going to use before we disable
	 * relocation.
	 */
	__asm("stvx %1,0,%0" :: "r"(vp), "n"(ZERO_VEC));
	__asm("vxor %0,%0,%0" :: "n"(ZERO_VEC));

	/*
	 * Zero the page using a single cache line.
	 */
	__asm volatile(
	    "   sync ;"
	    "   mfmsr  %[msr];"
	    "   rlwinm %[msr],%[msr],0,28,26;"	/* Clear PSL_DR */
	    "   mtmsr  %[msr];"			/* Turn off DMMU */
	    "   isync;"
	    "1: stvx   %[zv], %[pa], %[off0];"
	    "   stvxl  %[zv], %[pa], %[off16];"
	    "   stvx   %[zv], %[pa], %[off32];"
	    "   stvxl  %[zv], %[pa], %[off48];"
	    "   addi   %[pa], %[pa], 64;"
	    "   cmplw  %[pa], %[ea];"
	    "	blt+   1b;"
	    "   ori    %[msr], %[msr], 0x10;"	/* Set PSL_DR */
	    "   sync;"
	    "	mtmsr  %[msr];"			/* Turn on DMMU */
	    "   isync;"
	    :: [msr] "r"(msr), [pa] "b"(pa), [ea] "b"(ea),
	    [off0] "r"(0), [off16] "r"(16), [off32] "r"(32), [off48] "r"(48),
	    [zv] "n"(ZERO_VEC));

	/*
	 * Restore VEC register (now that we can access the stack again).
	 */
	__asm("lvx %1,0,%0" :: "r"(vp), "n"(ZERO_VEC));

	/*
	 * Restore old MSR (AltiVec OFF).
	 */
	__asm volatile("sync; mtmsr %0; isync" :: "r"(omsr));
}

#define LO_VEC	16
#define HI_VEC	17
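/*
 * LO_VEC and HI_VEC name the two vector registers (v16/v17) that
 * vcopypage() streams data through; as above, their previous contents
 * are saved on the stack around the copy.
 */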

void
vcopypage(paddr_t dst, paddr_t src)
{
	const paddr_t edst = dst + PAGE_SIZE;
	uint32_t vec[11], *vp = (void *) roundup((uintptr_t) vec, 16);
	register_t omsr, msr;

	__asm volatile("mfmsr %0" : "=r"(omsr) :);

	/*
	 * Turn on AltiVec, turn off interrupts.
	 */
	msr = (omsr & ~PSL_EE) | PSL_VEC;
	__asm volatile("sync; mtmsr %0; isync" :: "r"(msr));

	/*
	 * Save the VEC registers we will be using before we disable
	 * relocation.
	 */
	__asm("stvx %2,%1,%0" :: "b"(vp), "r"( 0), "n"(LO_VEC));
	__asm("stvx %2,%1,%0" :: "b"(vp), "r"(16), "n"(HI_VEC));

	/*
	 * Copy the page using a single cache line, with DMMU
	 * disabled.  On most PPCs, two vector registers occupy one
	 * cache line.
	 */
	__asm volatile(
	    "   sync ;"
	    "   mfmsr  %[msr];"
	    "   rlwinm %[msr],%[msr],0,28,26;"	/* Clear PSL_DR */
	    "   mtmsr  %[msr];"			/* Turn off DMMU */
	    "   isync;"
	    "1: lvx    %[lv], %[src], %[off0];"
	    "   stvx   %[lv], %[dst], %[off0];"
	    "   lvxl   %[hv], %[src], %[off16];"
	    "   stvxl  %[hv], %[dst], %[off16];"
	    "   addi   %[src], %[src], 32;"
	    "   addi   %[dst], %[dst], 32;"
	    "   cmplw  %[dst], %[edst];"
	    "	blt+   1b;"
	    "   ori    %[msr], %[msr], 0x10;"	/* Set PSL_DR */
	    "   sync;"
	    "	mtmsr  %[msr];"			/* Turn on DMMU */
	    "   isync;"
	    :: [msr] "r"(msr), [src] "b"(src), [dst] "b"(dst),
	    [edst] "b"(edst), [off0] "r"(0), [off16] "r"(16),
	    [lv] "n"(LO_VEC), [hv] "n"(HI_VEC));

	/*
	 * Restore VEC registers (now that we can access the stack again).
	 */
	__asm("lvx %2,%1,%0" :: "b"(vp), "r"( 0), "n"(LO_VEC));
	__asm("lvx %2,%1,%0" :: "b"(vp), "r"(16), "n"(HI_VEC));

	/*
	 * Restore old MSR (AltiVec OFF).
	 */
	__asm volatile("sync; mtmsr %0; isync" :: "r"(omsr));
}
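
/*
 * vzeropage() and vcopypage() are AltiVec-assisted page zero/copy
 * helpers; per the uvm_extern.h comment above they are intended to
 * back the pmap's page zero/copy paths when AltiVec is present (the
 * exact call sites are an assumption; they live in the pmap code).
 * Both keep external interrupts disabled for the whole operation,
 * since the inline assembly runs with data translation turned off.
 */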