/*-
 * Copyright (c) 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <machine/asm.h>

#include "assym.h"

RCSID("$NetBSD: spe_subr.S,v 1.6 2021/07/24 21:31:34 andvar Exp $")

	.text
#ifndef __clang__
	.machine	e500x2
#endif
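
/*
 * As the offsets used below imply, the vreg starts with the 32 saved
 * upper halves of r0-r31 as consecutive 32-bit words (hence the
 * (N << 2) displacements), followed by the 64-bit accumulator at
 * offset 8 << 4 (128).
 */
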
/*
 * Lintstub: void vec_load_from_vreg(const struct vreg *);
 */
ENTRY(vec_load_from_vreg)
	/*
	 * Load and initialize the accumulator.  Don't care about trashing
	 * r0's high half since we are about to load it.
	 */
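	/*
	 * evldd fetches the 64-bit accumulator image saved at offset 8 << 4
	 * (the value stored by vec_unload_to_vreg below); evmra then copies
	 * it from r0 into the accumulator.
	 */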
	evldd %r0,(8 << 4)(%r3)
	evmra %r0,%r0

	/*
	 * SPEFSCR is in the trapframe.
	 */

	/*
	 * Since the high parts of the registers are saved in the vreg we
	 * are loading from, and we know SPE is on (we need it to do the
	 * load), just load two high parts at a time and merge appropriately.
	 * For each even-odd register pair, the evldw loads the high part for
	 * the even register into the high 32 bits and the high part for the
	 * odd register into the low 32 bits.
	 *
	 * The evmergelo moves the lower half of r0 to the high half of the
	 * destination register, and evmergehilo merges the high half of r0
	 * with the lower half of the other register.
	 */
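	/*
	 * For example, for the r30/r31 pair:
	 *   evldw       %r0,(30 << 2)(%r3)	r0  = hi(r30) : hi(r31)
	 *   evmergelo   %r31,%r0,%r31		r31 = hi(r31) : lo(r31)
	 *   evmergehilo %r30,%r0,%r30		r30 = hi(r30) : lo(r30)
	 */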
	evldw %r0,(30 << 2)(%r3)
		evmergelo %r31,%r0,%r31; evmergehilo %r30,%r0,%r30
	evldw %r0,(28 << 2)(%r3)
		evmergelo %r29,%r0,%r29; evmergehilo %r28,%r0,%r28
	evldw %r0,(26 << 2)(%r3)
		evmergelo %r27,%r0,%r27; evmergehilo %r26,%r0,%r26
	evldw %r0,(24 << 2)(%r3)
		evmergelo %r25,%r0,%r25; evmergehilo %r24,%r0,%r24
	evldw %r0,(22 << 2)(%r3)
		evmergelo %r23,%r0,%r23; evmergehilo %r22,%r0,%r22
	evldw %r0,(20 << 2)(%r3)
		evmergelo %r21,%r0,%r21; evmergehilo %r20,%r0,%r20
	evldw %r0,(18 << 2)(%r3)
		evmergelo %r19,%r0,%r19; evmergehilo %r18,%r0,%r18
	evldw %r0,(16 << 2)(%r3)
		evmergelo %r17,%r0,%r17; evmergehilo %r16,%r0,%r16
	evldw %r0,(14 << 2)(%r3)
		evmergelo %r15,%r0,%r15; evmergehilo %r14,%r0,%r14
	evldw %r0,(12 << 2)(%r3)
		evmergelo %r13,%r0,%r13; evmergehilo %r12,%r0,%r12
	/*
	 * Done with the callee-saved registers.  For the caller-saved
	 * registers we can simply trash their contents, so load the two
	 * words holding the high halves directly into the even register of
	 * the pair, then merge the odd register's high half out of it into
	 * the odd register.
	 */
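	/*
	 * For example, the first evldw below leaves hi(r10) directly in
	 * r10's upper half and hi(r11) in r10's lower half, so only r11
	 * needs an evmergelo.
	 */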
	evldw %r10,(10 << 2)(%r3)
		evmergelo %r11,%r10,%r11
	evldw %r8,(8 << 2)(%r3)
		evmergelo %r9,%r8,%r9
	evldw %r6,(6 << 2)(%r3)
		evmergelo %r7,%r6,%r7
	evldw %r4,(4 << 2)(%r3)
		evmergelo %r5,%r4,%r5

	/*
	 * R2 isn't a register we can trash like the caller-saved ones above,
	 * and we still need r3 as the vreg pointer, so load into r0.
	 */
	evldw %r0,(2 << 2)(%r3)
		evmergelo %r3,%r0,%r3; evmergehilo %r2,%r0,%r2
	evldd %r0,(0 << 2)(%r3)
		evmergelo %r1,%r0,%r1		/* why bother? */

	blr
END(vec_load_from_vreg)

/*
 * Lintstub: void vec_unload_to_vreg(struct vreg *);
 */
ENTRY(vec_unload_to_vreg)
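	/*
	 * r0 is needed as scratch for every evmergehi/evstdw pair below, so
	 * park its high half in the low word of r4 first; it is put back
	 * just before returning.
	 */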
	evmergehi %r4,%r4,%r0;	/* save r0's high part in lo r4 */
	evmergehi %r0,%r0,%r1; evstdw %r0,(0 << 2)(%r3)
	evmergehi %r0,%r2,%r3; evstdw %r0,(2 << 2)(%r3)
	evmergehi %r0,%r4,%r5; evstdw %r0,(4 << 2)(%r3)
	evmergehi %r0,%r6,%r7; evstdw %r0,(6 << 2)(%r3)
	evmergehi %r0,%r8,%r9; evstdw %r0,(8 << 2)(%r3)
	evmergehi %r0,%r10,%r11; evstdw %r0,(10 << 2)(%r3)
	evmergehi %r0,%r12,%r13; evstdw %r0,(12 << 2)(%r3)
	evmergehi %r0,%r14,%r15; evstdw %r0,(14 << 2)(%r3)
	evmergehi %r0,%r16,%r17; evstdw %r0,(16 << 2)(%r3)
	evmergehi %r0,%r18,%r19; evstdw %r0,(18 << 2)(%r3)
	evmergehi %r0,%r20,%r21; evstdw %r0,(20 << 2)(%r3)
	evmergehi %r0,%r22,%r23; evstdw %r0,(22 << 2)(%r3)
	evmergehi %r0,%r24,%r25; evstdw %r0,(24 << 2)(%r3)
	evmergehi %r0,%r26,%r27; evstdw %r0,(26 << 2)(%r3)
	evmergehi %r0,%r28,%r29; evstdw %r0,(28 << 2)(%r3)
	evmergehi %r0,%r30,%r31; evstdw %r0,(30 << 2)(%r3)

	/*
	 * Now save the accumulator.
	 */
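	/*
	 * The accumulator is read back by adding it to a zeroed r0:
	 * evaddumiaaw writes ACC + r0 into both r0 and the accumulator, so
	 * with r0 == 0 this copies ACC into r0 without changing it.
	 */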
	evxor %r0,%r0,%r0		/* zero r0 */
	evaddumiaaw %r0,%r0		/* r0 = accum + r0 */
	evstdd %r0,(8 << 4)(%r3)	/* store it */

	evmergelo %r0,%r4,%r0		/* restore r0's high half */

	/*
	 * The SPEFSCR will be restored when the exception returns.
	 */
	blr
END(vec_unload_to_vreg)