/*	$NetBSD: sxreg.h,v 1.11 2013/06/19 00:41:16 macallan Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Michael Lorenz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/* register definitions for Sun's SX / SPAM rendering engine */

#ifndef SXREG_H
#define SXREG_H

/* SX control registers */
#define SX_CONTROL_STATUS	0x00000000
#define SX_ERROR		0x00000004
#define SX_PAGE_BOUND_LOWER	0x00000008
#define SX_PAGE_BOUND_UPPER	0x0000000c
#define SX_PLANEMASK		0x00000010
#define SX_ROP_CONTROL		0x00000014	/* 8 bit ROP */
#define SX_IQ_OVERFLOW_COUNTER	0x00000018
#define SX_DIAGNOSTICS		0x0000001c
#define SX_INSTRUCTIONS		0x00000020
#define SX_ID			0x00000028
#define SX_R0_INIT		0x0000002c
#define SX_SOFTRESET		0x00000030
/* write registers directly, only when processor is stopped */
#define SX_DIRECT_R0		0x00000100
#define SX_DIRECT_R1		0x00000104	/* and so on until R127 */
/* write registers via pseudo instructions */
#define SX_QUEUED_R0		0x00000300
#define SX_QUEUED_R1		0x00000304	/* and so on until R127 */
#define SX_QUEUED(r)		(0x300 + ((r) << 2))
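
/*
 * Usage sketch (added for illustration, not part of the original header):
 * setting an SX register through the queued window with bus_space(9).
 * The tag/handle and the function name are assumptions about the driver,
 * not anything this header defines.
 */
#if 0	/* example only */
#include <sys/bus.h>

static inline void
sx_queued_write(bus_space_tag_t t, bus_space_handle_t h, int r, uint32_t v)
{
	/* queued writes execute in order with other queued instructions */
	bus_space_write_4(t, h, SX_QUEUED(r), v);
}
#endif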

/* special purpose registers */
#define R_ZERO	0
#define R_SCAM	1
#define R_MASK	2	/* bitmask for SX_STORE_SELECT */

/*
 * registers are repeated at 0x1000 with certain parts read only
 * (like the PAGE_BOUND registers) which userland has no business writing to
 */

/* SX_CONTROL_STATUS */
#define SX_EE1		0x00000001	/* illegal instruction */
#define SX_EE2		0x00000002	/* page bound error */
#define SX_EE3		0x00000004	/* illegal memory access */
#define SX_EE4		0x00000008	/* illegal register access */
#define SX_EE5		0x00000010	/* alignment violation */
#define SX_EE6		0x00000020	/* illegal instruction queue write */
#define SX_EI		0x00000080	/* interrupt on error */
#define SX_PB		0x00001000	/* enable page bound checking */
#define SX_WO		0x00002000	/* write occurred (by SX) */
#define SX_GO		0x00004000	/* start/stop the processor */
#define SX_MT		0x00008000	/* instruction queue is empty */
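
/*
 * Sketch (an assumption about typical use, not from the original header):
 * start the processor with error interrupts enabled, then wait for the
 * instruction queue to drain.  sx_read()/sx_write() are hypothetical
 * register accessors.
 */
#if 0	/* example only */
sx_write(sc, SX_CONTROL_STATUS,
    sx_read(sc, SX_CONTROL_STATUS) | SX_EI | SX_GO);
while ((sx_read(sc, SX_CONTROL_STATUS) & SX_MT) == 0)
	continue;	/* instructions still pending */
#endif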

/* SX_ERROR */
#define SX_SE1		0x00000001	/* illegal instruction */
#define SX_SE2		0x00000002	/* page bound error */
#define SX_SE3		0x00000004	/* illegal memory access */
#define SX_SE4		0x00000008	/* illegal register access */
#define SX_SE5		0x00000010	/* alignment violation */
#define SX_SE6		0x00000020	/* illegal instruction queue write */
#define SX_SI		0x00000080	/* interrupt on error */

/* SX_ID */
#define SX_ARCHITECTURE_MASK	0x000000ff
#define SX_CHIP_REVISION	0x0000ff00
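
/*
 * Sketch (illustration only): pulling the two SX_ID fields apart; the
 * shift for the revision field follows directly from its mask.
 * sx_read() is a hypothetical accessor.
 */
#if 0	/* example only */
uint32_t id = sx_read(sc, SX_ID);
int arch = id & SX_ARCHITECTURE_MASK;
int rev  = (id & SX_CHIP_REVISION) >> 8;
#endif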

/* SX_DIAGNOSTICS */
#define SX_IQ_FIFO_ACCESS	0x00000001	/* allow memory instructions
						 * in SX_INSTRUCTIONS */

/*
 * memory referencing instructions are written to 0x800000000 + PA
 * so we have to go through ASI 0x28 (ASI_BYPASS + 8)
 */
#define ASI_SX	0x28
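
/*
 * Sketch (an assumption, not from the original header): issuing a memory
 * referencing instruction through the bypass ASI.  Only the low 32 bits
 * of the physical address go in the address register; per the comment
 * above, ASI 0x28 supplies the 0x8 upper nibble of the 36 bit PA.
 */
#if 0	/* example only */
static inline void
sx_issue(uint32_t pa_lo, uint32_t insn)
{
	__asm volatile("sta %0, [%1] %2"
	    : /* no outputs */
	    : "r" (insn), "r" (pa_lo), "i" (ASI_SX)
	    : "memory");
}
#endif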

/* load / store instructions */
#define SX_STORE_COND	(0x4 << 19)	/* conditional write with mask */
#define SX_STORE_CLAMP	(0x2 << 19)
#define SX_STORE_MASK	(0x1 << 19)	/* apply plane mask */
#define SX_STORE_SELECT	(0x8 << 19)	/* expand with plane reg dest[0]/dest[1] */
#define SX_LOAD		(0xa << 19)
#define SX_STORE	(0x0 << 19)

/* data type */
#define SX_UBYTE_0	(0x00 << 14)
#define SX_UBYTE_8	(0x01 << 14)
#define SX_UBYTE_16	(0x02 << 14)
#define SX_UBYTE_24	(0x03 << 14)
#define SX_SBYTE_0	(0x04 << 14)
#define SX_SBYTE_8	(0x05 << 14)
#define SX_SBYTE_16	(0x06 << 14)
#define SX_SBYTE_24	(0x07 << 14)
#define SX_UQUAD_0	(0x08 << 14)
#define SX_UQUAD_8	(0x09 << 14)
#define SX_UQUAD_16	(0x0a << 14)
#define SX_UQUAD_24	(0x0b << 14)
#define SX_SQUAD_0	(0x0c << 14)
#define SX_SQUAD_8	(0x0d << 14)
#define SX_SQUAD_16	(0x0e << 14)
#define SX_SQUAD_24	(0x0f << 14)
#define SX_UCHAN_0	(0x10 << 14)
#define SX_UCHAN_8	(0x11 << 14)
#define SX_UCHAN_16	(0x12 << 14)
#define SX_UCHAN_24	(0x13 << 14)
#define SX_SCHAN_0	(0x14 << 14)
#define SX_SCHAN_8	(0x15 << 14)
#define SX_SCHAN_16	(0x16 << 14)
#define SX_SCHAN_24	(0x17 << 14)
#define SX_USHORT_0	(0x18 << 14)
#define SX_USHORT_8	(0x19 << 14)
#define SX_USHORT_16	(0x1a << 14)
#define SX_SSHORT_0	(0x1c << 14)
#define SX_SSHORT_8	(0x1d << 14)
#define SX_SSHORT_16	(0x1e << 14)
#define SX_LONG		(0x1b << 14)
#define SX_PACKED	(0x1f << 14)


#define SX_LD(dreg, cnt, o)  (0x80000000 | ((cnt) << 23) | SX_LOAD | \
				SX_LONG | ((dreg) << 7) | (o))
#define SX_LDB(dreg, cnt, o) (0x80000000 | ((cnt) << 23) | SX_LOAD | \
				SX_UBYTE_0 | ((dreg) << 7) | (o))
#define SX_LDP(dreg, cnt, o) (0x80000000 | ((cnt) << 23) | SX_LOAD | \
				SX_PACKED | ((dreg) << 7) | (o))
#define SX_LDUQ0(dreg, cnt, o) (0x80000000 | ((cnt) << 23) | SX_LOAD | \
				SX_UQUAD_0 | ((dreg) << 7) | (o))
#define SX_LDUQ8(dreg, cnt, o) (0x80000000 | ((cnt) << 23) | SX_LOAD | \
				SX_UQUAD_8 | ((dreg) << 7) | (o))
#define SX_LDUQ16(dreg, cnt, o) (0x80000000 | ((cnt) << 23) | SX_LOAD | \
				SX_UQUAD_16 | ((dreg) << 7) | (o))
#define SX_LDUQ24(dreg, cnt, o) (0x80000000 | ((cnt) << 23) | SX_LOAD | \
				SX_UQUAD_24 | ((dreg) << 7) | (o))
#define SX_ST(sreg, cnt, o)  (0x80000000 | ((cnt) << 23) | SX_STORE | \
				SX_LONG | ((sreg) << 7) | (o))
#define SX_STM(sreg, cnt, o)  (0x80000000 | ((cnt) << 23) | SX_STORE_MASK | \
				SX_LONG | ((sreg) << 7) | (o))
#define SX_STB(sreg, cnt, o) (0x80000000 | ((cnt) << 23) | SX_STORE | \
				SX_UBYTE_0 | ((sreg) << 7) | (o))
#define SX_STBC(sreg, cnt, o) (0x80000000 | ((cnt) << 23) | SX_STORE_CLAMP | \
				SX_UBYTE_0 | ((sreg) << 7) | (o))
#define SX_STP(sreg, cnt, o) (0x80000000 | ((cnt) << 23) | SX_STORE | \
				SX_PACKED | ((sreg) << 7) | (o))
#define SX_STS(sreg, cnt, o) (0x80000000 | ((cnt) << 23) | SX_STORE_SELECT \
				| SX_LONG | ((sreg) << 7) | (o))
#define SX_STBS(reg, cnt, o) (0x80000000 | ((cnt) << 23) | SX_STORE_SELECT \
				| SX_UBYTE_0 | ((reg) << 7) | (o))
#define SX_STUQ0(sreg, cnt, o) (0x80000000 | ((cnt) << 23) | SX_STORE | \
				SX_UQUAD_0 | ((sreg) << 7) | (o))
#define SX_STUQ0C(sreg, cnt, o) (0x80000000 | ((cnt) << 23) | SX_STORE_CLAMP | \
				SX_UQUAD_0 | ((sreg) << 7) | (o))
#define SX_STUQ8(sreg, cnt, o) (0x80000000 | ((cnt) << 23) | SX_STORE | \
				SX_UQUAD_8 | ((sreg) << 7) | (o))
#define SX_STUQ16(sreg, cnt, o) (0x80000000 | ((cnt) << 23) | SX_STORE | \
				SX_UQUAD_16 | ((sreg) << 7) | (o))
#define SX_STUQ24(sreg, cnt, o) (0x80000000 | ((cnt) << 23) | SX_STORE | \
				SX_UQUAD_24 | ((sreg) << 7) | (o))
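
/*
 * Usage sketch (illustration only, not from the original header):
 * composing load/store instruction words.  With SX_IQ_FIFO_ACCESS set
 * in SX_DIAGNOSTICS, memory instructions may be fed through
 * SX_INSTRUCTIONS (see above).  Whether the count field holds n or
 * n - 1 should be checked against the SX documentation; sx_write()
 * is a hypothetical accessor.
 */
#if 0	/* example only */
/* load longwords starting at R8 from offset 0, then store them back */
sx_write(sc, SX_INSTRUCTIONS, SX_LD(8, 15, 0));
sx_write(sc, SX_INSTRUCTIONS, SX_ST(8, 15, 0));
#endif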

/* ROP and SELECT instructions */
#define SX_ROPB	(0x0 << 21)	/* mask bits apply to bytes */
#define SX_ROPM	(0x1 << 21)	/* mask bits apply to each bit */
#define SX_ROPL	(0x2 << 21)	/* mask bits apply per register */
#define SX_SELB	(0x4 << 21)	/* byte select scalar */
#define SX_SELV (0x6 << 21)	/* register select vector */
#define SX_SELS (0x7 << 21)	/* register select scalar */

#define SX_ROP(sa, sb, d, cnt) (0x90000000 | ((cnt) << 24) | SX_ROPL | \
		((sa) << 14) | (sb) | ((d) << 7))
#define SX_SELECT_S(sa, sb, d, cnt) (0x90000000 | ((cnt) << 24) | SX_SELS | \
		((sa) << 14) | (sb) | ((d) << 7))
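
/*
 * Sketch (assumption about typical use): a vector ROP of R8.. with
 * R16.. into R24.., where the boolean function presumably comes from
 * SX_ROP_CONTROL (the "8 bit ROP" register above).  The ROP value and
 * sx_write() are hypothetical.
 */
#if 0	/* example only */
sx_write(sc, SX_ROP_CONTROL, 0xcc);	/* hypothetical ROP code */
sx_write(sc, SX_INSTRUCTIONS, SX_ROP(8, 16, 24, 15));
#endif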

/* multiply group */
#define SX_M16X16SR0	(0x0 << 28)	/* 16bit multiply, no shift */
#define SX_M16X16SR8	(0x1 << 28)	/* 16bit multiply, shift right 8 */
#define SX_M16X16SR16	(0x2 << 28)	/* 16bit multiply, shift right 16 */
#define SX_M32X16SR0	(0x4 << 28)	/* 32x16bit multiply, no shift */
#define SX_M32X16SR8	(0x5 << 28)	/* 32x16bit multiply, shift right 8 */
#define SX_M32X16SR16	(0x6 << 28)	/* 32x16bit multiply, shift right 16 */

#define SX_MULTIPLY	(0x0 << 21)	/* normal multiplication */
#define SX_DOT		(0x1 << 21)	/* dot product of A and B */
#define SX_SAXP		(0x2 << 21)	/* A * SCAM + B */

#define SX_ROUND	(0x1 << 23)	/* round results */

#define SX_MUL16X16(sa, sb, d, cnt) (SX_M16X16SR0 | ((cnt) << 24) | \
		SX_MULTIPLY | ((sa) << 14) | ((d) << 7) | (sb))
#define SX_MUL16X16R(sa, sb, d, cnt) (SX_M16X16SR0 | ((cnt) << 24) | \
		SX_MULTIPLY | ((sa) << 14) | ((d) << 7) | (sb) | SX_ROUND)
#define SX_MUL16X16SR8(sa, sb, d, cnt) (SX_M16X16SR8 | ((cnt) << 24) | \
		SX_MULTIPLY | ((sa) << 14) | ((d) << 7) | (sb))
#define SX_MUL16X16SR8R(sa, sb, d, cnt) (SX_M16X16SR8 | ((cnt) << 24) | \
		SX_MULTIPLY | ((sa) << 14) | ((d) << 7) | (sb) | SX_ROUND)

#define SX_SAXP16X16(sa, sb, d, cnt) (SX_M16X16SR0 | ((cnt) << 24) | \
		SX_SAXP | ((sa) << 14) | ((d) << 7) | (sb))
#define SX_SAXP16X16R(sa, sb, d, cnt) (SX_M16X16SR0 | ((cnt) << 24) | \
		SX_SAXP | ((sa) << 14) | ((d) << 7) | (sb) | SX_ROUND)
#define SX_SAXP16X16SR8(sa, sb, d, cnt) (SX_M16X16SR8 | ((cnt) << 24) | \
		SX_SAXP | ((sa) << 14) | ((d) << 7) | (sb))
#define SX_SAXP16X16SR8R(sa, sb, d, cnt) (SX_M16X16SR8 | ((cnt) << 24) | \
		SX_SAXP | ((sa) << 14) | ((d) << 7) | (sb) | SX_ROUND)
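
/*
 * Sketch (assumption about typical use): a scaled, rounded SAXP, i.e.
 * A * SCAM + B per the comment above, with the SCAM register loaded
 * first.  Exactly where the >> 8 applies in the computation should be
 * taken from the SX documentation; "alpha" and sx_write() are
 * hypothetical.
 */
#if 0	/* example only */
sx_write(sc, SX_QUEUED(R_SCAM), alpha);	/* scalar coefficient */
sx_write(sc, SX_INSTRUCTIONS, SX_SAXP16X16SR8R(8, 16, 24, 15));
#endif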

/* logic group */
#define SX_AND_V	(0x0 << 21)	/* vector AND vector */
#define SX_AND_S	(0x1 << 21)	/* vector AND scalar */
#define SX_AND_I	(0x2 << 21)	/* vector AND immediate */
#define SX_XOR_V	(0x3 << 21)	/* vector XOR vector */
#define SX_XOR_S	(0x4 << 21)	/* vector XOR scalar */
#define SX_XOR_I	(0x5 << 21)	/* vector XOR immediate */
#define SX_OR_V		(0x6 << 21)	/* vector OR vector */
#define SX_OR_S		(0x7 << 21)	/* vector OR scalar */
/* immediates are 7 bit, sign extended to 32 bit */

#define SX_ANDV(sa, sb, d, cnt) (0xb0000000 | ((cnt) << 24) | SX_AND_V | \
		((sa) << 14) | ((d) << 7) | (sb))
#define SX_ANDS(sa, sb, d, cnt) (0xb0000000 | ((cnt) << 24) | SX_AND_S | \
		((sa) << 14) | ((d) << 7) | (sb))
#define SX_ANDI(sa, sb, d, cnt) (0xb0000000 | ((cnt) << 24) | SX_AND_I | \
		((sa) << 14) | ((d) << 7) | (sb))
#define SX_XORV(sa, sb, d, cnt) (0xb0000000 | ((cnt) << 24) | SX_XOR_V | \
		((sa) << 14) | ((d) << 7) | (sb))
#define SX_XORS(sa, sb, d, cnt) (0xb0000000 | ((cnt) << 24) | SX_XOR_S | \
		((sa) << 14) | ((d) << 7) | (sb))
#define SX_XORI(sa, sb, d, cnt) (0xb0000000 | ((cnt) << 24) | SX_XOR_I | \
		((sa) << 14) | ((d) << 7) | (sb))
#define SX_ORV(sa, sb, d, cnt) (0xb0000000 | ((cnt) << 24) | SX_OR_V | \
		((sa) << 14) | ((d) << 7) | (sb))
#define SX_ORS(sa, sb, d, cnt) (0xb0000000 | ((cnt) << 24) | SX_OR_S | \
		((sa) << 14) | ((d) << 7) | (sb))
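
/*
 * Sketch (illustration only): the immediate forms put the literal in
 * the sb field, so it is limited to 7 bits before sign extension.
 * sx_write() is a hypothetical accessor.
 */
#if 0	/* example only */
sx_write(sc, SX_INSTRUCTIONS, SX_ANDI(8, 0x3f, 8, 15));	/* R8.. &= 0x3f */
#endif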

/* arithmetic group */
#define SX_ADD_V	(0x00 << 21)	/* vector + vector */
#define SX_ADD_S	(0x01 << 21)	/* vector + scalar */
#define SX_ADD_I	(0x02 << 21)	/* vector + immediate */
#define SX_SUM		(0x03 << 21)	/* sum of vector and scalar */
#define SX_SUB_V	(0x04 << 21)	/* vector - vector */
#define SX_SUB_S	(0x05 << 21)	/* vector - scalar */
#define SX_SUB_I	(0x06 << 21)	/* vector - immediate */
#define SX_ABS		(0x07 << 21)	/* abs(sb) with sa=R0 */
/* hardware does sa - sb for sb < 0 and sa + sb if sb > 0 */

#define SX_ADDV(sa, sb, d, cnt) (0xa0000000 | ((cnt) << 24) | SX_ADD_V | \
		((sa) << 14) | ((d) << 7) | (sb))
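
/*
 * Sketch (illustration only): the arithmetic encodings mirror the logic
 * group above, e.g. a plain vector add; sx_write() is hypothetical.
 */
#if 0	/* example only */
sx_write(sc, SX_INSTRUCTIONS, SX_ADDV(8, 16, 24, 15));	/* R24.. = R8.. + R16.. */
#endif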

#endif /* SXREG_H */