xref: /netbsd-src/sys/arch/sparc/dev/sxreg.h (revision bdc22b2e01993381dcefeff2bc9b56ca75a4235c)
1 /*	$NetBSD: sxreg.h,v 1.16 2017/12/08 22:28:54 macallan Exp $	*/
2 
3 /*-
4  * Copyright (c) 2013 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Michael Lorenz.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /* register definitions for Sun's SX / SPAM rendering engine */
33 
#ifndef SXREG_H
#define SXREG_H

/* SX control registers - byte offsets from the register bank's base */
#define SX_CONTROL_STATUS	0x00000000	/* bit definitions below (SX_EE*, SX_GO, ...) */
#define SX_ERROR		0x00000004	/* bit definitions below (SX_SE*, SX_SI) */
#define SX_PAGE_BOUND_LOWER	0x00000008
#define SX_PAGE_BOUND_UPPER	0x0000000c
#define SX_PLANEMASK		0x00000010
#define SX_ROP_CONTROL		0x00000014	/* 8 bit ROP */
#define SX_IQ_OVERFLOW_COUNTER	0x00000018
#define SX_DIAGNOSTICS		0x0000001c
#define SX_INSTRUCTIONS		0x00000020
#define SX_ID			0x00000028	/* field masks below (SX_ARCHITECTURE_MASK, ...) */
#define SX_R0_INIT		0x0000002c
#define SX_SOFTRESET		0x00000030
/* write registers directly, only when processor is stopped */
#define SX_DIRECT_R0		0x00000100
#define SX_DIRECT_R1		0x00000104	/* and so on until R127 */
/* write registers via pseudo instructions */
#define SX_QUEUED_R0		0x00000300
#define SX_QUEUED_R1		0x00000304	/* and so on until R127 */
#define SX_QUEUED(r)		(0x300 + ((r) << 2))	/* queued-write offset for register r */

/* special purpose registers */
#define R_ZERO	0	/* presumably hardwired to zero (see SX_R0_INIT) — TODO confirm */
#define R_SCAM	1	/* scalar operand used by the SX_SAXP instructions below */
#define R_MASK	2	/* bitmask for SX_STORE_SELECT */
62 
/*
 * The registers are repeated at offset 0x1000, with certain parts
 * (like the PAGE_BOUND registers) read-only, since userland has no
 * business writing to them.
 */
67 
/* SX_CONTROL_STATUS */
/* EE1..EE6 correspond to the same conditions as the SX_SE* status bits below */
#define SX_EE1		0x00000001	/* illegal instruction */
#define SX_EE2		0x00000002	/* page bound error */
#define SX_EE3		0x00000004	/* illegal memory access */
#define SX_EE4		0x00000008	/* illegal register access */
#define SX_EE5		0x00000010	/* alignment violation */
#define SX_EE6		0x00000020	/* illegal instruction queue write */
#define SX_EI		0x00000080	/* interrupt on error */
#define SX_PB		0x00001000	/* enable page bound checking */
#define SX_WO		0x00002000	/* write occurred ( by SX ) */
#define SX_GO		0x00004000	/* start/stop the processor */
#define SX_MT		0x00008000	/* instruction queue is empty */
80 
/* SX_ERROR */
/* same conditions as the SX_EE* bits in SX_CONTROL_STATUS above */
#define SX_SE1		0x00000001	/* illegal instruction */
#define SX_SE2		0x00000002	/* page bound error */
#define SX_SE3		0x00000004	/* illegal memory access */
#define SX_SE4		0x00000008	/* illegal register access */
#define SX_SE5		0x00000010	/* alignment violation */
#define SX_SE6		0x00000020	/* illegal instruction queue write */
#define SX_SI		0x00000080	/* interrupt on error */
89 
/* SX_ID - field masks for the identification register */
#define SX_ARCHITECTURE_MASK	0x000000ff
#define SX_CHIP_REVISION	0x0000ff00

/* SX_DIAGNOSTICS */
#define SX_IQ_FIFO_ACCESS	0x00000001	/* allow memory instructions
						 * in SX_INSTRUCTIONS */

/*
 * memory referencing instructions are written to 0x800000000 + PA
 * so we have to go through ASI 0x28 ( ASI_BYPASS + 8 )
 */
#define ASI_SX	0x28
103 
/* load / store instructions - opcode field at bit 19 */
#define SX_STORE_COND	(0x4 << 19)	/* conditional write with mask */
#define SX_STORE_CLAMP	(0x2 << 19)
#define SX_STORE_MASK	(0x1 << 19)	/* apply plane mask */
#define SX_STORE_SELECT	(0x8 << 19)	/* expand with plane reg dest[0]/dest[1] */
#define SX_LOAD		(0xa << 19)
#define SX_STORE	(0x0 << 19)

/* data type - field at bit 14 */
#define SX_UBYTE_0	(0x00 << 14)
#define SX_UBYTE_8	(0x01 << 14)
#define SX_UBYTE_16	(0x02 << 14)
#define SX_UBYTE_24	(0x03 << 14)
#define SX_SBYTE_0	(0x04 << 14)
#define SX_SBYTE_8	(0x05 << 14)
#define SX_SBYTE_16	(0x06 << 14)
#define SX_SBYTE_24	(0x07 << 14)
#define SX_UQUAD_0	(0x08 << 14)
#define SX_UQUAD_8	(0x09 << 14)
#define SX_UQUAD_16	(0x0a << 14)
#define SX_UQUAD_24	(0x0b << 14)
#define SX_SQUAD_0	(0x0c << 14)
#define SX_SQUAD_8	(0x0d << 14)
#define SX_SQUAD_16	(0x0e << 14)
#define SX_SQUAD_24	(0x0f << 14)
#define SX_UCHAN_0	(0x10 << 14)
#define SX_UCHAN_8	(0x11 << 14)
#define SX_UCHAN_16	(0x12 << 14)
#define SX_UCHAN_24	(0x13 << 14)
#define SX_SCHAN_0	(0x14 << 14)
#define SX_SCHAN_8	(0x15 << 14)
#define SX_SCHAN_16	(0x16 << 14)
#define SX_SCHAN_24	(0x17 << 14)
#define SX_USHORT_0	(0x18 << 14)
#define SX_USHORT_8	(0x19 << 14)
#define SX_USHORT_16	(0x1a << 14)
#define SX_SSHORT_0	(0x1c << 14)
#define SX_SSHORT_8	(0x1d << 14)
#define SX_SSHORT_16	(0x1e << 14)
#define SX_LONG		(0x1b << 14)
#define SX_PACKED	(0x1f << 14)


/*
 * Load / store instruction builders.
 *   dreg/sreg/reg - first SX register to load into / store from (bit 7)
 *   cnt           - item count (bit 23)
 *   o             - immediate offset (bit 0)
 * All macro arguments are parenthesized in the expansions, so operand
 * expressions such as (base | idx) encode correctly - previously the
 * register argument was expanded unparenthesized, and any argument
 * containing an operator of lower precedence than << was mis-encoded.
 */
#define SX_LD(dreg, cnt, o)  (0x80000000 | ((cnt) << 23) | SX_LOAD | \
				SX_LONG | ((dreg) << 7) | (o))
#define SX_LDB(dreg, cnt, o) (0x80000000 | ((cnt) << 23) | SX_LOAD | \
				SX_UBYTE_0 | ((dreg) << 7) | (o))
#define SX_LDP(dreg, cnt, o) (0x80000000 | ((cnt) << 23) | SX_LOAD | \
				SX_PACKED | ((dreg) << 7) | (o))
#define SX_LDUQ0(dreg, cnt, o) (0x80000000 | ((cnt) << 23) | SX_LOAD | \
				SX_UQUAD_0 | ((dreg) << 7) | (o))
#define SX_LDUQ8(dreg, cnt, o) (0x80000000 | ((cnt) << 23) | SX_LOAD | \
				SX_UQUAD_8 | ((dreg) << 7) | (o))
#define SX_LDUQ16(dreg, cnt, o) (0x80000000 | ((cnt) << 23) | SX_LOAD | \
				SX_UQUAD_16 | ((dreg) << 7) | (o))
#define SX_LDUQ24(dreg, cnt, o) (0x80000000 | ((cnt) << 23) | SX_LOAD | \
				SX_UQUAD_24 | ((dreg) << 7) | (o))
#define SX_LDUC0(dreg, cnt, o) (0x80000000 | ((cnt) << 23) | SX_LOAD | \
				SX_UCHAN_0 | ((dreg) << 7) | (o))
#define SX_LDUC8(dreg, cnt, o) (0x80000000 | ((cnt) << 23) | SX_LOAD | \
				SX_UCHAN_8 | ((dreg) << 7) | (o))
#define SX_LDUC16(dreg, cnt, o) (0x80000000 | ((cnt) << 23) | SX_LOAD | \
				SX_UCHAN_16 | ((dreg) << 7) | (o))
#define SX_LDUC24(dreg, cnt, o) (0x80000000 | ((cnt) << 23) | SX_LOAD | \
				SX_UCHAN_24 | ((dreg) << 7) | (o))
#define SX_ST(sreg, cnt, o)  (0x80000000 | ((cnt) << 23) | SX_STORE | \
				SX_LONG | ((sreg) << 7) | (o))
#define SX_STM(sreg, cnt, o)  (0x80000000 | ((cnt) << 23) | SX_STORE_MASK | \
				SX_LONG | ((sreg) << 7) | (o))
#define SX_STB(sreg, cnt, o) (0x80000000 | ((cnt) << 23) | SX_STORE | \
				SX_UBYTE_0 | ((sreg) << 7) | (o))
#define SX_STBM(sreg, cnt, o) (0x80000000 | ((cnt) << 23) | SX_STORE_MASK | \
				SX_UBYTE_0 | ((sreg) << 7) | (o))
#define SX_STBC(sreg, cnt, o) (0x80000000 | ((cnt) << 23) | SX_STORE_CLAMP | \
				SX_UBYTE_0 | ((sreg) << 7) | (o))
#define SX_STP(sreg, cnt, o) (0x80000000 | ((cnt) << 23) | SX_STORE | \
				SX_PACKED | ((sreg) << 7) | (o))
#define SX_STS(sreg, cnt, o) (0x80000000 | ((cnt) << 23) | SX_STORE_SELECT \
				| SX_LONG | ((sreg) << 7) | (o))
#define SX_STBS(reg, cnt, o) (0x80000000 | ((cnt) << 23) | SX_STORE_SELECT \
				| SX_UBYTE_0 | ((reg) << 7) | (o))
#define SX_STUQ0(sreg, cnt, o) (0x80000000 | ((cnt) << 23) | SX_STORE | \
				SX_UQUAD_0 | ((sreg) << 7) | (o))
#define SX_STUQ0C(sreg, cnt, o) (0x80000000 | ((cnt) << 23) | SX_STORE_CLAMP | \
				SX_UQUAD_0 | ((sreg) << 7) | (o))
#define SX_STUQ8(sreg, cnt, o) (0x80000000 | ((cnt) << 23) | SX_STORE | \
				SX_UQUAD_8 | ((sreg) << 7) | (o))
#define SX_STUQ16(sreg, cnt, o) (0x80000000 | ((cnt) << 23) | SX_STORE | \
				SX_UQUAD_16 | ((sreg) << 7) | (o))
#define SX_STUQ24(sreg, cnt, o) (0x80000000 | ((cnt) << 23) | SX_STORE | \
				SX_UQUAD_24 | ((sreg) << 7) | (o))
#define SX_STUC0(sreg, cnt, o) (0x80000000 | ((cnt) << 23) | SX_STORE | \
				SX_UCHAN_0 | ((sreg) << 7) | (o))
#define SX_STUC0C(sreg, cnt, o) (0x80000000 | ((cnt) << 23) | SX_STORE_CLAMP | \
				SX_UCHAN_0 | ((sreg) << 7) | (o))
#define SX_STUC8(sreg, cnt, o) (0x80000000 | ((cnt) << 23) | SX_STORE | \
				SX_UCHAN_8 | ((sreg) << 7) | (o))
#define SX_STUC16(sreg, cnt, o) (0x80000000 | ((cnt) << 23) | SX_STORE | \
				SX_UCHAN_16 | ((sreg) << 7) | (o))
#define SX_STUC24(sreg, cnt, o) (0x80000000 | ((cnt) << 23) | SX_STORE | \
				SX_UCHAN_24 | ((sreg) << 7) | (o))
205 
/* ROP and SELECT instructions - mode field at bit 21 */
#define SX_ROPB	(0x0 << 21)	/* mask bits apply to bytes */
#define SX_ROPM	(0x1 << 21)	/* mask bits apply to each bit */
#define SX_ROPL	(0x2 << 21)	/* mask bits apply per register */
#define SX_SELB	(0x4 << 21)	/* byte select scalar */
#define SX_SELV (0x6 << 21)	/* register select vector */
#define SX_SELS (0x7 << 21)	/* register select scalar */

/* operand fields: sa at bit 14, sb at bit 0, d at bit 7, count at bit 24 */
#define SX_ROP(sa, sb, d, cnt) (0x90000000 | ((cnt) << 24) | SX_ROPL | \
		((sa) << 14) | (sb) | ((d) << 7))
#define SX_SELECT_S(sa, sb, d, cnt) (0x90000000 | ((cnt) << 24) | SX_SELS | \
		((sa) << 14) | (sb) | ((d) << 7))
218 
/* multiply group - width/shift selector at bit 28 */
#define SX_M16X16SR0	(0x0 << 28)	/* 16bit multiply, no shift */
#define SX_M16X16SR8	(0x1 << 28)	/* 16bit multiply, shift right 8 */
#define SX_M16X16SR16	(0x2 << 28)	/* 16bit multiply, shift right 16 */
#define SX_M32X16SR0	(0x4 << 28)	/* 32x16bit multiply, no shift */
#define SX_M32X16SR8	(0x5 << 28)	/* 32x16bit multiply, shift right 8 */
#define SX_M32X16SR16	(0x6 << 28)	/* 32x16bit multiply, shift right 16 */

/* operation selector at bit 21 */
#define SX_MULTIPLY	(0x0 << 21)	/* normal multiplication */
#define SX_DOT		(0x1 << 21)	/* dot product of A and B */
#define SX_SAXP		(0x2 << 21)	/* A * SCAM + B */

#define SX_ROUND	(0x1 << 23)	/* round results */

/* operand fields: sa at bit 14, sb at bit 0, d at bit 7, count at bit 24 */
#define SX_MUL16X16(sa, sb, d, cnt) (SX_M16X16SR0 | ((cnt) << 24) | \
		SX_MULTIPLY | ((sa) << 14) | ((d) << 7) | (sb))
#define SX_MUL16X16R(sa, sb, d, cnt) (SX_M16X16SR0 | ((cnt) << 24) | \
		SX_MULTIPLY | ((sa) << 14) | ((d) << 7) | (sb) | SX_ROUND)
#define SX_MUL16X16SR8(sa, sb, d, cnt) (SX_M16X16SR8 | ((cnt) << 24) | \
		SX_MULTIPLY | ((sa) << 14) | ((d) << 7) | (sb))
#define SX_MUL16X16SR8R(sa, sb, d, cnt) (SX_M16X16SR8 | ((cnt) << 24) | \
		SX_MULTIPLY | ((sa) << 14) | ((d) << 7) | (sb) | SX_ROUND)

#define SX_SAXP16X16(sa, sb, d, cnt) (SX_M16X16SR0 | ((cnt) << 24) | \
		SX_SAXP | ((sa) << 14) | ((d) << 7) | (sb))
#define SX_SAXP16X16R(sa, sb, d, cnt) (SX_M16X16SR0 | ((cnt) << 24) | \
		SX_SAXP | ((sa) << 14) | ((d) << 7) | (sb) | SX_ROUND)
#define SX_SAXP16X16SR8(sa, sb, d, cnt) (SX_M16X16SR8 | ((cnt) << 24) | \
		SX_SAXP | ((sa) << 14) | ((d) << 7) | (sb))
#define SX_SAXP16X16SR8R(sa, sb, d, cnt) (SX_M16X16SR8 | ((cnt) << 24) | \
		SX_SAXP | ((sa) << 14) | ((d) << 7) | (sb) | SX_ROUND)
250 
/* logic group - operation selector at bit 21 */
#define SX_AND_V	(0x0 << 21)	/* vector AND vector */
#define SX_AND_S	(0x1 << 21)	/* vector AND scalar */
#define SX_AND_I	(0x2 << 21)	/* vector AND immediate */
#define SX_XOR_V	(0x3 << 21)	/* vector XOR vector */
#define SX_XOR_S	(0x4 << 21)	/* vector XOR scalar */
#define SX_XOR_I	(0x5 << 21)	/* vector XOR immediate */
#define SX_OR_V		(0x6 << 21)	/* vector OR vector */
#define SX_OR_S		(0x7 << 21)	/* vector OR scalar */
/* immediates are 7bit sign extended to 32bit */

/* operand fields: sa at bit 14, sb at bit 0, d at bit 7, count at bit 24 */
#define SX_ANDV(sa, sb, d, cnt) (0xb0000000 | ((cnt) << 24) | SX_AND_V | \
		((sa) << 14) | ((d) << 7) | (sb))
#define SX_ANDS(sa, sb, d, cnt) (0xb0000000 | ((cnt) << 24) | SX_AND_S | \
		((sa) << 14) | ((d) << 7) | (sb))
#define SX_ANDI(sa, sb, d, cnt) (0xb0000000 | ((cnt) << 24) | SX_AND_I | \
		((sa) << 14) | ((d) << 7) | (sb))
#define SX_XORV(sa, sb, d, cnt) (0xb0000000 | ((cnt) << 24) | SX_XOR_V | \
		((sa) << 14) | ((d) << 7) | (sb))
#define SX_XORS(sa, sb, d, cnt) (0xb0000000 | ((cnt) << 24) | SX_XOR_S | \
		((sa) << 14) | ((d) << 7) | (sb))
#define SX_XORI(sa, sb, d, cnt) (0xb0000000 | ((cnt) << 24) | SX_XOR_I | \
		((sa) << 14) | ((d) << 7) | (sb))
#define SX_ORV(sa, sb, d, cnt) (0xb0000000 | ((cnt) << 24) | SX_OR_V | \
		((sa) << 14) | ((d) << 7) | (sb))
#define SX_ORS(sa, sb, d, cnt) (0xb0000000 | ((cnt) << 24) | SX_OR_S | \
		((sa) << 14) | ((d) << 7) | (sb))
278 
/* arithmetic group - operation selector at bit 21 */
#define SX_ADD_V	(0x00 << 21)	/* vector + vector */
#define SX_ADD_S	(0x01 << 21)	/* vector + scalar */
#define SX_ADD_I	(0x02 << 21)	/* vector + immediate */
#define SX_SUM		(0x03 << 21)	/* sum of vector and scalar */
#define SX_SUB_V	(0x04 << 21)	/* vector - vector */
#define SX_SUB_S	(0x05 << 21)	/* vector - scalar */
#define SX_SUB_I	(0x06 << 21)	/* vector - immediate */
#define SX_ABS		(0x07 << 21)	/* abs(sb) with sa=R0 */
/* hardware does sa - sb for sb < 0 and sa + sb if sb > 0 */

/* operand fields: sa at bit 14, sb at bit 0, d at bit 7, count at bit 24 */
#define SX_ADDV(sa, sb, d, cnt) (0xa0000000 | ((cnt) << 24) | SX_ADD_V | \
		((sa) << 14) | ((d) << 7) | (sb))
292 
/* MISC group - operation selector at bit 21 */
#define SX_GTHR		(3 << 21)	/* sa with spacing sb -> d */
#define SX_SCTR		(2 << 21)	/* sa -> d with spacing sb */
#define SX_GATHER(sa, sb, d, cnt) (0xe0000000 | ((cnt) << 24) | SX_GTHR | \
		 ((sa) << 14) | ((d) << 7) | (sb))
#define SX_SCATTER(sa, sb, d, cnt) (0xe0000000 | ((cnt) << 24) | SX_SCTR | \
		 ((sa) << 14) | ((d) << 7) | (sb))

#endif /* SXREG_H */
302