/*	$NetBSD: macros.h,v 1.13 1997/11/05 04:23:35 thorpej Exp $	*/

/*
 * Copyright (c) 1994 Ludd, University of Luleå, Sweden.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *     This product includes software developed at Ludd, University of Luleå.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

 /* All bugs are subject to removal without further notice */

#if !defined(_VAX_MACROS_H_) && !defined(STANDALONE) && \
	(!defined(_LOCORE) && defined(_VAX_INLINE_))
#define	_VAX_MACROS_H_

/* General-purpose inline macros for the VAX are collected here. */

/*
 * Find-first-set: returns the 1-based index of the lowest set bit in
 * reg, or 0 if no bit at all is set (the usual ffs(3) contract).
 */
static __inline__ int ffs(int reg) {
	register int val;

	__asm__ __volatile ("ffs	$0,$32,%1,%0\n"
		"	bneq	1f\n"		/* a bit was found */
		"	mnegl	$1,%0\n"	/* nothing set: val = -1 */
		"1:	incl	%0"		/* bias to 1-based index, or 0 */
			: "=&r" (val)		/* earlyclobber: written early */
			: "r" (reg) );
	return	val;
}
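
/*
 * A minimal usage sketch for ffs() above; the calling function is
 * hypothetical and only demonstrates the return convention.
 */
#if 0	/* illustrative only */
static __inline__ int ffs_example(void) {
	int a = ffs(0x18);	/* lowest set bit is bit 3 -> returns 4 */
	int b = ffs(0);		/* no bit set -> returns 0 */

	return a + b;		/* 4 */
}
#endif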

/* Remove the element at p from its queue (remque) and clear its back link. */
static __inline__ void _remque(void *p) {
	__asm__ __volatile ("remque (%0),%0;clrl 4(%0)"
			:
			: "r" (p)
			: "memory" );
}

/* Insert element p into a queue immediately after element q (insque). */
static __inline__ void _insque(void *p, void *q) {
	__asm__ __volatile ("insque (%0), (%1)"
			:
			: "r" (p), "r" (q)
			: "memory" );
}
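
/*
 * Sketch of how _insque()/_remque() are called.  The element layout is
 * an assumption taken from the insque/remque contract, which wants two
 * longword link fields at the start of every queue element.
 */
#if 0	/* illustrative only */
struct qexample {
	struct qexample *q_forw;	/* forward link, offset 0 */
	struct qexample *q_back;	/* back link, offset 4 */
	int		 q_data;
};

static __inline__ void queue_example(struct qexample *head,
    struct qexample *elem) {
	_insque(elem, head);	/* link elem in right after head */
	_remque(elem);		/* and unlink it again */
}
#endif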

/* Set bit "bitnr" in "var" (the bbss branch falls through either way). */
#define	bitset(bitnr,var)				\
({	__asm__ __volatile ("bbss %0,%1,1f;1:;"		\
			:				\
			: "g" (bitnr), "g" (var));	\
})

/* Clear bit "bitnr" in "var". */
#define	bitclear(bitnr,var)				\
({	__asm__ __volatile ("bbsc %0,%1,1f;1:;"		\
			:				\
			: "g" (bitnr), "g" (var));	\
})

/* Evaluates to nonzero iff bit "bitnr" of "var" is set. */
#define	bitisset(bitnr,var)				\
({							\
	register int val;				\
	__asm__ __volatile ("clrl %0;bbc %1,%2,1f;incl %0;1:;" \
			: "=g" (val)			\
			: "g" (bitnr), "g" (var));	\
	val;						\
})

/* Evaluates to nonzero iff bit "bitnr" of "var" is clear. */
#define	bitisclear(bitnr,var)				\
({							\
	register int val;				\
	__asm__ __volatile ("clrl %0;bbs %1,%2,1f;incl %0;1:;" \
			: "=g" (val)			\
			: "g" (bitnr), "g" (var));	\
	val;						\
})
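
/*
 * Illustrative use of the bit macros on an int flag word; the flag word
 * and bit number here are invented for the example.
 */
#if 0	/* illustrative only */
static __inline__ int bit_example(void) {
	int flags = 0;

	bitset(3, flags);		/* bit 3 of flags is now set */
	if (bitisset(3, flags))
		bitclear(3, flags);	/* ...and now clear again */
	return bitisclear(3, flags);	/* evaluates to 1 */
}
#endif
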
/*
 * Copy len bytes from "from" to "toe" with a single movc3; the movc3
 * length operand is only a word wide, so len must be below 65536 (see
 * blkcpy() below for larger copies).  movc3 uses r0-r5.
 */
static __inline__ void bcopy(const void *from, void *toe, u_int len) {
	__asm__ __volatile ("movc3 %0,(%1),(%2)"
			:
			: "r" (len), "r" (from), "r" (toe)
			: "r0","r1","r2","r3","r4","r5");
}

/* Zero len (< 65536) bytes at block with a single movc5. */
static __inline__ void bzero(void *block, u_int len) {
	__asm__ __volatile ("movc5 $0,(%0),$0,%1,(%0)"
			:
			: "r" (block), "r" (len)
			: "r0","r1","r2","r3","r4","r5");
}
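
/*
 * A minimal sketch of bcopy()/bzero() use; the buffers and count are
 * hypothetical.  Both routines expand to one movc instruction, so the
 * length must stay below 65536; blkcpy()/blkclr() handle the rest.
 */
#if 0	/* illustrative only */
static __inline__ void copy_example(char *dst, const char *src, u_int n) {
	bzero(dst, n);		/* clear the destination first */
	bcopy(src, dst, n);	/* then copy n (< 65536) bytes over */
}
#endif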

/*
 * Compare len (< 65536) bytes; returns 0 when equal, nonzero otherwise
 * (cmpc3 leaves the count of bytes left to compare in r0).
 */
static __inline__ int bcmp(const void *b1, const void *b2, size_t len) {
	register int ret;

	__asm__ __volatile("cmpc3 %3,(%1),(%2);movl r0,%0"
			: "=r" (ret)
			: "r" (b1), "r" (b2), "r" (len)
			: "r0","r1","r2","r3" );
	return ret;
}
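
/* bcmp() keeps the classic bcmp(3) contract; the values are made up. */
#if 0	/* illustrative only */
static __inline__ int cmp_example(void) {
	char a[4] = { 1, 2, 3, 4 };
	char b[4] = { 1, 2, 3, 5 };

	return bcmp(a, b, sizeof(a));	/* nonzero: the buffers differ */
}
#endif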

#if 0 /* unused, but no point in deleting it since it _is_ an instruction */
/* Locate the first byte equal to mask; returns bytes remaining, 0 if none. */
static __inline__ int locc(int mask, char *cp, u_int size) {
	register int ret;

	__asm__ __volatile("locc %1,%2,(%3);movl r0,%0"
			: "=r" (ret)
			: "r" (mask), "r" (size), "r" (cp)
			: "r0","r1" );
	return	ret;
}
#endif

/*
 * Scan the size bytes at cp, using each byte to index table and ANDing
 * the entry with mask; the scan stops at the first nonzero result.
 * Returns the number of bytes left unscanned, so 0 means no byte matched.
 */
static __inline__ int
scanc(u_int size, const u_char *cp, const u_char *table, int mask) {
	register int ret;

	__asm__ __volatile("scanc	%1,(%2),(%3),%4;movl r0,%0"
			: "=g"(ret)
			: "r"(size),"r"(cp),"r"(table),"r"(mask)
			: "r0","r1","r2","r3" );
	return ret;
}
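
/*
 * A hypothetical scanc() caller: the 256-entry lookup table flags
 * "interesting" bytes, and a nonzero return means one was found.
 */
#if 0	/* illustrative only */
static __inline__ int scanc_example(const u_char *str, u_int len) {
	static const u_char special[256] = { 0 };	/* set entries to 1 */

	return scanc(len, str, special, 1);
}
#endif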

/*
 * Skip leading bytes equal to mask; returns the count of bytes left
 * starting at the first non-matching byte, or 0 if all size bytes match.
 */
static __inline__ int skpc(int mask, size_t size, u_char *cp) {
	register int ret;

	__asm__ __volatile("skpc %1,%2,(%3);movl r0,%0"
			: "=g"(ret)
			: "r"(mask),"r"(size),"r"(cp)
			: "r0","r1" );
	return	ret;
}
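
/* A hypothetical "skip leading spaces" use of skpc(). */
#if 0	/* illustrative only */
static __inline__ u_char *skip_spaces_example(u_char *buf, size_t len) {
	int left = skpc(' ', len, buf);

	/* left == 0 means the whole buffer was spaces */
	return buf + (len - left);
}
#endif
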
#if 0
static __inline__ int imin(int a, int b) {
	__asm__ __volatile("cmpl %0,%2;bleq 1f;movl %2,%0;1:"
			: "=r"(a)
			: "0"(a),"r"(b) );
	return a;
}

static __inline__ int imax(int a, int b) {
	__asm__ __volatile("cmpl %0,%2;bgeq 1f;movl %2,%0;1:"
			: "=r"(a)
			: "0"(a),"r"(b) );
	return a;
}

static __inline__ int min(int a, int b) {
	__asm__ __volatile("cmpl %0,%2;bleq 1f;movl %2,%0;1:"
			: "=r"(a)
			: "0"(a),"r"(b) );
	return a;
}

static __inline__ int max(int a, int b) {
	__asm__ __volatile("cmpl %0,%2;bgeq 1f;movl %2,%0;1:"
			: "=r"(a)
			: "0"(a),"r"(b) );
	return a;
}
#endif

/*
 * Copy len bytes from "from" to "to".  The movc3 length operand is only
 * a word wide, so the copy loops in 65535-byte chunks; movc3 leaves r1
 * and r3 pointing past the last byte moved, which keeps the loop going.
 */
static __inline__ void blkcpy(const void *from, void *to, u_int len) {
	__asm__ __volatile(
		"	movl	%0,r1\n"	/* r1: source */
		"	movl	%1,r3\n"	/* r3: destination */
		"	movl	%2,r6\n"	/* r6: bytes left */
		"	jbr	2f\n"
		"1:	subl2	r0,r6\n"	/* full 65535-byte chunk */
		"	movc3	r0,(r1),(r3)\n"
		"2:	movzwl	$65535,r0\n"
		"	cmpl	r6,r0\n"
		"	jgtr	1b\n"
		"	movc3	r6,(r1),(r3)"	/* the remainder */
			:
			: "g" (from), "g" (to), "g" (len)
			: "r0","r1","r2","r3","r4","r5","r6" );
}

/* Zero len bytes at blk, chunked the same way as blkcpy() above. */
static __inline__ void blkclr(void *blk, int len) {
	__asm__ __volatile(
		"	movl	%0,r3\n"	/* r3: destination */
		"	movl	%1,r6\n"	/* r6: bytes left */
		"	jbr	2f\n"
		"1:	subl2	r0,r6\n"	/* full 65535-byte chunk */
		"	movc5	$0,(r3),$0,r0,(r3)\n"
		"2:	movzwl	$65535,r0\n"
		"	cmpl	r6,r0\n"
		"	jgtr	1b\n"
		"	movc5	$0,(r3),$0,r6,(r3)"	/* the remainder */
			:
			: "g" (blk), "g" (len)
			: "r0","r1","r2","r3","r4","r5","r6" );
}
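
/*
 * A hypothetical caller clearing a region larger than a single movc5
 * can handle:
 */
#if 0	/* illustrative only */
static __inline__ void clear_big_example(void *buf) {
	blkclr(buf, 1 << 20);	/* 1 MB: far beyond the 65535-byte limit */
}
#endif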

/* Hooks into the locore scheduler primitives; note the register clobbers. */
#define	setrunqueue(p)	\
	__asm__ __volatile("movl %0,r0;jsb Setrq" :: "g"(p) : "r0","r1","r2")

#define	remrunqueue(p)	\
	__asm__ __volatile("movl %0,r0;jsb Remrq" :: "g"(p) : "r0","r1","r2")

#define	cpu_switch(p) \
	__asm__ __volatile("movl %0,r0;movpsl -(sp);jsb Swtch" \
	    :: "g"(p) : "r0","r1","r2","r3")
#endif	/* _VAX_MACROS_H_ */