/*	$NetBSD: macros.h,v 1.23 2001/06/03 15:08:32 ragge Exp $	*/

/*
 * Copyright (c) 1994, 1998, 2000 Ludd, University of Luleå, Sweden.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *     This product includes software developed at Ludd, University of Luleå.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

 /* All bugs are subject to removal without further notice */

#if !defined(_VAX_MACROS_H_) && !defined(lint)
#define _VAX_MACROS_H_

/* General-purpose inline macros for the VAX port are collected here. */

static __inline__ int
ffs(int reg)
{
	register int val;

	__asm__ __volatile ("ffs $0,$32,%1,%0;"	/* find lowest set bit */
			"bneq 1f;"		/* found one: skip */
			"mnegl $1,%0;"		/* none: -1, so incl yields 0 */
			"1: incl %0"		/* make bit numbers 1-based */
			: "=&r" (val)
			: "r" (reg) );
	return	val;
}
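
/*
 * Illustrative sketch (hypothetical mask value): ffs() returns the
 * 1-based position of the lowest set bit, or 0 when no bit is set.
 */
#if 0
	int bit;

	bit = ffs(0x0028);	/* lowest set bit is bit 3 -> returns 4 */
	if (bit == 0)
		/* mask was empty */;
#endif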

/* Remove an entry from a queue (non-interlocked REMQUE). */
static __inline__ void
_remque(void *p)
{
	__asm__ __volatile ("remque (%0),%0;clrl 4(%0)"
			:
			: "r" (p)
			: "memory" );
}

/* Insert entry p after entry q (non-interlocked INSQUE). */
static __inline__ void
_insque(void *p, void *q)
{
	__asm__ __volatile ("insque (%0),(%1)"
			:
			: "r" (p),"r" (q)
			: "memory" );
}

static __inline__ void *
memcpy(void *to, const void *from, size_t len)
{
	/* XXX - movc3 takes a word length; len > 65535 is not handled */
	__asm__ __volatile ("movc3 %0,%1,%2"
			:
			: "g" (len), "m" (*(char *)from), "m" (*(char *)to)
			:"r0","r1","r2","r3","r4","r5","memory","cc");
	return to;
}

/* MOVC3 handles overlapping strings, so memmove is the same code. */
static __inline__ void *
memmove(void *to, const void *from, size_t len)
{
	__asm__ __volatile ("movc3 %0,%1,%2"
			:
			: "g" (len), "m" (*(char *)from), "m" (*(char *)to)
			:"r0","r1","r2","r3","r4","r5","memory","cc");
	return to;
}

static __inline__ void
bcopy(const void *from, void *to, size_t len)
{
	__asm__ __volatile ("movc3 %0,%1,%2"
			:
			: "g" (len), "m" (*(char *)from), "m" (*(char *)to)
			:"r0","r1","r2","r3","r4","r5","memory","cc");
}

/* Zero a region of arbitrary size; implemented in assembler. */
void	blkclr __P((void *, size_t));

static __inline__ void *
memset(void *block, int c, size_t len)
{
	if (len > 65535)
		blkclr(block, len);	/* XXX - ignores c; only correct for c == 0 */
	else {
		__asm__ __volatile ("movc5 $0,(sp),%2,%1,%0"
			:
			: "m" (*(char *)block), "g" (len), "g" (c)
			:"r0","r1","r2","r3","r4","r5","memory","cc");
	}
	return block;
}

static __inline__ void
bzero(void *block, size_t len)
{
	if (len > 65535)
		blkclr(block, len);
	else {
		__asm__ __volatile ("movc5 $0,(sp),$0,%1,%0"
			:
			: "m" (*(char *)block), "g" (len)
			:"r0","r1","r2","r3","r4","r5","memory","cc");
	}
}

/*
 * XXX - the return value of memcmp does not follow ISO C: cmpc3 leaves
 * the count of remaining bytes in r0, so the result is never negative.
 * Only (in)equality tests against 0 are meaningful.
 */
static __inline__ int
memcmp(const void *b1, const void *b2, size_t len)
{
	register int ret;

	__asm__ __volatile("cmpc3 %3,(%1),(%2);movl r0,%0"
			: "=r" (ret)
			: "r" (b1), "r" (b2), "r" (len)
			: "r0","r1","r2","r3","cc" );
	return ret;
}
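
/*
 * Illustrative sketch of the caveat above (buf1/buf2 are placeholders):
 * with this memcmp only the equality test is reliable.
 */
#if 0
	if (memcmp(buf1, buf2, 16) == 0)
		/* buffers are equal: OK */;
	if (memcmp(buf1, buf2, 16) < 0)
		/* WRONG: the result here is never negative */;
#endif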

static __inline__ int
bcmp(const void *b1, const void *b2, size_t len)
{
	register int ret;

	__asm__ __volatile("cmpc3 %3,(%1),(%2);movl r0,%0"
			: "=r" (ret)
			: "r" (b1), "r" (b2), "r" (len)
			: "r0","r1","r2","r3","cc" );
	return ret;
}

/*
 * Begin new string inlines ("nya").  Note that locc takes a word-sized
 * length, so these handle strings of at most 65535 bytes.
 */
static __inline__ size_t
strlen(const char *cp)
{
	register size_t ret;

	__asm__ __volatile("locc $0,$65535,(%1);subl3 r0,$65535,%0"
			: "=r" (ret)
			: "r" (cp)
			: "r0","r1","cc" );
	return	ret;
}

static __inline__ char *
strcat(char *cp, const char *c2)
{
	__asm__ __volatile("locc $0,$65535,(%1);"	/* find NUL in c2 */
			"subl3 r0,$65535,r2;"		/* r2 = strlen(c2) */
			"incl r2;"			/* copy the NUL too */
			"locc $0,$65535,(%0);"		/* r1 = end of cp */
			"movc3 r2,(%1),(r1)"		/* append c2 */
			:
			: "r" (cp), "r" (c2)
			: "r0","r1","r2","r3","r4","r5","memory","cc");
	return	cp;
}

static __inline__ char *
strncat(char *cp, const char *c2, size_t count)
{
	__asm__ __volatile("locc $0,%2,(%1);"	/* find NUL in first count bytes */
			"subl3 r0,%2,r2;"	/* r2 = number of bytes to copy */
			"locc $0,$65535,(%0);"	/* r1 = end of cp */
			"movc3 r2,(%1),(r1);"	/* append the bytes */
			"movb $0,(r3)"		/* NUL-terminate */
			:
			: "r" (cp), "r" (c2), "g"(count)
			: "r0","r1","r2","r3","r4","r5","memory","cc");
	return	cp;
}

static __inline__ char *
strcpy(char *cp, const char *c2)
{
	__asm__ __volatile("locc $0,$65535,(%1);"	/* find NUL in c2 */
			"subl3 r0,$65535,r2;"		/* r2 = strlen(c2) */
			"movc3 r2,(%1),(%0);"		/* copy the string */
			"movb $0,(r3)"			/* NUL-terminate */
			:
			: "r" (cp), "r" (c2)
			: "r0","r1","r2","r3","r4","r5","memory","cc");
	return	cp;
}

static __inline__ char *
strncpy(char *cp, const char *c2, size_t len)
{
	/* XXX - unlike ISO C strncpy, this does not zero-pad the remainder. */
	__asm__ __volatile("movl %2,r2;"	/* r2 = len */
			"locc $0,r2,(%1);"	/* NUL within len bytes? */
			"beql 1f;"		/* no: copy len bytes, unterminated */
			"subl3 r0,%2,r2;"	/* yes: r2 = strlen(c2) */
			"clrb (%0)[r2];"	/* NUL-terminate the destination */
			"1: movc3 r2,(%1),(%0)"	/* copy the bytes */
			:
			: "r" (cp), "r" (c2), "g"(len)
			: "r0","r1","r2","r3","r4","r5","memory","cc");
	return	cp;
}

static __inline__ void *
memchr(const void *cp, int c, size_t len)
{
	void *ret;

	__asm__ __volatile("locc %2,%3,(%1);"	/* scan for c */
			"bneq 1f;"		/* found: r1 points at it */
			"clrl r1;"		/* not found: return NULL */
			"1: movl r1,%0"
			: "=g"(ret)
			: "r" (cp), "r" (c), "g"(len)
			: "r0","r1","cc");
	return	ret;
}

static __inline__ int
strcmp(const char *cp, const char *c2)
{
	register int ret;

	__asm__ __volatile("locc $0,$65535,(%1);"	/* find NUL in cp */
			"subl3 r0,$65535,r0;"		/* r0 = strlen(cp) */
			"incl r0;"			/* compare the NUL too */
			"cmpc3 r0,(%1),(%2);"		/* equal? then r2 = 0 */
			"beql 1f;"
			"movl $1,r2;"			/* assume cp > c2 */
			"cmpb (r1),(r3);"		/* check the differing bytes */
			"bcc 1f;"
			"movl $-1,r2;"			/* no, cp < c2 */
			"1: movl r2,%0"
			: "=g"(ret)
			: "r" (cp), "r" (c2)
			: "r0","r1","r2","r3","cc");
	return	ret;
}
/* End new string inlines */

#if 0 /* unused, but no point in deleting it since it _is_ an instruction */
static __inline__ int
locc(int mask, char *cp, size_t size)
{
	register int ret;

	__asm__ __volatile("locc %1,%2,(%3);movl r0,%0"
			: "=r" (ret)
			: "r" (mask),"r"(size),"r"(cp)
			: "r0","r1","cc" );
	return	ret;
}
#endif

/* Scan cp for a byte whose table entry has bits in mask set (SCANC). */
static __inline__ int
scanc(u_int size, const u_char *cp, const u_char *table, int mask)
{
	register int ret;

	__asm__ __volatile("scanc	%1,(%2),(%3),%4;movl r0,%0"
			: "=g"(ret)
			: "r"(size),"r"(cp),"r"(table),"r"(mask)
			: "r0","r1","r2","r3","cc" );
	return ret;
}

/* Skip leading bytes equal to mask; return the count remaining (SKPC). */
static __inline__ int
skpc(int mask, size_t size, u_char *cp)
{
	register int ret;

	__asm__ __volatile("skpc %1,%2,(%3);movl r0,%0"
			: "=g"(ret)
			: "r"(mask),"r"(size),"r"(cp)
			: "r0","r1","cc" );
	return	ret;
}

/*
 * Set (bbssi) or clear (bbcci) a bit at a memory position; interlocked.
 * Return 1 if the bit was changed, 0 if it already was in the
 * requested state.
 */
static __inline__ int
bbssi(int bitnr, long *addr)
{
	register int ret;

	__asm__ __volatile("clrl r0;bbssi %1,%2,1f;incl r0;1:movl r0,%0"
		: "=&r"(ret)
		: "g"(bitnr),"m"(*addr)
		: "r0","cc","memory");
	return ret;
}

static __inline__ int
bbcci(int bitnr, long *addr)
{
	register int ret;

	__asm__ __volatile("clrl r0;bbcci %1,%2,1f;incl r0;1:movl r0,%0"
		: "=&r"(ret)
		: "g"(bitnr),"m"(*addr)
		: "r0","cc","memory");
	return ret;
}
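
/*
 * Illustrative sketch (hypothetical lock variable): bbssi/bbcci give a
 * simple test-and-set spinlock, interlocked against other CPUs and DMA.
 */
#if 0
	static long lockword;

	while (bbssi(0, &lockword) == 0)
		;			/* bit already set: spin until free */
	/* ... critical section ... */
	(void)bbcci(0, &lockword);	/* release: clear the bit */
#endif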

#define setrunqueue(p)	\
	__asm__ __volatile("movl %0,r0;jsb Setrq":: "g"(p):"r0","r1","r2")

#define remrunqueue(p)	\
	__asm__ __volatile("movl %0,r0;jsb Remrq":: "g"(p):"r0","r1","r2")

#define cpu_switch(p) \
	__asm__ __volatile("movl %0,r6;movpsl -(sp);jsb Swtch" \
	    ::"g"(p):"r0","r1","r2","r3","r4","r5","r6")

/*
 * Interlock instructions.  Used both in multiprocessor environments to
 * lock between CPUs and in uniprocessor systems when locking is required
 * between I/O devices and the master CPU.
 */
/*
 * Insqti() inserts an element at the tail of a queue, interlocked.
 * Returns -1 if the interlock failed, 1 if inserted OK and 0 if the
 * element is the first in the queue.
 */
static __inline__ int
insqti(void *entry, void *header) {
	register int ret;

	__asm__ __volatile(
			"mnegl $1,%0;"		/* assume interlock failure */
			"insqti (%1),(%2);"
			"bcs 1f;"		/* failed insert */
			"beql 2f;"		/* jump if first entry */
			"movl $1,%0;"
			"brb 1f;"
			"2: clrl %0;"
			"1:"
			: "=&g"(ret)
			: "r"(entry), "r"(header)
			: "memory");

	return ret;
}
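
/*
 * Illustrative sketch (hypothetical queue head and node): retrying an
 * interlocked insert, using the result codes defined at the end of
 * this file.
 */
#if 0
	int r;

	while ((r = insqti(&node, &queuehead)) == ILCK_FAILED)
		;			/* interlock busy: retry */
	if (r == Q_EMPTY)
		/* node is the first entry: may need to wake a consumer */;
#endif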

/*
 * Remqhi() removes an element from the head of the queue, interlocked.
 * Returns -1 if the interlock failed, 0 if the queue was empty, and the
 * address of the removed element otherwise.
 */
static __inline__ void *
remqhi(void *header) {
	register void *ret;

	__asm__ __volatile(
			"remqhi (%1),%0;"
			"bcs 1f;"		/* failed interlock */
			"bvs 2f;"		/* nothing was removed */
			"brb 3f;"
			"1: mnegl $1,%0;"
			"brb 3f;"
			"2: clrl %0;"
			"3:"
			: "=&g"(ret)
			: "r"(header)
			: "memory");

	return ret;
}
#define	ILCK_FAILED	-1	/* Interlock failed */
#define	Q_EMPTY		0	/* Queue is/was empty */
#define	Q_OK		1	/* Inserted OK */
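
/*
 * Illustrative sketch (hypothetical queue head): draining a queue with
 * remqhi, distinguishing "queue empty" from "interlock busy".
 */
#if 0
	void *elem;

	for (;;) {
		elem = remqhi(&queuehead);
		if (elem == (void *)ILCK_FAILED)
			continue;	/* interlock busy: retry */
		if (elem == NULL)
			break;		/* Q_EMPTY: queue drained */
		/* ... process elem ... */
	}
#endif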

#endif	/* _VAX_MACROS_H_ */