xref: /netbsd-src/sys/arch/mips/include/asm.h (revision 835080d5e678e165a8c64b1ec4e616adde88537c)
1 /*	$NetBSD: asm.h,v 1.75 2023/09/14 03:37:01 rin Exp $	*/
2 
3 /*
4  * Copyright (c) 1992, 1993
5  *	The Regents of the University of California.  All rights reserved.
6  *
7  * This code is derived from software contributed to Berkeley by
8  * Ralph Campbell.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  *	@(#)machAsmDefs.h	8.1 (Berkeley) 6/10/93
35  */
36 
37 /*
38  * machAsmDefs.h --
39  *
40  *	Macros used when writing assembler programs.
41  *
42  *	Copyright (C) 1989 Digital Equipment Corporation.
43  *	Permission to use, copy, modify, and distribute this software and
44  *	its documentation for any purpose and without fee is hereby granted,
45  *	provided that the above copyright notice appears in all copies.
46  *	Digital Equipment Corporation makes no representations about the
47  *	suitability of this software for any purpose.  It is provided "as is"
48  *	without express or implied warranty.
49  *
50  * from: Header: /sprite/src/kernel/mach/ds3100.md/RCS/machAsmDefs.h,
51  *	v 1.2 89/08/15 18:28:24 rab Exp  SPRITE (DECWRL)
52  */
53 
#ifndef _MIPS_ASM_H
#define	_MIPS_ASM_H

#include <sys/cdefs.h>		/* for API selection */
#include <mips/regdef.h>

#if defined(_KERNEL_OPT)
#include "opt_gprof.h"
#include "opt_multiprocessor.h"
#endif

#ifdef __ASSEMBLER__
/*
 * Assembler-visible equivalents of the <sys/cdefs.h> bit macros:
 * __BIT(n) is a value with only bit n set; __BITS(hi,lo) is a mask
 * covering the inclusive bit range hi..lo.
 */
#define	__BIT(n)	(1 << (n))
#define	__BITS(hi,lo)	((~((~0)<<((hi)+1)))&((~0)<<(lo)))

/*
 * Field insert/extract helpers: __SHIFTIN() positions a value under
 * __mask, __SHIFTOUT() pulls it back out, using the lowest set bit of
 * the mask as the scale factor.
 */
#define	__LOWEST_SET_BIT(__mask) ((((__mask) - 1) & (__mask)) ^ (__mask))
#define	__SHIFTOUT(__x, __mask) (((__x) & (__mask)) / __LOWEST_SET_BIT(__mask))
#define	__SHIFTIN(__x, __mask) ((__x) * __LOWEST_SET_BIT(__mask))
#endif	/* __ASSEMBLER__ */
73 
#ifndef GPROF
/* Profiling disabled: the function-entry hook expands to nothing. */
#define	_MIPS_ASM_MCOUNT(x)
#else
/*
 * Define -pg profile entry code.
 * Must always be noreorder, must never use a macro instruction.
 */
#if defined(__mips_o32)		/* Old 32-bit ABI */
/*
 * The old ABI version must also decrement two less words off the
 * stack and the final addiu to t9 must always equal the size of this
 * _MIPS_ASM_MCOUNT.
 */
#define	_MIPS_ASM_MCOUNT(x)					\
	.set	push;						\
	.set	noreorder;					\
	.set	noat;						\
	subu	sp,16;						\
	sw	t9,12(sp);					\
	move	AT,ra;						\
	lui	t9,%hi(_mcount); 				\
	addiu	t9,t9,%lo(_mcount);				\
	jalr	t9;						\
	 nop;							\
	lw	t9,4(sp);					\
	addiu	sp,8;						\
	addiu	t9,40;						\
	.set	pop;
#elif defined(__mips_o64)	/* Old 64-bit ABI */
# error "_MIPS_ASM_MCOUNT: profiling (-pg) is not implemented for the o64 ABI"
#else				/* New (n32/n64) ABI */
/*
 * The new ABI version just needs to put the return address in AT and
 * call _mcount().  For the no abicalls case, skip the reloc dance.
 */
#ifdef __mips_abicalls
#if defined(__mips_n32)		/* n32 */
#define	_MIPS_ASM_MCOUNT(x)					\
	.set	push;						\
	.set	noreorder;					\
	.set	noat;						\
	subu	sp,16;						\
	sw	t9,8(sp);					\
	move	AT,ra;						\
	lui	t9,%hi(_mcount); 				\
	addiu	t9,t9,%lo(_mcount);				\
	jalr	t9;						\
	 nop;							\
	lw	t9,8(sp);					\
	addiu	sp,16;						\
	.set	pop;
#else				/* n64 */
/* n64 must rebuild gp from t9 to reach _mcount via the GOT. */
#define	_MIPS_ASM_MCOUNT(x)					\
	.set	push;						\
	.set	noreorder;					\
	.set	noat;						\
	dsubu	sp,16;						\
	sd	gp,0(sp);					\
	sd	t9,8(sp);					\
	move	AT,ra;						\
	lui	gp,%hi(%neg(%gp_rel(x)));			\
	daddiu	gp,%lo(%neg(%gp_rel(x)));			\
	daddu	gp,gp,t9;					\
	ld	t9,%call16(_mcount)(gp);			\
	jalr	t9;						\
	 nop;							\
	ld	gp,0(sp);					\
	ld	t9,8(sp);					\
	daddiu	sp,16;						\
	.set	pop;
#endif
#else /* !__mips_abicalls */
#define	_MIPS_ASM_MCOUNT(x)					\
	.set	push;						\
	.set	noreorder;					\
	.set	noat;						\
	move	AT,ra;						\
	jal	_mcount;					\
	 nop;							\
	.set	pop;
#endif /* !__mips_abicalls */
#endif /* n32/n64 */
#endif /* GPROF */
157 
#ifdef USE_AENT
/*
 * AENT -- emit an alternate-entry-point (.aent) directive for symbol x;
 * conditional because only some toolchains want these.
 */
#define	AENT(x)				\
	.aent	x, 0
#else
#define	AENT(x)
#endif

/*
 * WEAK_ALIAS: create a weak alias.
 */
#define	WEAK_ALIAS(alias,sym)						\
	.weak alias;							\
	alias = sym
/*
 * STRONG_ALIAS: create a strong alias.
 */
#define	STRONG_ALIAS(alias,sym)						\
	.globl alias;							\
	alias = sym

/*
 * WARN_REFERENCES: create a warning if the specified symbol is referenced.
 * Uses the .gnu.warning.<sym> section convention: the linker prints msg
 * for any reference to sym.
 */
#define	WARN_REFERENCES(sym,msg)					\
	.pushsection __CONCAT(.gnu.warning.,sym);			\
	.ascii msg;							\
	.popsection

/*
 * STATIC_LEAF_NOPROFILE
 *	Non-profiled local (file-scope) leaf routine.
 */
#define	STATIC_LEAF_NOPROFILE(x)	\
	.ent	_C_LABEL(x);		\
_C_LABEL(x): ;				\
	.frame sp, 0, ra

/*
 * LEAF_NOPROFILE
 *	Non-profiled global leaf routine.
 */
#define	LEAF_NOPROFILE(x)		\
	.globl	_C_LABEL(x);		\
	STATIC_LEAF_NOPROFILE(x)

/*
 * STATIC_LEAF
 *	Declare a local leaf function (profiled when GPROF is defined).
 */
#define	STATIC_LEAF(x)			\
	STATIC_LEAF_NOPROFILE(x);	\
	_MIPS_ASM_MCOUNT(x)

/*
 * LEAF
 *	A leaf routine does
 *	- call no other function,
 *	- never use any register that is callee-saved (S0-S8), and
 *	- not use any local stack storage.
 */
#define	LEAF(x)				\
	LEAF_NOPROFILE(x);		\
	_MIPS_ASM_MCOUNT(x)

/*
 * STATIC_XLEAF
 *	declare alternate entry to a static leaf routine
 */
#define	STATIC_XLEAF(x)			\
	AENT (_C_LABEL(x));		\
_C_LABEL(x):

/*
 * XLEAF
 *	declare alternate entry to leaf routine
 */
#define	XLEAF(x)			\
	.globl	_C_LABEL(x);		\
	STATIC_XLEAF(x)
237 
/*
 * STATIC_NESTED_NOPROFILE
 *	Non-profiled local (file-scope) nested routine.
 */
#define	STATIC_NESTED_NOPROFILE(x, fsize, retpc)	\
	.ent	_C_LABEL(x);				\
	.type	_C_LABEL(x), @function;			\
_C_LABEL(x): ;						\
	.frame	sp, fsize, retpc

/*
 * NESTED_NOPROFILE
 *	Non-profiled global nested routine.
 */
#define	NESTED_NOPROFILE(x, fsize, retpc)	\
	.globl	_C_LABEL(x);			\
	STATIC_NESTED_NOPROFILE(x, fsize, retpc)

/*
 * NESTED
 *	A function calls other functions and needs
 *	therefore stack space to save/restore registers.
 */
#define	NESTED(x, fsize, retpc)			\
	NESTED_NOPROFILE(x, fsize, retpc);	\
	_MIPS_ASM_MCOUNT(x)

/*
 * STATIC_NESTED
 *	Local (static) nested routine, profiled when GPROF is defined
 *	(it includes the _MIPS_ASM_MCOUNT entry hook).
 */
#define	STATIC_NESTED(x, fsize, retpc)			\
	STATIC_NESTED_NOPROFILE(x, fsize, retpc);	\
	_MIPS_ASM_MCOUNT(x)

/*
 * XNESTED
 *	declare alternate entry point to nested routine.
 */
#define	XNESTED(x)			\
	.globl	_C_LABEL(x);		\
	AENT (_C_LABEL(x));		\
_C_LABEL(x):

/*
 * END
 *	Mark end of a procedure and record its size for the symbol table.
 */
#define	END(x)				\
	.end _C_LABEL(x);		\
	.size _C_LABEL(x), . - _C_LABEL(x)

/*
 * IMPORT -- import external symbol
 */
#define	IMPORT(sym, size)		\
	.extern _C_LABEL(sym),size

/*
 * EXPORT -- export definition of symbol
 */
#define	EXPORT(x)			\
	.globl	_C_LABEL(x);		\
_C_LABEL(x):

/*
 * EXPORT_OBJECT -- export definition of symbol of
 * type Object, visible to ksyms(4) address search.
 */
#define	EXPORT_OBJECT(x)		\
	EXPORT(x);			\
	.type	_C_LABEL(x), @object;
310 
/*
 * VECTOR
 *	exception vector entrypoint
 *	XXX: regmask should be used to generate .mask
 *
 * Note: the previous definition ended with a stray line continuation
 * after "EXPORT(x);", silently absorbing the following source line
 * into the macro; that dangling backslash is removed here (the
 * expansion is otherwise unchanged).
 */
#define	VECTOR(x, regmask)		\
	.ent	_C_LABEL(x);		\
	EXPORT(x);

/*
 * VECTOR_END
 *	Close an exception vector and pad it out to 0x80 bytes,
 *	exporting an <x>_end symbol at the true end of the code.
 */
#define	VECTOR_END(x)			\
	EXPORT(__CONCAT(x,_end));	\
	END(x);				\
	.org _C_LABEL(x) + 0x80
324 
/*
 * Macros to panic and printf from assembly language.
 * The message text is emitted by the trailing MSG(), which places it
 * in .rdata at local label "9:"; PTR_LA references it as 9f.
 */
#define	PANIC(msg)			\
	PTR_LA	a0, 9f;			\
	jal	_C_LABEL(panic);	\
	nop;				\
	MSG(msg)

#define	PRINTF(msg)			\
	PTR_LA	a0, 9f;			\
	jal	_C_LABEL(printf);	\
	nop;				\
	MSG(msg)

/* Emit a NUL-terminated string at local label 9 in .rdata, then
 * switch back to .text. */
#define	MSG(msg)			\
	.rdata;				\
9:	.asciz	msg;			\
	.text

/* NUL-terminated string, aligned to an 8-byte (2^3) boundary. */
#define	ASMSTR(str)			\
	.asciz str;			\
	.align	3

/* Record an RCS id string in the mergeable-strings .ident section. */
#define	RCSID(x)	.pushsection ".ident","MS",@progbits,1;		\
			.asciz x;					\
			.popsection

/*
 * XXX retain dialects XXX
 */
#define	ALEAF(x)			XLEAF(x)
#define	NLEAF(x)			LEAF_NOPROFILE(x)
#define	NON_LEAF(x, fsize, retpc)	NESTED(x, fsize, retpc)
#define	NNON_LEAF(x, fsize, retpc)	NESTED_NOPROFILE(x, fsize, retpc)

/* Size in bytes of one general-purpose register save slot. */
#if defined(__mips_o32)
#define	SZREG	4
#else
#define	SZREG	8
#endif
366 
/*
 * Stack alignment and floating-point register load/store selection,
 * per ABI: o32/o64 use 8-byte stack alignment and 32-bit FP slots,
 * n32/n64 use 16-byte alignment and 64-bit FP slots.
 *
 * NOTE(review): ALMASK of -7/-15 is suspicious as an AND-mask
 * (aligning would normally use ~7 == -8 / ~15 == -16); verify how
 * callers actually use ALMASK before changing it.
 */
#if defined(__mips_o32) || defined(__mips_o64)
#define	ALSK	7		/* stack alignment */
#define	ALMASK	-7		/* stack alignment */
#define	SZFPREG	4
#define	FP_L	lwc1
#define	FP_S	swc1
#else
#define	ALSK	15		/* stack alignment */
#define	ALMASK	-15		/* stack alignment */
#define	SZFPREG	8
#define	FP_L	ldc1
#define	FP_S	sdc1
#endif

/*
 *  standard callframe {
 *  	register_t cf_args[4];		arg0 - arg3 (only on o32 and o64)
 *	register_t cf_pad[N];		o32/64 (N=0), n32 (N=1) n64 (N=1)
 *  	register_t cf_gp;		global pointer (only on n32 and n64)
 *  	register_t cf_sp;		frame pointer
 *  	register_t cf_ra;		return address
 *  };
 */
#if defined(__mips_o32) || defined(__mips_o64)
#define	CALLFRAME_SIZ	(SZREG * (4 + 2))
#define	CALLFRAME_S0	0
#elif defined(__mips_n32) || defined(__mips_n64)
#define	CALLFRAME_SIZ	(SZREG * 4)
#define	CALLFRAME_S0	(CALLFRAME_SIZ - 4 * SZREG)	/* == 0 */
#endif
#ifndef _KERNEL
/* Userland keeps a gp save slot in the frame; the kernel does not. */
#define	CALLFRAME_GP	(CALLFRAME_SIZ - 3 * SZREG)
#endif
#define	CALLFRAME_SP	(CALLFRAME_SIZ - 2 * SZREG)
#define	CALLFRAME_RA	(CALLFRAME_SIZ - 1 * SZREG)
402 
/*
 * While it would be nice to be compatible with the SGI
 * REG_L and REG_S macros, because they do not take parameters, it
 * is impossible to use them with the _MIPS_SIM_ABIX32 model.
 *
 * These macros hide the use of mips3 instructions from the
 * assembler to prevent the assembler from generating 64-bit style
 * ABI calls.
 *
 * PTR_* operate on pointer-sized quantities (note that n32 pointers
 * are 32-bit in memory but use 64-bit register arithmetic).
 *
 * NOTE(review): the SUB family maps to the non-trapping unsigned
 * forms (subu/dsubu) while PTR_ADD maps to the trapping "add"; also
 * "subi"/"dsubi" are assembler macro-instructions rather than real
 * opcodes.  This mirrors the historical definitions — confirm before
 * relying on overflow-trap behavior.
 */
#ifdef __mips_o32
#define	PTR_ADD		add
#define	PTR_ADDI	addi
#define	PTR_ADDU	addu
#define	PTR_ADDIU	addiu
#define	PTR_SUB		subu
#define	PTR_SUBI	subi
#define	PTR_SUBU	subu
#define	PTR_SUBIU	subu
#define	PTR_L		lw
#define	PTR_LA		la
#define	PTR_S		sw
#define	PTR_SLL		sll
#define	PTR_SLLV	sllv
#define	PTR_SRL		srl
#define	PTR_SRLV	srlv
#define	PTR_SRA		sra
#define	PTR_SRAV	srav
#define	PTR_LL		ll
#define	PTR_SC		sc
#define	PTR_WORD	.word
#define	PTR_SCALESHIFT	2
#else /* _MIPS_SZPTR == 64 */
#define	PTR_ADD		dadd
#define	PTR_ADDI	daddi
#define	PTR_ADDU	daddu
#define	PTR_ADDIU	daddiu
#define	PTR_SUB		dsubu
#define	PTR_SUBI	dsubi
#define	PTR_SUBU	dsubu
#define	PTR_SUBIU	dsubu
#ifdef __mips_n32
/* n32: 64-bit registers but 32-bit pointers in memory. */
#define	PTR_L		lw
#define	PTR_LL		ll
#define	PTR_SC		sc
#define	PTR_S		sw
#define	PTR_SCALESHIFT	2
#define	PTR_WORD	.word
#else
#define	PTR_L		ld
#define	PTR_LL		lld
#define	PTR_SC		scd
#define	PTR_S		sd
#define	PTR_SCALESHIFT	3
#define	PTR_WORD	.dword
#endif
#define	PTR_LA		dla
#define	PTR_SLL		dsll
#define	PTR_SLLV	dsllv
#define	PTR_SRL		dsrl
#define	PTR_SRLV	dsrlv
#define	PTR_SRA		dsra
#define	PTR_SRAV	dsrav
#endif /* _MIPS_SZPTR == 64 */
466 
/*
 * INT_* operate on C "int"-sized quantities (_MIPS_SZINT bits).
 */
#if _MIPS_SZINT == 32
#define	INT_ADD		add
#define	INT_ADDI	addi
#define	INT_ADDU	addu
#define	INT_ADDIU	addiu
#define	INT_SUB		subu
#define	INT_SUBI	subi
#define	INT_SUBU	subu
#define	INT_SUBIU	subu
#define	INT_L		lw
#define	INT_LA		la
#define	INT_S		sw
#define	INT_SLL		sll
#define	INT_SLLV	sllv
#define	INT_SRL		srl
#define	INT_SRLV	srlv
#define	INT_SRA		sra
#define	INT_SRAV	srav
#define	INT_LL		ll
#define	INT_SC		sc
#define	INT_WORD	.word
#define	INT_SCALESHIFT	2
#else
#define	INT_ADD		dadd
#define	INT_ADDI	daddi
#define	INT_ADDU	daddu
#define	INT_ADDIU	daddiu
#define	INT_SUB		dsubu
#define	INT_SUBI	dsubi
#define	INT_SUBU	dsubu
#define	INT_SUBIU	dsubu
#define	INT_L		ld
#define	INT_LA		dla
#define	INT_S		sd
#define	INT_SLL		dsll
#define	INT_SLLV	dsllv
#define	INT_SRL		dsrl
#define	INT_SRLV	dsrlv
#define	INT_SRA		dsra
#define	INT_SRAV	dsrav
#define	INT_LL		lld
#define	INT_SC		scd
#define	INT_WORD	.dword
#define	INT_SCALESHIFT	3
#endif
512 
/*
 * LONG_* operate on C "long"-sized quantities (_MIPS_SZLONG bits).
 */
#if _MIPS_SZLONG == 32
#define	LONG_ADD	add
#define	LONG_ADDI	addi
#define	LONG_ADDU	addu
#define	LONG_ADDIU	addiu
#define	LONG_SUB	subu
#define	LONG_SUBI	subi
#define	LONG_SUBU	subu
#define	LONG_SUBIU	subu
#define	LONG_L		lw
#define	LONG_LA		la
#define	LONG_S		sw
#define	LONG_SLL	sll
#define	LONG_SLLV	sllv
#define	LONG_SRL	srl
#define	LONG_SRLV	srlv
#define	LONG_SRA	sra
#define	LONG_SRAV	srav
#define	LONG_LL		ll
#define	LONG_SC		sc
#define	LONG_WORD	.word
#define	LONG_SCALESHIFT	2
#else
#define	LONG_ADD	dadd
#define	LONG_ADDI	daddi
#define	LONG_ADDU	daddu
#define	LONG_ADDIU	daddiu
#define	LONG_SUB	dsubu
#define	LONG_SUBI	dsubi
#define	LONG_SUBU	dsubu
#define	LONG_SUBIU	dsubu
#define	LONG_L		ld
#define	LONG_LA		dla
#define	LONG_S		sd
#define	LONG_SLL	dsll
#define	LONG_SLLV	dsllv
#define	LONG_SRL	dsrl
#define	LONG_SRLV	dsrlv
#define	LONG_SRA	dsra
#define	LONG_SRAV	dsrav
#define	LONG_LL		lld
#define	LONG_SC		scd
#define	LONG_WORD	.dword
#define	LONG_SCALESHIFT	3
#endif
558 
/*
 * REG_* operate on register-save-slot (SZREG byte) quantities.
 */
#if SZREG == 4
#define	REG_L		lw
#define	REG_S		sw
#define	REG_LI		li
#define	REG_ADDU	addu
#define	REG_SLL		sll
#define	REG_SLLV	sllv
#define	REG_SRL		srl
#define	REG_SRLV	srlv
#define	REG_SRA		sra
#define	REG_SRAV	srav
#define	REG_LL		ll
#define	REG_SC		sc
#define	REG_SCALESHIFT	2
#else
#define	REG_L		ld
#define	REG_S		sd
#define	REG_LI		dli
#define	REG_ADDU	daddu
#define	REG_SLL		dsll
#define	REG_SLLV	dsllv
#define	REG_SRL		dsrl
#define	REG_SRLV	dsrlv
#define	REG_SRA		dsra
#define	REG_SRAV	dsrav
#define	REG_LL		lld
#define	REG_SC		scd
#define	REG_SCALESHIFT	3
#endif

/*
 * NOP_L: a nop emitted only when the kernel supports MIPS-I/II CPUs
 * (presumably to fill load-delay slots on those CPUs — the MIPS1/MIPS2
 * option counts gate it here).
 */
#if (MIPS1 + MIPS2) > 0
#define	NOP_L		nop
#else
#define	NOP_L		/* nothing */
#endif
594 
/* compiler define */
#if defined(MULTIPROCESSOR) && defined(__OCTEON__)
/*
 * See common/lib/libc/arch/mips/atomic/membar_ops.S for notes on
 * Octeon memory ordering guarantees and barriers.
 *
 * cnMIPS also has a quirk where the store buffer can get clogged and
 * we need to apply a plunger to it _after_ releasing a lock or else
 * other CPUs may spin for hundreds of thousands of cycles before they
 * see the lock is released.  So we also have the quirky SYNC_PLUNGER
 * barrier as syncw.  See the note in the SYNCW instruction description
 * on p. 2168 of Cavium OCTEON III CN78XX Hardware Reference Manual,
 * CN78XX-HM-0.99E, September 2014:
 *
 *	Core A (writer)
 *
 *	SW R1, DATA#		change shared DATA value
 *	LI R1, 1
 *	SYNCW# (or SYNCWS)	Perform DATA store before performing FLAG store
 *	SW R2, FLAG#		say that the shared DATA value is valid
 *	SYNCW# (or SYNCWS)	Force the FLAG store soon (CN78XX-specific)
 *
 *	...
 *
 *	The second SYNCW instruction executed by core A is not
 *	necessary for correctness, but has very important performance
 *	effects on the CN78XX.  Without it, the store to FLAG may
 *	linger in core A's write buffer before it becomes visible to
 *	any other cores.  (If core A is not performing many stores,
 *	this may add hundreds of thousands of cycles to the flag
 *	release time since the CN78XX core nominally retains stores to
 *	attempt to merge them before sending the store on the CMI.)
 *	Applications should include this second SYNCW instruction after
 *	flag or lock release.
 */
#define	LLSCSYNC	/* nothing */
#define	BDSYNC		sync
#define	BDSYNC_ACQ	nop
#define	SYNC_ACQ	/* nothing */
#define	SYNC_REL	sync 4
#define	BDSYNC_PLUNGER	sync 4
#define	SYNC_PLUNGER	sync 4
#elif defined(MULTIPROCESSOR) && (__mips >= 3 || !defined(__mips_o32))
/* Generic MP MIPS3+: plain "sync" for acquire/release; no plunger. */
#define	LLSCSYNC	/* nothing */
#define	BDSYNC		sync
#define	BDSYNC_ACQ	sync
#define	SYNC_ACQ	sync
#define	SYNC_REL	sync
#define	BDSYNC_PLUNGER	nop
#define	SYNC_PLUNGER	/* nothing */
#else
/* Uniprocessor (or pre-MIPS3 o32): no ordering barriers needed. */
#define	LLSCSYNC	/* nothing */
#define	BDSYNC		nop
#define	BDSYNC_ACQ	nop
#define	SYNC_ACQ	/* nothing */
#define	SYNC_REL	/* nothing */
#define	BDSYNC_PLUNGER	nop
#define	SYNC_PLUNGER	/* nothing */
#endif

/*
 * Store-before-load barrier.  Do not use this unless you know what
 * you're doing.
 */
#ifdef MULTIPROCESSOR
#define	SYNC_DEKKER	sync
#else
#define	SYNC_DEKKER	/* nothing */
#endif

/*
 * Store-before-store and load-before-load barriers.  These could be
 * made weaker than release (load/store-before-store) and acquire
 * (load-before-load/store) barriers, and newer MIPS does have
 * instruction encodings for finer-grained barriers like this, but I
 * dunno how to appropriately conditionalize their use or get the
 * assembler to be happy with them, so we'll use these definitions for
 * now.
 */
#define	SYNC_PRODUCER	SYNC_REL
#define	SYNC_CONSUMER	SYNC_ACQ
676 
/* CPU dependent hook for cp0 load delays */
#if defined(MIPS1) || defined(MIPS2) || defined(MIPS3)
#define	MFC0_HAZARD	sll $0,$0,1	/* super scalar nop */
#else
#define	MFC0_HAZARD	/* nothing */
#endif

/*
 * MFC0/MTC0: coprocessor-0 move, 32-bit on MIPS1/2/32 ISAs and
 * 64-bit (dmfc0/dmtc0) on MIPS3/4/64 ISAs.
 */
#if _MIPS_ISA == _MIPS_ISA_MIPS1 || _MIPS_ISA == _MIPS_ISA_MIPS2 || \
    _MIPS_ISA == _MIPS_ISA_MIPS32
#define	MFC0		mfc0
#define	MTC0		mtc0
#endif
#if _MIPS_ISA == _MIPS_ISA_MIPS3 || _MIPS_ISA == _MIPS_ISA_MIPS4 || \
    _MIPS_ISA == _MIPS_ISA_MIPS64
#define	MFC0		dmfc0
#define	MTC0		dmtc0
#endif

/*
 * Global-pointer (gp) setup for PIC code under the old (o32/o64) ABIs.
 * Only needed for abicalls (SVR4 PIC) objects.
 */
#if defined(__mips_o32) || defined(__mips_o64)

#ifdef __mips_abicalls
#define	CPRESTORE(r)	.cprestore r
#define	CPLOAD(r)	.cpload r
#else
#define	CPRESTORE(r)	/* not needed */
#define	CPLOAD(r)	/* not needed */
#endif

/* SETUP_GP: derive gp from t9 at a normal (t9-called) entry point. */
#define	SETUP_GP	\
			.set push;				\
			.set noreorder;				\
			.cpload	t9;				\
			.set pop
/*
 * SETUP_GPX: derive gp from the pc via bal when t9 is not valid;
 * r is a scratch register used to preserve the caller's ra.
 */
#define	SETUP_GPX(r)	\
			.set push;				\
			.set noreorder;				\
			move	r,ra;	/* save old ra */	\
			bal	7f;				\
			nop;					\
		7:	.cpload	ra;				\
			move	ra,r;				\
			.set pop
/* SETUP_GPX_L: as SETUP_GPX, but with a caller-supplied label. */
#define	SETUP_GPX_L(r,lbl)	\
			.set push;				\
			.set noreorder;				\
			move	r,ra;	/* save old ra */	\
			bal	lbl;				\
			nop;					\
		lbl:	.cpload	ra;				\
			move	ra,r;				\
			.set pop
#define	SAVE_GP(x)	.cprestore x

#define	SETUP_GP64(a,b)		/* n32/n64 specific */
#define	SETUP_GP64_R(a,b)	/* n32/n64 specific */
#define	SETUP_GPX64(a,b)	/* n32/n64 specific */
#define	SETUP_GPX64_L(a,b,c)	/* n32/n64 specific */
#define	RESTORE_GP64		/* n32/n64 specific */
#define	USE_ALT_CP(a)		/* n32/n64 specific */
#endif /* __mips_o32 || __mips_o64 */
737 
/*
 * REG_PROLOGUE/REG_EPILOGUE bracket register-width-sensitive code;
 * on n32/n64 they additionally enable mips3 instructions.
 */
#if defined(__mips_o32) || defined(__mips_o64)
#define	REG_PROLOGUE	.set push
#define	REG_EPILOGUE	.set pop
#endif
#if defined(__mips_n32) || defined(__mips_n64)
#define	REG_PROLOGUE	.set push ; .set mips3
#define	REG_EPILOGUE	.set pop
#endif

/*
 * Global-pointer setup for the new (n32/n64) ABIs, via .cpsetup:
 * a is the gp save slot/register, b a scratch register.  The o32-only
 * macros become no-ops here.
 */
#if defined(__mips_n32) || defined(__mips_n64)
#define	SETUP_GP		/* o32 specific */
#define	SETUP_GPX(r)		/* o32 specific */
#define	SETUP_GPX_L(r,lbl)	/* o32 specific */
#define	SAVE_GP(x)		/* o32 specific */
#define	SETUP_GP64(a,b)		.cpsetup t9, a, b
/* SETUP_GPX64: derive gp from the pc via bal when t9 is not valid. */
#define	SETUP_GPX64(a,b)	\
				.set push;			\
				move	b,ra;			\
				.set noreorder;			\
				bal	7f;			\
				nop;				\
			7:	.set pop;			\
				.cpsetup ra, a, 7b;		\
				move	ra,b
/* SETUP_GPX64_L: as SETUP_GPX64, but with a caller-supplied label. */
#define	SETUP_GPX64_L(a,b,c)	\
				.set push;			\
				move	b,ra;			\
				.set noreorder;			\
				bal	c;			\
				nop;				\
			c:	.set pop;			\
				.cpsetup ra, a, c;		\
				move	ra,b
#define	RESTORE_GP64		.cpreturn
#define	USE_ALT_CP(a)		.cplocal a
#endif	/* __mips_n32 || __mips_n64 */
774 
/*
 * The DYNAMIC_STATUS_MASK option adds an additional masking operation
 * when updating the hardware interrupt mask in the status register.
 *
 * This is useful for platforms that need to at run-time mask
 * interrupts based on motherboard configuration or to handle
 * slowly clearing interrupts.
 *
 * XXX this is only currently implemented for mips3.
 */
#ifdef MIPS_DYNAMIC_STATUS_MASK
/* AND the pending status value (sr) with the run-time mask variable. */
#define	DYNAMIC_STATUS_MASK(sr,scratch)	\
	lw	scratch, mips_dynamic_status_mask; \
	and	sr, sr, scratch

/* As above, but first force interrupts enabled for return to user. */
#define	DYNAMIC_STATUS_MASK_TOUSER(sr,scratch1)		\
	ori	sr, (MIPS_INT_MASK | MIPS_SR_INT_IE);	\
	DYNAMIC_STATUS_MASK(sr,scratch1)
#else
#define	DYNAMIC_STATUS_MASK(sr,scratch)
#define	DYNAMIC_STATUS_MASK_TOUSER(sr,scratch1)
#endif

/* See lock_stubs.S. */
#define	LOG2_MIPS_LOCK_RAS_SIZE	8
#define	MIPS_LOCK_RAS_SIZE	256	/* 16 bytes left over */

/* Address of a cpu_info_store field, by its CPU_INFO_* offset name. */
#define	CPUVAR(off) _C_LABEL(cpu_info_store)+__CONCAT(CPU_INFO_,off)

#endif /* _MIPS_ASM_H */
805