/* Legacy sub-word atomics for RISC-V.

   Copyright (C) 2016-2020 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */

#ifdef __riscv_atomic

#include <stdbool.h>

#define INVERT		"not %[tmp1], %[tmp1]\n\t"
#define DONT_INVERT	""

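/* The base RISC-V 'A' extension only provides LR/SC and AMOs for word
   (and, on RV64, doubleword) accesses, so the 1- and 2-byte __sync_*
   entry points are emulated here: the target address is rounded down
   to its containing aligned word, the sub-word operand is positioned
   with a shift and mask, and an lr.w.aq/sc.w.rl retry loop rewrites
   only the masked bytes of that word.  */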
#define GENERATE_FETCH_AND_OP(type, size, opname, insn, invert, cop)	\
  type __sync_fetch_and_ ## opname ## _ ## size (type *p, type v)	\
  {									\
    unsigned long aligned_addr = ((unsigned long) p) & ~3UL;		\
    int shift = (((unsigned long) p) & 3) * 8;				\
    unsigned mask = ((1U << ((sizeof v) * 8)) - 1) << shift;		\
    unsigned old, tmp1, tmp2;						\
									\
    asm volatile ("1:\n\t"						\
		  "lr.w.aq %[old], %[mem]\n\t"				\
		  #insn " %[tmp1], %[old], %[value]\n\t"		\
		  invert						\
		  "and %[tmp1], %[tmp1], %[mask]\n\t"			\
		  "and %[tmp2], %[old], %[not_mask]\n\t"		\
		  "or %[tmp2], %[tmp2], %[tmp1]\n\t"			\
		  "sc.w.rl %[tmp1], %[tmp2], %[mem]\n\t"		\
		  "bnez %[tmp1], 1b"					\
		  : [old] "=&r" (old),					\
		    [mem] "+A" (*(volatile unsigned*) aligned_addr),	\
		    [tmp1] "=&r" (tmp1),				\
		    [tmp2] "=&r" (tmp2)					\
		  : [value] "r" (((unsigned) v) << shift),		\
		    [mask] "r" (mask),					\
		    [not_mask] "r" (~mask));				\
									\
    return (type) (old >> shift);					\
  }									\
									\
  type __sync_ ## opname ## _and_fetch_ ## size (type *p, type v)	\
  {									\
    type o = __sync_fetch_and_ ## opname ## _ ## size (p, v);		\
    return cop;								\
  }

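/* Sub-word compare-and-swap: the expected and new values are shifted
   into place and masked before entering the asm; the store-conditional
   is attempted only when the masked bytes of the loaded word match the
   expected value, otherwise the loop exits with the old contents.  */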
#define GENERATE_COMPARE_AND_SWAP(type, size)				\
  type __sync_val_compare_and_swap_ ## size (type *p, type o, type n)	\
  {									\
    unsigned long aligned_addr = ((unsigned long) p) & ~3UL;		\
    int shift = (((unsigned long) p) & 3) * 8;				\
    unsigned mask = ((1U << ((sizeof o) * 8)) - 1) << shift;		\
    unsigned old, tmp1;							\
									\
    asm volatile ("1:\n\t"						\
		  "lr.w.aq %[old], %[mem]\n\t"				\
		  "and %[tmp1], %[old], %[mask]\n\t"			\
		  "bne %[tmp1], %[o], 1f\n\t"				\
		  "and %[tmp1], %[old], %[not_mask]\n\t"		\
		  "or %[tmp1], %[tmp1], %[n]\n\t"			\
		  "sc.w.rl %[tmp1], %[tmp1], %[mem]\n\t"		\
		  "bnez %[tmp1], 1b\n\t"				\
		  "1:"							\
		  : [old] "=&r" (old),					\
		    [mem] "+A" (*(volatile unsigned*) aligned_addr),	\
		    [tmp1] "=&r" (tmp1)					\
		  : [o] "r" ((((unsigned) o) << shift) & mask),		\
		    [n] "r" ((((unsigned) n) << shift) & mask),		\
		    [mask] "r" (mask),					\
		    [not_mask] "r" (~mask));				\
									\
    return (type) (old >> shift);					\
  }									\
  bool __sync_bool_compare_and_swap_ ## size (type *p, type o, type n)	\
  {									\
    return __sync_val_compare_and_swap(p, o, n) == o;			\
  }

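/* Emit the fetch-and-op, op-and-fetch, and compare-and-swap entry
   points for one access size.  NAND reuses the AND instruction and
   inverts the result (INVERT) before it is masked back into the word.  */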
#define GENERATE_ALL(type, size)					\
  GENERATE_FETCH_AND_OP(type, size, add, add, DONT_INVERT, o + v)	\
  GENERATE_FETCH_AND_OP(type, size, sub, sub, DONT_INVERT, o - v)	\
  GENERATE_FETCH_AND_OP(type, size, and, and, DONT_INVERT, o & v)	\
  GENERATE_FETCH_AND_OP(type, size, xor, xor, DONT_INVERT, o ^ v)	\
  GENERATE_FETCH_AND_OP(type, size, or, or, DONT_INVERT, o | v)		\
  GENERATE_FETCH_AND_OP(type, size, nand, and, INVERT, ~(o & v))	\
  GENERATE_COMPARE_AND_SWAP(type, size)

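/* Instantiate the helpers for 1-byte and 2-byte accesses, the sub-word
   sizes the compiler emits library calls for.  */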
GENERATE_ALL(unsigned char, 1)
GENERATE_ALL(unsigned short, 2)

#endif