/* Legacy sub-word atomics for RISC-V.

   Copyright (C) 2016-2022 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */

#ifdef __riscv_atomic

#include <stdbool.h>

#define INVERT		"not %[tmp1], %[tmp1]\n\t"
#define DONT_INVERT	""
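
/* The RISC-V "A" extension provides atomic memory operations only on
   naturally aligned 32-bit (and, on RV64, 64-bit) words, so the legacy
   __sync_* entry points for 1- and 2-byte types are emulated here with
   an LR.W/SC.W retry loop on the aligned word containing the operand:
   load-reserve the word, update just the sub-word lane selected by a
   shifted mask while preserving the neighbouring bytes, and retry if
   the store-conditional fails.  The lr.w.aq/sc.w.rl pair gives the
   loop acquire and release ordering.  */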
#define GENERATE_FETCH_AND_OP(type, size, opname, insn, invert, cop)	\
  type __sync_fetch_and_ ## opname ## _ ## size (type *p, type v)	\
  {									\
    unsigned long aligned_addr = ((unsigned long) p) & ~3UL;		\
    int shift = (((unsigned long) p) & 3) * 8;				\
    unsigned mask = ((1U << ((sizeof v) * 8)) - 1) << shift;		\
    unsigned old, tmp1, tmp2;						\
									\
    asm volatile ("1:\n\t"						\
		  "lr.w.aq %[old], %[mem]\n\t"				\
		  #insn " %[tmp1], %[old], %[value]\n\t"		\
		  invert						\
		  "and %[tmp1], %[tmp1], %[mask]\n\t"			\
		  "and %[tmp2], %[old], %[not_mask]\n\t"		\
		  "or %[tmp2], %[tmp2], %[tmp1]\n\t"			\
		  "sc.w.rl %[tmp1], %[tmp2], %[mem]\n\t"		\
		  "bnez %[tmp1], 1b"					\
		  : [old] "=&r" (old),					\
		    [mem] "+A" (*(volatile unsigned*) aligned_addr),	\
		    [tmp1] "=&r" (tmp1),				\
		    [tmp2] "=&r" (tmp2)					\
		  : [value] "r" (((unsigned) v) << shift),		\
		    [mask] "r" (mask),					\
		    [not_mask] "r" (~mask));				\
									\
    return (type) (old >> shift);					\
  }									\
									\
  type __sync_ ## opname ## _and_fetch_ ## size (type *p, type v)	\
  {									\
    type o = __sync_fetch_and_ ## opname ## _ ## size (p, v);		\
    return cop;								\
  }
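
/* Illustrative only: with GENERATE_ALL below, this macro emits, e.g.,

     unsigned char __sync_fetch_and_add_1 (unsigned char *p, unsigned char v);
     unsigned char __sync_add_and_fetch_1 (unsigned char *p, unsigned char v);

   the former returning the byte's previous value, the latter recomputing
   the updated value via COP.  Callers normally reach these through the
   type-generic builtins, e.g. __sync_fetch_and_add (&byte, 1), which GCC
   lowers to the size-suffixed libcall when it cannot inline a sub-word
   atomic on this target.  */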

#define GENERATE_COMPARE_AND_SWAP(type, size)				\
  type __sync_val_compare_and_swap_ ## size (type *p, type o, type n)	\
  {									\
    unsigned long aligned_addr = ((unsigned long) p) & ~3UL;		\
    int shift = (((unsigned long) p) & 3) * 8;				\
    unsigned mask = ((1U << ((sizeof o) * 8)) - 1) << shift;		\
    unsigned old, tmp1;							\
									\
    asm volatile ("1:\n\t"						\
		  "lr.w.aq %[old], %[mem]\n\t"				\
		  "and %[tmp1], %[old], %[mask]\n\t"			\
		  "bne %[tmp1], %[o], 1f\n\t"				\
		  "and %[tmp1], %[old], %[not_mask]\n\t"		\
		  "or %[tmp1], %[tmp1], %[n]\n\t"			\
		  "sc.w.rl %[tmp1], %[tmp1], %[mem]\n\t"		\
		  "bnez %[tmp1], 1b\n\t"				\
		  "1:"							\
		  : [old] "=&r" (old),					\
		    [mem] "+A" (*(volatile unsigned*) aligned_addr),	\
		    [tmp1] "=&r" (tmp1)					\
		  : [o] "r" ((((unsigned) o) << shift) & mask),		\
		    [n] "r" ((((unsigned) n) << shift) & mask),		\
		    [mask] "r" (mask),					\
		    [not_mask] "r" (~mask));				\
									\
    return (type) (old >> shift);					\
  }									\
  bool __sync_bool_compare_and_swap_ ## size (type *p, type o, type n)	\
  {									\
    return __sync_val_compare_and_swap(p, o, n) == o;			\
  }
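
/* Illustrative only: for the 2-byte instantiation below,

     unsigned short prev =
       __sync_val_compare_and_swap_2 (p, expected, desired);

   returns the prior 16-bit value whether or not the swap succeeded.
   The bool variant goes through the type-generic
   __sync_val_compare_and_swap builtin, which in effect resolves back to
   the size-suffixed routine above, and simply compares the result with
   the expected value.  */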

#define GENERATE_ALL(type, size)					\
  GENERATE_FETCH_AND_OP(type, size, add, add, DONT_INVERT, o + v)	\
  GENERATE_FETCH_AND_OP(type, size, sub, sub, DONT_INVERT, o - v)	\
  GENERATE_FETCH_AND_OP(type, size, and, and, DONT_INVERT, o & v)	\
  GENERATE_FETCH_AND_OP(type, size, xor, xor, DONT_INVERT, o ^ v)	\
  GENERATE_FETCH_AND_OP(type, size, or, or, DONT_INVERT, o | v)		\
  GENERATE_FETCH_AND_OP(type, size, nand, and, INVERT, ~(o & v))	\
  GENERATE_COMPARE_AND_SWAP(type, size)

GENERATE_ALL(unsigned char, 1)
GENERATE_ALL(unsigned short, 2)
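
/* Together the two expansions above define, for the 1- and 2-byte cases:
   __sync_fetch_and_{add,sub,and,xor,or,nand}_{1,2},
   __sync_{add,sub,and,xor,or,nand}_and_fetch_{1,2},
   __sync_val_compare_and_swap_{1,2}, and
   __sync_bool_compare_and_swap_{1,2}.  */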

#endif /* __riscv_atomic */