;; Machine description for AArch64 processor synchronization primitives.
;; Copyright (C) 2009-2017 Free Software Foundation, Inc.
;; Contributed by ARM Ltd.
;;
;; This file is part of GCC.
;;
;; GCC is free software; you can redistribute it and/or modify it
;; under the terms of the GNU General Public License as published by
;; the Free Software Foundation; either version 3, or (at your option)
;; any later version.
;;
;; GCC is distributed in the hope that it will be useful, but
;; WITHOUT ANY WARRANTY; without even the implied warranty of
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;; General Public License for more details.
;;
;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING3.  If not see
;; <http://www.gnu.org/licenses/>.

;; Instruction patterns.

(define_expand "atomic_compare_and_swap<mode>"
  [(match_operand:SI 0 "register_operand" "")			;; bool out
   (match_operand:ALLI 1 "register_operand" "")			;; val out
   (match_operand:ALLI 2 "aarch64_sync_memory_operand" "")	;; memory
   (match_operand:ALLI 3 "general_operand" "")			;; expected
   (match_operand:ALLI 4 "register_operand" "")			;; desired
   (match_operand:SI 5 "const_int_operand")			;; is_weak
   (match_operand:SI 6 "const_int_operand")			;; mod_s
   (match_operand:SI 7 "const_int_operand")]			;; mod_f
  ""
  {
    aarch64_expand_compare_and_swap (operands);
    DONE;
  }
)

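;; Informal note: this named pattern is the entry point for the
;; __atomic_compare_exchange builtins.  aarch64_expand_compare_and_swap
;; selects either the LL/SC pattern below or, when TARGET_LSE, the
;; single-instruction CAS form, and then materializes the bool output
;; from the condition flags.
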
(define_insn_and_split "aarch64_compare_and_swap<mode>"
  [(set (reg:CC CC_REGNUM)					;; bool out
    (unspec_volatile:CC [(const_int 0)] UNSPECV_ATOMIC_CMPSW))
   (set (match_operand:SI 0 "register_operand" "=&r")		;; val out
    (zero_extend:SI
      (match_operand:SHORT 1 "aarch64_sync_memory_operand" "+Q"))) ;; memory
   (set (match_dup 1)
    (unspec_volatile:SHORT
      [(match_operand:SI 2 "aarch64_plus_operand" "rI")	;; expected
       (match_operand:SHORT 3 "register_operand" "r")	;; desired
       (match_operand:SI 4 "const_int_operand")		;; is_weak
       (match_operand:SI 5 "const_int_operand")		;; mod_s
       (match_operand:SI 6 "const_int_operand")]	;; mod_f
      UNSPECV_ATOMIC_CMPSW))
   (clobber (match_scratch:SI 7 "=&r"))]
  ""
  "#"
  "&& reload_completed"
  [(const_int 0)]
  {
    aarch64_split_compare_and_swap (operands);
    DONE;
  }
)

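;; A rough sketch of what the post-reload split emits for a strong,
;; sequentially consistent byte-sized compare-and-swap (registers are
;; illustrative only, not what the allocator necessarily picks):
;;
;;	.L1:	ldaxrb	w1, [x0]	; load-acquire exclusive
;;		cmp	w1, w2		; compare against the expected value
;;		bne	.L2		; mismatch: fail without storing
;;		stlxrb	w4, w3, [x0]	; store-release exclusive, status in w4
;;		cbnz	w4, .L1		; 0 means success; otherwise retry
;;	.L2:
;;
;; A weak compare-and-swap drops the retry loop and simply reports failure
;; when the exclusive store does not succeed.
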
(define_insn_and_split "aarch64_compare_and_swap<mode>"
  [(set (reg:CC CC_REGNUM)					;; bool out
    (unspec_volatile:CC [(const_int 0)] UNSPECV_ATOMIC_CMPSW))
   (set (match_operand:GPI 0 "register_operand" "=&r")		;; val out
    (match_operand:GPI 1 "aarch64_sync_memory_operand" "+Q"))	;; memory
   (set (match_dup 1)
    (unspec_volatile:GPI
      [(match_operand:GPI 2 "aarch64_plus_operand" "rI")	;; expected
       (match_operand:GPI 3 "register_operand" "r")		;; desired
       (match_operand:SI 4 "const_int_operand")			;; is_weak
       (match_operand:SI 5 "const_int_operand")			;; mod_s
       (match_operand:SI 6 "const_int_operand")]		;; mod_f
      UNSPECV_ATOMIC_CMPSW))
   (clobber (match_scratch:SI 7 "=&r"))]
  ""
  "#"
  "&& reload_completed"
  [(const_int 0)]
  {
    aarch64_split_compare_and_swap (operands);
    DONE;
  }
)

(define_insn_and_split "aarch64_compare_and_swap<mode>_lse"
  [(set (reg:CC CC_REGNUM)					;; bool out
    (unspec_volatile:CC [(const_int 0)] UNSPECV_ATOMIC_CMPSW))
   (set (match_operand:SI 0 "register_operand" "=&r")		;; val out
    (zero_extend:SI
      (match_operand:SHORT 1 "aarch64_sync_memory_operand" "+Q"))) ;; memory
   (set (match_dup 1)
    (unspec_volatile:SHORT
      [(match_operand:SI 2 "aarch64_plus_operand" "rI")	;; expected
       (match_operand:SHORT 3 "register_operand" "r")	;; desired
       (match_operand:SI 4 "const_int_operand")		;; is_weak
       (match_operand:SI 5 "const_int_operand")		;; mod_s
       (match_operand:SI 6 "const_int_operand")]	;; mod_f
      UNSPECV_ATOMIC_CMPSW))]
  "TARGET_LSE"
  "#"
  "&& reload_completed"
  [(const_int 0)]
  {
    aarch64_gen_atomic_cas (operands[0], operands[1],
			    operands[2], operands[3],
			    operands[5]);
    DONE;
  }
)

(define_insn_and_split "aarch64_compare_and_swap<mode>_lse"
  [(set (reg:CC CC_REGNUM)					;; bool out
    (unspec_volatile:CC [(const_int 0)] UNSPECV_ATOMIC_CMPSW))
   (set (match_operand:GPI 0 "register_operand" "=&r")		;; val out
    (match_operand:GPI 1 "aarch64_sync_memory_operand" "+Q"))	;; memory
   (set (match_dup 1)
    (unspec_volatile:GPI
      [(match_operand:GPI 2 "aarch64_plus_operand" "rI")	;; expected
       (match_operand:GPI 3 "register_operand" "r")		;; desired
       (match_operand:SI 4 "const_int_operand")			;; is_weak
       (match_operand:SI 5 "const_int_operand")			;; mod_s
       (match_operand:SI 6 "const_int_operand")]		;; mod_f
      UNSPECV_ATOMIC_CMPSW))]
  "TARGET_LSE"
  "#"
  "&& reload_completed"
  [(const_int 0)]
  {
    aarch64_gen_atomic_cas (operands[0], operands[1],
			    operands[2], operands[3],
			    operands[5]);
    DONE;
  }
)

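;; With LSE, both splits above go through aarch64_gen_atomic_cas and the
;; whole operation collapses into one CAS instruction.  Informally, for a
;; sequentially consistent word-sized operation (illustrative registers):
;;
;;	mov	w0, w2			; CAS reads and overwrites the
;;	casal	w0, w3, [x1]		; comparison register, so the expected
;;	cmp	w0, w2			; value is copied in first and the
;;					; flags are recomputed afterwards
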
(define_expand "atomic_exchange<mode>"
 [(match_operand:ALLI 0 "register_operand" "")
  (match_operand:ALLI 1 "aarch64_sync_memory_operand" "")
  (match_operand:ALLI 2 "register_operand" "")
  (match_operand:SI 3 "const_int_operand" "")]
  ""
  {
    rtx (*gen) (rtx, rtx, rtx, rtx);

    /* Use an atomic SWP when available.  */
    if (TARGET_LSE)
      gen = gen_aarch64_atomic_exchange<mode>_lse;
    else
      gen = gen_aarch64_atomic_exchange<mode>;

    emit_insn (gen (operands[0], operands[1], operands[2], operands[3]));

    DONE;
  }
)

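;; Informal note: __atomic_exchange_n resolves to this expander; with LSE a
;; single SWP instruction (see aarch64_atomic_swp below) performs the
;; exchange, otherwise the LL/SC loop in the next pattern is used.
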
(define_insn_and_split "aarch64_atomic_exchange<mode>"
  [(set (match_operand:ALLI 0 "register_operand" "=&r")	;; output
    (match_operand:ALLI 1 "aarch64_sync_memory_operand" "+Q")) ;; memory
   (set (match_dup 1)
    (unspec_volatile:ALLI
      [(match_operand:ALLI 2 "register_operand" "r")	;; input
       (match_operand:SI 3 "const_int_operand" "")]	;; model
      UNSPECV_ATOMIC_EXCHG))
   (clobber (reg:CC CC_REGNUM))
   (clobber (match_scratch:SI 4 "=&r"))]
  ""
  "#"
  "&& reload_completed"
  [(const_int 0)]
  {
    aarch64_split_atomic_op (SET, operands[0], NULL, operands[1],
			     operands[2], operands[3], operands[4]);
    DONE;
  }
)

(define_insn_and_split "aarch64_atomic_exchange<mode>_lse"
  [(set (match_operand:ALLI 0 "register_operand" "=&r")
    (match_operand:ALLI 1 "aarch64_sync_memory_operand" "+Q"))
   (set (match_dup 1)
    (unspec_volatile:ALLI
      [(match_operand:ALLI 2 "register_operand" "r")
       (match_operand:SI 3 "const_int_operand" "")]
      UNSPECV_ATOMIC_EXCHG))]
  "TARGET_LSE"
  "#"
  "&& reload_completed"
  [(const_int 0)]
  {
    aarch64_gen_atomic_ldop (SET, operands[0], NULL, operands[1],
			     operands[2], operands[3]);
    DONE;
  }
)

(define_expand "atomic_<atomic_optab><mode>"
 [(match_operand:ALLI 0 "aarch64_sync_memory_operand" "")
  (atomic_op:ALLI
   (match_operand:ALLI 1 "<atomic_op_operand>" "")
   (match_operand:SI 2 "const_int_operand"))]
  ""
  {
    rtx (*gen) (rtx, rtx, rtx);

    /* Use an atomic load-operate instruction when possible.  */
    if (aarch64_atomic_ldop_supported_p (<CODE>))
      gen = gen_aarch64_atomic_<atomic_optab><mode>_lse;
    else
      gen = gen_aarch64_atomic_<atomic_optab><mode>;

    emit_insn (gen (operands[0], operands[1], operands[2]));

    DONE;
  }
)

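;; Informal note on the dispatch above: aarch64_atomic_ldop_supported_p only
;; accepts operations with a direct LSE encoding.  PLUS, IOR and XOR map onto
;; LDADD, LDSET and LDEOR; MINUS is handled by negating the value and using
;; LDADD, and AND by inverting it and using LDCLR.  Anything else, or any
;; operation on a !TARGET_LSE core, takes the LL/SC loop below.
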
(define_insn_and_split "aarch64_atomic_<atomic_optab><mode>"
 [(set (match_operand:ALLI 0 "aarch64_sync_memory_operand" "+Q")
   (unspec_volatile:ALLI
    [(atomic_op:ALLI (match_dup 0)
      (match_operand:ALLI 1 "<atomic_op_operand>" "r<const_atomic>"))
     (match_operand:SI 2 "const_int_operand")]
    UNSPECV_ATOMIC_OP))
  (clobber (reg:CC CC_REGNUM))
  (clobber (match_scratch:ALLI 3 "=&r"))
  (clobber (match_scratch:SI 4 "=&r"))]
  ""
  "#"
  "&& reload_completed"
  [(const_int 0)]
  {
    aarch64_split_atomic_op (<CODE>, NULL, operands[3], operands[0],
			     operands[1], operands[2], operands[4]);
    DONE;
  }
)

(define_insn_and_split "aarch64_atomic_<atomic_optab><mode>_lse"
  [(set (match_operand:ALLI 0 "aarch64_sync_memory_operand" "+Q")
    (unspec_volatile:ALLI
      [(atomic_op:ALLI (match_dup 0)
	(match_operand:ALLI 1 "<atomic_op_operand>" "r<const_atomic>"))
       (match_operand:SI 2 "const_int_operand")]
      UNSPECV_ATOMIC_OP))
   (clobber (match_scratch:ALLI 3 "=&r"))]
  "TARGET_LSE"
  "#"
  "&& reload_completed"
  [(const_int 0)]
  {
    aarch64_gen_atomic_ldop (<CODE>, operands[3], NULL, operands[0],
			     operands[1], operands[2]);
    DONE;
  }
)

(define_insn_and_split "atomic_nand<mode>"
  [(set (match_operand:ALLI 0 "aarch64_sync_memory_operand" "+Q")
    (unspec_volatile:ALLI
      [(not:ALLI
	(and:ALLI (match_dup 0)
	  (match_operand:ALLI 1 "aarch64_logical_operand" "r<lconst_atomic>")))
       (match_operand:SI 2 "const_int_operand")]		;; model
      UNSPECV_ATOMIC_OP))
   (clobber (reg:CC CC_REGNUM))
   (clobber (match_scratch:ALLI 3 "=&r"))
   (clobber (match_scratch:SI 4 "=&r"))]
  ""
  "#"
  "&& reload_completed"
  [(const_int 0)]
  {
    aarch64_split_atomic_op (NOT, NULL, operands[3], operands[0],
			     operands[1], operands[2], operands[4]);
    DONE;
  }
)

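;; NAND needs its own pattern because LSE has no load-NAND instruction, so it
;; always expands to an LL/SC loop.  A rough sketch for SImode with a relaxed
;; model (illustrative registers):
;;
;;	.L1:	ldxr	w1, [x0]
;;		and	w1, w1, w2
;;		mvn	w1, w1
;;		stxr	w3, w1, [x0]
;;		cbnz	w3, .L1
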
;; Load-operate-store, returning the original memory data.

(define_expand "atomic_fetch_<atomic_optab><mode>"
 [(match_operand:ALLI 0 "register_operand" "")
  (match_operand:ALLI 1 "aarch64_sync_memory_operand" "")
  (atomic_op:ALLI
   (match_operand:ALLI 2 "<atomic_op_operand>" "")
   (match_operand:SI 3 "const_int_operand"))]
 ""
{
  rtx (*gen) (rtx, rtx, rtx, rtx);

  /* Use an atomic load-operate instruction when possible.  */
  if (aarch64_atomic_ldop_supported_p (<CODE>))
    gen = gen_aarch64_atomic_fetch_<atomic_optab><mode>_lse;
  else
    gen = gen_aarch64_atomic_fetch_<atomic_optab><mode>;

  emit_insn (gen (operands[0], operands[1], operands[2], operands[3]));

  DONE;
})

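;; Informal example: __atomic_fetch_add (p, n, __ATOMIC_ACQUIRE) on an int
;; lands here with <atomic_optab> = add.  On an LSE core it becomes a single
;; "ldadda w1, w0, [x2]"; otherwise the LL/SC loop below returns the
;; pre-operation value.
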
(define_insn_and_split "aarch64_atomic_fetch_<atomic_optab><mode>"
  [(set (match_operand:ALLI 0 "register_operand" "=&r")
    (match_operand:ALLI 1 "aarch64_sync_memory_operand" "+Q"))
   (set (match_dup 1)
    (unspec_volatile:ALLI
      [(atomic_op:ALLI (match_dup 1)
	(match_operand:ALLI 2 "<atomic_op_operand>" "r<const_atomic>"))
       (match_operand:SI 3 "const_int_operand")]		;; model
      UNSPECV_ATOMIC_OP))
   (clobber (reg:CC CC_REGNUM))
   (clobber (match_scratch:ALLI 4 "=&r"))
   (clobber (match_scratch:SI 5 "=&r"))]
  ""
  "#"
  "&& reload_completed"
  [(const_int 0)]
  {
    aarch64_split_atomic_op (<CODE>, operands[0], operands[4], operands[1],
			     operands[2], operands[3], operands[5]);
    DONE;
  }
)

(define_insn_and_split "aarch64_atomic_fetch_<atomic_optab><mode>_lse"
  [(set (match_operand:ALLI 0 "register_operand" "=&r")
    (match_operand:ALLI 1 "aarch64_sync_memory_operand" "+Q"))
   (set (match_dup 1)
    (unspec_volatile:ALLI
      [(atomic_op:ALLI (match_dup 1)
	(match_operand:ALLI 2 "<atomic_op_operand>" "r<const_atomic>"))
       (match_operand:SI 3 "const_int_operand")]
      UNSPECV_ATOMIC_LDOP))]
  "TARGET_LSE"
  "#"
  "&& reload_completed"
  [(const_int 0)]
  {
    aarch64_gen_atomic_ldop (<CODE>, operands[0], NULL, operands[1],
			     operands[2], operands[3]);
    DONE;
  }
)

(define_insn_and_split "atomic_fetch_nand<mode>"
  [(set (match_operand:ALLI 0 "register_operand" "=&r")
    (match_operand:ALLI 1 "aarch64_sync_memory_operand" "+Q"))
   (set (match_dup 1)
    (unspec_volatile:ALLI
      [(not:ALLI
	 (and:ALLI (match_dup 1)
	   (match_operand:ALLI 2 "aarch64_logical_operand" "r<lconst_atomic>")))
       (match_operand:SI 3 "const_int_operand")]		;; model
      UNSPECV_ATOMIC_OP))
   (clobber (reg:CC CC_REGNUM))
   (clobber (match_scratch:ALLI 4 "=&r"))
   (clobber (match_scratch:SI 5 "=&r"))]
  ""
  "#"
  "&& reload_completed"
  [(const_int 0)]
  {
    aarch64_split_atomic_op (NOT, operands[0], operands[4], operands[1],
			     operands[2], operands[3], operands[5]);
    DONE;
  }
)

;; Load-operate-store, returning the updated memory data.

(define_expand "atomic_<atomic_optab>_fetch<mode>"
 [(match_operand:ALLI 0 "register_operand" "")
  (atomic_op:ALLI
   (match_operand:ALLI 1 "aarch64_sync_memory_operand" "")
   (match_operand:ALLI 2 "<atomic_op_operand>" ""))
  (match_operand:SI 3 "const_int_operand")]
 ""
{
  rtx (*gen) (rtx, rtx, rtx, rtx);
  rtx value = operands[2];

  /* Use an atomic load-operate instruction when possible.  */
  if (aarch64_atomic_ldop_supported_p (<CODE>))
    gen = gen_aarch64_atomic_<atomic_optab>_fetch<mode>_lse;
  else
    gen = gen_aarch64_atomic_<atomic_optab>_fetch<mode>;

  emit_insn (gen (operands[0], operands[1], value, operands[3]));

  DONE;
})

(define_insn_and_split "aarch64_atomic_<atomic_optab>_fetch<mode>"
  [(set (match_operand:ALLI 0 "register_operand" "=&r")
    (atomic_op:ALLI
      (match_operand:ALLI 1 "aarch64_sync_memory_operand" "+Q")
      (match_operand:ALLI 2 "<atomic_op_operand>" "r<const_atomic>")))
   (set (match_dup 1)
    (unspec_volatile:ALLI
      [(match_dup 1) (match_dup 2)
       (match_operand:SI 3 "const_int_operand")]		;; model
      UNSPECV_ATOMIC_OP))
   (clobber (reg:CC CC_REGNUM))
   (clobber (match_scratch:SI 4 "=&r"))]
  ""
  "#"
  "&& reload_completed"
  [(const_int 0)]
  {
    aarch64_split_atomic_op (<CODE>, NULL, operands[0], operands[1],
			     operands[2], operands[3], operands[4]);
    DONE;
  }
)

(define_insn_and_split "aarch64_atomic_<atomic_optab>_fetch<mode>_lse"
  [(set (match_operand:ALLI 0 "register_operand" "=&r")
    (atomic_op:ALLI
     (match_operand:ALLI 1 "aarch64_sync_memory_operand" "+Q")
     (match_operand:ALLI 2 "<atomic_op_operand>" "r<const_atomic>")))
   (set (match_dup 1)
    (unspec_volatile:ALLI
      [(match_dup 1)
       (match_dup 2)
       (match_operand:SI 3 "const_int_operand")]
      UNSPECV_ATOMIC_LDOP))
   (clobber (match_scratch:ALLI 4 "=&r"))]
  "TARGET_LSE"
  "#"
  "&& reload_completed"
  [(const_int 0)]
  {
    aarch64_gen_atomic_ldop (<CODE>, operands[4], operands[0], operands[1],
			     operands[2], operands[3]);
    DONE;
  }
)

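;; Informal note on the LSE split above: the LD<op> instructions return the
;; original memory value, so for the op-and-fetch forms the updated value is
;; recomputed in registers afterwards, roughly (illustrative registers):
;;
;;	ldadd	w2, w4, [x1]		; w4 <- old value
;;	add	w0, w4, w2		; w0 <- updated value
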
(define_insn_and_split "atomic_nand_fetch<mode>"
  [(set (match_operand:ALLI 0 "register_operand" "=&r")
    (not:ALLI
      (and:ALLI
	(match_operand:ALLI 1 "aarch64_sync_memory_operand" "+Q")
	(match_operand:ALLI 2 "aarch64_logical_operand" "r<lconst_atomic>"))))
   (set (match_dup 1)
    (unspec_volatile:ALLI
      [(match_dup 1) (match_dup 2)
       (match_operand:SI 3 "const_int_operand")]		;; model
      UNSPECV_ATOMIC_OP))
   (clobber (reg:CC CC_REGNUM))
   (clobber (match_scratch:SI 4 "=&r"))]
  ""
  "#"
  "&& reload_completed"
  [(const_int 0)]
  {
    aarch64_split_atomic_op (NOT, NULL, operands[0], operands[1],
			     operands[2], operands[3], operands[4]);
    DONE;
  }
)

(define_insn "atomic_load<mode>"
  [(set (match_operand:ALLI 0 "register_operand" "=r")
    (unspec_volatile:ALLI
      [(match_operand:ALLI 1 "aarch64_sync_memory_operand" "Q")
       (match_operand:SI 2 "const_int_operand")]		;; model
      UNSPECV_LDA))]
  ""
  {
    enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
    if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_release (model))
      return "ldr<atomic_sfx>\t%<w>0, %1";
    else
      return "ldar<atomic_sfx>\t%<w>0, %1";
  }
)

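;; Informal examples of the output: __atomic_load_n with __ATOMIC_RELAXED
;; gives a plain "ldr", while acquire and seq_cst orderings give "ldar".
;; The acquire load suffices for seq_cst because the corresponding store
;; side uses "stlr".
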
(define_insn "atomic_store<mode>"
  [(set (match_operand:ALLI 0 "aarch64_sync_memory_operand" "=Q")
    (unspec_volatile:ALLI
      [(match_operand:ALLI 1 "general_operand" "rZ")
       (match_operand:SI 2 "const_int_operand")]		;; model
      UNSPECV_STL))]
  ""
  {
    enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
    if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_acquire (model))
      return "str<atomic_sfx>\t%<w>1, %0";
    else
      return "stlr<atomic_sfx>\t%<w>1, %0";
  }
)

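;; Informal examples: __atomic_store_n with __ATOMIC_RELAXED emits a plain
;; "str", while release and seq_cst emit "stlr".  The "rZ" constraint lets a
;; constant zero be stored directly from wzr/xzr.
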
(define_insn "aarch64_load_exclusive<mode>"
  [(set (match_operand:SI 0 "register_operand" "=r")
    (zero_extend:SI
      (unspec_volatile:SHORT
	[(match_operand:SHORT 1 "aarch64_sync_memory_operand" "Q")
	 (match_operand:SI 2 "const_int_operand")]
	UNSPECV_LX)))]
  ""
  {
    enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
    if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_release (model))
      return "ldxr<atomic_sfx>\t%w0, %1";
    else
      return "ldaxr<atomic_sfx>\t%w0, %1";
  }
)

(define_insn "aarch64_load_exclusive<mode>"
  [(set (match_operand:GPI 0 "register_operand" "=r")
    (unspec_volatile:GPI
      [(match_operand:GPI 1 "aarch64_sync_memory_operand" "Q")
       (match_operand:SI 2 "const_int_operand")]
      UNSPECV_LX))]
  ""
  {
    enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
    if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_release (model))
      return "ldxr\t%<w>0, %1";
    else
      return "ldaxr\t%<w>0, %1";
  }
)

(define_insn "aarch64_store_exclusive<mode>"
  [(set (match_operand:SI 0 "register_operand" "=r")
    (unspec_volatile:SI [(const_int 0)] UNSPECV_SX))
   (set (match_operand:ALLI 1 "aarch64_sync_memory_operand" "=Q")
    (unspec_volatile:ALLI
      [(match_operand:ALLI 2 "register_operand" "r")
       (match_operand:SI 3 "const_int_operand")]
      UNSPECV_SX))]
  ""
  {
    enum memmodel model = memmodel_from_int (INTVAL (operands[3]));
    if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_acquire (model))
      return "stxr<atomic_sfx>\t%w0, %<w>2, %1";
    else
      return "stlxr<atomic_sfx>\t%w0, %<w>2, %1";
  }
)

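;; Informal usage note: the exclusives pair up into the LL/SC loops built by
;; aarch64_split_compare_and_swap and aarch64_split_atomic_op.  The first
;; operand of the store-exclusive receives a status value, 0 on success and
;; 1 if the exclusive monitor was lost, which the loops test with cbnz.
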
(define_expand "mem_thread_fence"
  [(match_operand:SI 0 "const_int_operand" "")]
  ""
  {
    enum memmodel model = memmodel_from_int (INTVAL (operands[0]));
    if (!(is_mm_relaxed (model) || is_mm_consume (model)))
      emit_insn (gen_dmb (operands[0]));
    DONE;
  }
)

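;; Informal example: __atomic_thread_fence (__ATOMIC_SEQ_CST) emits
;; "dmb ish", __ATOMIC_ACQUIRE emits "dmb ishld" (see *dmb below), and
;; relaxed/consume fences expand to nothing.
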
(define_expand "dmb"
  [(set (match_dup 1)
    (unspec:BLK [(match_dup 1) (match_operand:SI 0 "const_int_operand")]
     UNSPEC_MB))]
  ""
  {
    operands[1] = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
    MEM_VOLATILE_P (operands[1]) = 1;
  }
)

(define_insn "*dmb"
  [(set (match_operand:BLK 0 "" "")
    (unspec:BLK [(match_dup 0) (match_operand:SI 1 "const_int_operand")]
     UNSPEC_MB))]
  ""
  {
    enum memmodel model = memmodel_from_int (INTVAL (operands[1]));
    if (is_mm_acquire (model))
      return "dmb\\tishld";
    else
      return "dmb\\tish";
  }
)

;; ARMv8.1-A LSE instructions.

;; Atomic swap with memory.
(define_insn "aarch64_atomic_swp<mode>"
 [(set (match_operand:ALLI 0 "register_operand" "+&r")
   (match_operand:ALLI 1 "aarch64_sync_memory_operand" "+Q"))
  (set (match_dup 1)
   (unspec_volatile:ALLI
    [(match_operand:ALLI 2 "register_operand" "r")
     (match_operand:SI 3 "const_int_operand" "")]
    UNSPECV_ATOMIC_SWP))]
  "TARGET_LSE && reload_completed"
  {
    enum memmodel model = memmodel_from_int (INTVAL (operands[3]));
    if (is_mm_relaxed (model))
      return "swp<atomic_sfx>\t%<w>2, %<w>0, %1";
    else if (is_mm_acquire (model) || is_mm_consume (model))
      return "swpa<atomic_sfx>\t%<w>2, %<w>0, %1";
    else if (is_mm_release (model))
      return "swpl<atomic_sfx>\t%<w>2, %<w>0, %1";
    else
      return "swpal<atomic_sfx>\t%<w>2, %<w>0, %1";
  })

;; Atomic compare-and-swap: HI and smaller modes.

(define_insn "aarch64_atomic_cas<mode>"
 [(set (match_operand:SI 0 "register_operand" "+&r")		  ;; out
   (zero_extend:SI
    (match_operand:SHORT 1 "aarch64_sync_memory_operand" "+Q")))  ;; memory.
  (set (match_dup 1)
   (unspec_volatile:SHORT
    [(match_dup 0)
     (match_operand:SHORT 2 "register_operand" "r")	;; value.
     (match_operand:SI 3 "const_int_operand" "")]	;; model.
    UNSPECV_ATOMIC_CAS))]
 "TARGET_LSE && reload_completed"
{
  enum memmodel model = memmodel_from_int (INTVAL (operands[3]));
  if (is_mm_relaxed (model))
    return "cas<atomic_sfx>\t%<w>0, %<w>2, %1";
  else if (is_mm_acquire (model) || is_mm_consume (model))
    return "casa<atomic_sfx>\t%<w>0, %<w>2, %1";
  else if (is_mm_release (model))
    return "casl<atomic_sfx>\t%<w>0, %<w>2, %1";
  else
    return "casal<atomic_sfx>\t%<w>0, %<w>2, %1";
})

;; Atomic compare-and-swap: SI and larger modes.

(define_insn "aarch64_atomic_cas<mode>"
 [(set (match_operand:GPI 0 "register_operand" "+&r")	      ;; out
   (match_operand:GPI 1 "aarch64_sync_memory_operand" "+Q"))  ;; memory.
  (set (match_dup 1)
   (unspec_volatile:GPI
    [(match_dup 0)
     (match_operand:GPI 2 "register_operand" "r")	;; value.
     (match_operand:SI 3 "const_int_operand" "")]	;; model.
    UNSPECV_ATOMIC_CAS))]
 "TARGET_LSE && reload_completed"
{
  enum memmodel model = memmodel_from_int (INTVAL (operands[3]));
  if (is_mm_relaxed (model))
    return "cas<atomic_sfx>\t%<w>0, %<w>2, %1";
  else if (is_mm_acquire (model) || is_mm_consume (model))
    return "casa<atomic_sfx>\t%<w>0, %<w>2, %1";
  else if (is_mm_release (model))
    return "casl<atomic_sfx>\t%<w>0, %<w>2, %1";
  else
    return "casal<atomic_sfx>\t%<w>0, %<w>2, %1";
})

;; Atomic load-op: Load data, operate, store result, keep data.

(define_insn "aarch64_atomic_load<atomic_ldop><mode>"
 [(set (match_operand:ALLI 0 "register_operand" "=r")
   (match_operand:ALLI 1 "aarch64_sync_memory_operand" "+Q"))
  (set (match_dup 1)
   (unspec_volatile:ALLI
    [(match_dup 1)
     (match_operand:ALLI 2 "register_operand")
     (match_operand:SI 3 "const_int_operand")]
    ATOMIC_LDOP))]
 "TARGET_LSE && reload_completed"
 {
   enum memmodel model = memmodel_from_int (INTVAL (operands[3]));
   if (is_mm_relaxed (model))
     return "ld<atomic_ldop><atomic_sfx>\t%<w>2, %<w>0, %1";
   else if (is_mm_acquire (model) || is_mm_consume (model))
     return "ld<atomic_ldop>a<atomic_sfx>\t%<w>2, %<w>0, %1";
   else if (is_mm_release (model))
     return "ld<atomic_ldop>l<atomic_sfx>\t%<w>2, %<w>0, %1";
   else
     return "ld<atomic_ldop>al<atomic_sfx>\t%<w>2, %<w>0, %1";
 })