; RUN: llc -mattr=avr6 < %s -mtriple=avr | FileCheck %s

; Tests atomic operations on AVR

; CHECK-LABEL: atomic_load8
; CHECK: in r0, 63
; CHECK-NEXT: cli
; CHECK-NEXT: ld [[RR:r[0-9]+]], [[RD:(X|Y|Z)]]
; CHECK-NEXT: out 63, r0
define i8 @atomic_load8(ptr %foo) {
  %val = load atomic i8, ptr %foo unordered, align 1
  ret i8 %val
}

; CHECK-LABEL: atomic_load_swap8
; CHECK: call __sync_lock_test_and_set_1
define i8 @atomic_load_swap8(ptr %foo) {
  %val = atomicrmw xchg ptr %foo, i8 13 seq_cst
  ret i8 %val
}

; CHECK-LABEL: atomic_load_cmp_swap8
; CHECK: call __sync_val_compare_and_swap_1
define i8 @atomic_load_cmp_swap8(ptr %foo) {
  %val = cmpxchg ptr %foo, i8 5, i8 10 acq_rel monotonic
  %value_loaded = extractvalue { i8, i1 } %val, 0
  ret i8 %value_loaded
}

; CHECK-LABEL: atomic_load_add8
; CHECK: in r0, 63
; CHECK-NEXT: cli
; CHECK-NEXT: ld [[RD:r[0-9]+]], [[RR:(X|Y|Z)]]
; CHECK-NEXT: add [[RR1:r[0-9]+]], [[RD]]
; CHECK-NEXT: st [[RR]], [[RR1]]
; CHECK-NEXT: out 63, r0
define i8 @atomic_load_add8(ptr %foo) {
  %val = atomicrmw add ptr %foo, i8 13 seq_cst
  ret i8 %val
}

; CHECK-LABEL: atomic_load_sub8
; CHECK: in r0, 63
; CHECK-NEXT: cli
; CHECK-NEXT: ld [[RD:r[0-9]+]], [[RR:(X|Y|Z)]]
; CHECK-NEXT: mov [[TMP:r[0-9]+]], [[RD]]
; CHECK-NEXT: sub [[TMP]], [[RR1:r[0-9]+]]
; CHECK-NEXT: st [[RR]], [[TMP]]
; CHECK-NEXT: out 63, r0
define i8 @atomic_load_sub8(ptr %foo) {
  %val = atomicrmw sub ptr %foo, i8 13 seq_cst
  ret i8 %val
}

; CHECK-LABEL: atomic_load_and8
; CHECK: in r0, 63
; CHECK-NEXT: cli
; CHECK-NEXT: ld [[RD:r[0-9]+]], [[RR:(X|Y|Z)]]
; CHECK-NEXT: and [[RR1:r[0-9]+]], [[RD]]
; CHECK-NEXT: st [[RR]], [[RR1]]
; CHECK-NEXT: out 63, r0
define i8 @atomic_load_and8(ptr %foo) {
  %val = atomicrmw and ptr %foo, i8 13 seq_cst
  ret i8 %val
}

; CHECK-LABEL: atomic_load_or8
; CHECK: in r0, 63
; CHECK-NEXT: cli
; CHECK-NEXT: ld [[RD:r[0-9]+]], [[RR:(X|Y|Z)]]
; CHECK-NEXT: or [[RR1:r[0-9]+]], [[RD]]
; CHECK-NEXT: st [[RR]], [[RR1]]
; CHECK-NEXT: out 63, r0
define i8 @atomic_load_or8(ptr %foo) {
  %val = atomicrmw or ptr %foo, i8 13 seq_cst
  ret i8 %val
}

; CHECK-LABEL: atomic_load_xor8
; CHECK: in r0, 63
; CHECK-NEXT: cli
; CHECK-NEXT: ld [[RD:r[0-9]+]], [[RR:(X|Y|Z)]]
; CHECK-NEXT: eor [[RR1:r[0-9]+]], [[RD]]
; CHECK-NEXT: st [[RR]], [[RR1]]
; CHECK-NEXT: out 63, r0
define i8 @atomic_load_xor8(ptr %foo) {
  %val = atomicrmw xor ptr %foo, i8 13 seq_cst
  ret i8 %val
}

; CHECK-LABEL: atomic_load_nand8
; CHECK: call __sync_fetch_and_nand_1
define i8 @atomic_load_nand8(ptr %foo) {
  %val = atomicrmw nand ptr %foo, i8 13 seq_cst
  ret i8 %val
}

; CHECK-LABEL: atomic_load_max8
; CHECK: call __sync_fetch_and_max_1
define i8 @atomic_load_max8(ptr %foo) {
  %val = atomicrmw max ptr %foo, i8 13 seq_cst
  ret i8 %val
}

; CHECK-LABEL: atomic_load_min8
; CHECK: call __sync_fetch_and_min_1
define i8 @atomic_load_min8(ptr %foo) {
  %val = atomicrmw min ptr %foo, i8 13 seq_cst
  ret i8 %val
}

; CHECK-LABEL: atomic_load_umax8
; CHECK: call __sync_fetch_and_umax_1
define i8 @atomic_load_umax8(ptr %foo) {
  %val = atomicrmw umax ptr %foo, i8 13 seq_cst
  ret i8 %val
}

; CHECK-LABEL: atomic_load_umin8
; CHECK: call __sync_fetch_and_umin_1
define i8 @atomic_load_umin8(ptr %foo) {
  %val = atomicrmw umin ptr %foo, i8 13 seq_cst
  ret i8 %val
}