; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc --mtriple=loongarch32 -mattr=+d < %s | FileCheck %s --check-prefix=LA32
; RUN: llc --mtriple=loongarch64 -mattr=+d < %s | FileCheck %s --check-prefix=LA64

define i8 @load_i8() nounwind {
; LA32-LABEL: load_i8:
; LA32:       # %bb.0:
; LA32-NEXT:    ld.b $a0, $zero, 40
; LA32-NEXT:    ret
;
; LA64-LABEL: load_i8:
; LA64:       # %bb.0:
; LA64-NEXT:    ld.b $a0, $zero, 40
; LA64-NEXT:    ret
  %a = load i8, ptr inttoptr (i64 40 to ptr), align 8
  ret i8 %a
}

define signext i8 @load_i8_sext() nounwind {
; LA32-LABEL: load_i8_sext:
; LA32:       # %bb.0:
; LA32-NEXT:    ld.b $a0, $zero, 40
; LA32-NEXT:    ret
;
; LA64-LABEL: load_i8_sext:
; LA64:       # %bb.0:
; LA64-NEXT:    ld.b $a0, $zero, 40
; LA64-NEXT:    ret
  %a = load i8, ptr inttoptr (i64 40 to ptr), align 8
  ret i8 %a
}

define i16 @load_i16() nounwind {
; LA32-LABEL: load_i16:
; LA32:       # %bb.0:
; LA32-NEXT:    ld.h $a0, $zero, 40
; LA32-NEXT:    ret
;
; LA64-LABEL: load_i16:
; LA64:       # %bb.0:
; LA64-NEXT:    ld.h $a0, $zero, 40
; LA64-NEXT:    ret
  %a = load i16, ptr inttoptr (i64 40 to ptr), align 8
  ret i16 %a
}

define signext i16 @load_i16_sext() nounwind {
; LA32-LABEL: load_i16_sext:
; LA32:       # %bb.0:
; LA32-NEXT:    ld.h $a0, $zero, 40
; LA32-NEXT:    ret
;
; LA64-LABEL: load_i16_sext:
; LA64:       # %bb.0:
; LA64-NEXT:    ld.h $a0, $zero, 40
; LA64-NEXT:    ret
  %a = load i16, ptr inttoptr (i64 40 to ptr), align 8
  ret i16 %a
}

define i32 @load_i32() nounwind {
; LA32-LABEL: load_i32:
; LA32:       # %bb.0:
; LA32-NEXT:    ld.w $a0, $zero, 40
; LA32-NEXT:    ret
;
; LA64-LABEL: load_i32:
; LA64:       # %bb.0:
; LA64-NEXT:    ld.w $a0, $zero, 40
; LA64-NEXT:    ret
  %a = load i32, ptr inttoptr (i64 40 to ptr), align 8
  ret i32 %a
}

define signext i32 @load_i32_sext() nounwind {
; LA32-LABEL: load_i32_sext:
; LA32:       # %bb.0:
; LA32-NEXT:    ld.w $a0, $zero, 40
; LA32-NEXT:    ret
;
; LA64-LABEL: load_i32_sext:
; LA64:       # %bb.0:
; LA64-NEXT:    ld.w $a0, $zero, 40
; LA64-NEXT:    ret
  %a = load i32, ptr inttoptr (i64 40 to ptr), align 8
  ret i32 %a
}

define i64 @load_i64() nounwind {
; LA32-LABEL: load_i64:
; LA32:       # %bb.0:
; LA32-NEXT:    ld.w $a0, $zero, 40
; LA32-NEXT:    ld.w $a1, $zero, 44
; LA32-NEXT:    ret
;
; LA64-LABEL: load_i64:
; LA64:       # %bb.0:
; LA64-NEXT:    ld.d $a0, $zero, 40
; LA64-NEXT:    ret
  %a = load i64, ptr inttoptr (i64 40 to ptr), align 8
  ret i64 %a
}

define void @store_i8(i8 %v) nounwind {
; LA32-LABEL: store_i8:
; LA32:       # %bb.0:
; LA32-NEXT:    st.b $a0, $zero, 40
; LA32-NEXT:    ret
;
; LA64-LABEL: store_i8:
; LA64:       # %bb.0:
; LA64-NEXT:    st.b $a0, $zero, 40
; LA64-NEXT:    ret
  store i8 %v, ptr inttoptr (i64 40 to ptr), align 8
  ret void
}

define void @store_i16(i16 %v) nounwind {
; LA32-LABEL: store_i16:
; LA32:       # %bb.0:
; LA32-NEXT:    st.h $a0, $zero, 40
; LA32-NEXT:    ret
;
; LA64-LABEL: store_i16:
; LA64:       # %bb.0:
; LA64-NEXT:    st.h $a0, $zero, 40
; LA64-NEXT:    ret
  store i16 %v, ptr inttoptr (i64 40 to ptr), align 8
  ret void
}

define void @store_i32(i32 %v) nounwind {
; LA32-LABEL: store_i32:
; LA32:       # %bb.0:
; LA32-NEXT:    st.w $a0, $zero, 40
; LA32-NEXT:    ret
;
; LA64-LABEL: store_i32:
; LA64:       # %bb.0:
; LA64-NEXT:    st.w $a0, $zero, 40
; LA64-NEXT:    ret
  store i32 %v, ptr inttoptr (i64 40 to ptr), align 8
  ret void
}

define void @store_i64(i64 %v) nounwind {
; LA32-LABEL: store_i64:
; LA32:       # %bb.0:
; LA32-NEXT:    st.w $a1, $zero, 44
; LA32-NEXT:    st.w $a0, $zero, 40
; LA32-NEXT:    ret
;
; LA64-LABEL: store_i64:
; LA64:       # %bb.0:
; LA64-NEXT:    st.d $a0, $zero, 40
; LA64-NEXT:    ret
  store i64 %v, ptr inttoptr (i64 40 to ptr), align 8
  ret void
}