/llvm-project/llvm/test/CodeGen/AArch64/
memcmp.ll:
    10: ; CHECK: // %bb.0:
    13: %m = tail call i32 @memcmp(ptr %X, ptr %Y, i64 0) nounwind
    19: ; CHECK: // %bb.0:
    20: ; CHECK-NEXT: mov w0, #1 // =0x1
    22: %m = tail call i32 @memcmp(ptr %X, ptr %Y, i64 0) nounwind
    23: %c = icmp eq i32 %m, 0
    29: ; CHECK: // %bb.0:
    32: %m = tail call i32 @memcmp(ptr %X, ptr %Y, i64 0) nounwind
    33: %c = icmp slt i32 %m, 0
    39: ; CHECK: // %bb.0 [all …]
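The hits above exercise memcmp with a constant length of 0: the result is known to be 0 at compile time, so equality tests fold to a constant (the mov w0, #1). A minimal sketch of the pattern, with the function name hypothetical:

    declare i32 @memcmp(ptr, ptr, i64)

    define i1 @memcmp_len0_eq(ptr %X, ptr %Y) {
      ; A zero-length compare always returns 0, so this folds to ret i1 true.
      %m = tail call i32 @memcmp(ptr %X, ptr %Y, i64 0)
      %c = icmp eq i32 %m, 0
      ret i1 %c
    }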
dp1.ll:
    5: @var32 = global i32 0
    6: @var64 = global i64 0
    10: ; CHECK: // %bb.0:
    11: ; CHECK-NEXT: adrp x8, :got:var32
    12: ; CHECK-NEXT: ldr x8, [x8, :got_lo12:var32]
    13: ; CHECK-NEXT: ldr w9, [x8]
    15: ; CHECK-NEXT: str w9, [x8]
    25: ; CHECK: // %bb.0:
    26: ; CHECK-NEXT: adrp x8, [all …]
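The checks match GOT-indirect addressing: adrp of :got:var32, a :got_lo12: load of the global's address, then the access itself. A sketch of a read-modify-write that would produce this sequence; the add is hypothetical, since the hits do not show the operation between the ldr and the str:

    @var32 = global i32 0

    define void @update_var32() {
      %v = load i32, ptr @var32
      %n = add i32 %v, 1    ; hypothetical; the real update is not in the hits
      store i32 %n, ptr @var32
      ret void
    }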
aarch64-mops-mte.ll:
    11: ; GISel-O0: // %bb.0: // %entry
    12: ; GISel-O0-NEXT: mov x8, xzr
    13: ; GISel-O0-NEXT: setgp [x0]!, x8!, x8
    14: ; GISel-O0-NEXT: setgm [x0]!, x8!, x8
    15: ; GISel-O0-NEXT: setge [x0]!, x8!, x8
    19: ; GISel: // %bb.0: // %entry
    20: ; GISel-NEXT: mov x8, xzr
    21: ; GISel-NEXT: setgp [x0]!, x8!, xzr
    22: ; GISel-NEXT: setgm [x0]!, x8!, xzr
    23: ; GISel-NEXT: setge [x0]!, x8!, xzr
    [all …]
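setgp/setgm/setge are the MOPS set-with-tag triple (prologue, main, epilogue) emitted for tagged memset. A sketch using the llvm.aarch64.mops.memset.tag intrinsic; the argument values and the feature string are assumptions:

    declare ptr @llvm.aarch64.mops.memset.tag(ptr, i8, i64)

    define void @memset_tagged(ptr %dst, i64 %size) #0 {
      ; Sets both memory contents and MTE allocation tags;
      ; lowers to a setgp/setgm/setge sequence.
      %end = call ptr @llvm.aarch64.mops.memset.tag(ptr %dst, i8 0, i64 %size)
      ret void
    }

    attributes #0 = { "target-features"="+mops,+mte" }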
ldp-stp-control-features.ll:
    10: define i32 @ldp_aligned_int32_t(ptr %0) #0 {
    12: ; CHECK: // %bb.0:
    13: ; CHECK-NEXT: and x8, x0, #0xffffffffffffffc0
    14: ; CHECK-NEXT: ldp w9, w8, [x8]
    19: ; CHECK-DEFAULT: // %bb.0:
    20: ; CHECK-DEFAULT-NEXT: and x8, x0, #0xffffffffffffffc0
    21: ; CHECK-DEFAULT-NEXT: ldp w9, w8, [x8]
    26: ; CHECK-DISABLE-LDP: // %bb.0:
    27: ; CHECK-DISABLE-LDP-NEXT: and x8, x0, #0xffffffffffffffc0
    28: ; CHECK-DISABLE-LDP-NEXT: ldr w9, [x8]
    [all …]
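With pairing enabled (CHECK and CHECK-DEFAULT) two adjacent 32-bit loads merge into one ldp; the CHECK-DISABLE-LDP run keeps them as separate ldr instructions. A sketch of the load pair being tested; the 64-byte pointer masking seen in the checks is omitted here for brevity:

    define i32 @load_adjacent_pair(ptr %p) {
      ; Two adjacent i32 loads, candidates for ldp formation.
      %q = getelementptr inbounds i32, ptr %p, i64 1
      %a = load i32, ptr %p, align 8
      %b = load i32, ptr %q, align 4
      %s = add i32 %a, %b
      ret i32 %s
    }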
sve-stN.mir:
    12: …- { id: 0, name: '', type: default, offset: 0, size: 512, alignment: 16, stack-id: scalable-vector…
    14: bb.0:
    83: ST2B_IMM renamable $z0_z1, renamable $p0, %stack.0, -8
    84: ST2B_IMM renamable $z0_z1, renamable $p0, %stack.0, 7
    85: ST2H_IMM renamable $z0_z1, renamable $p0, %stack.0, -8
    86: ST2H_IMM renamable $z0_z1, renamable $p0, %stack.0, 7
    87: ST2W_IMM renamable $z0_z1, renamable $p0, %stack.0, -8
    88: ST2W_IMM renamable $z0_z1, renamable $p0, %stack.0, 7
    89: ST2D_IMM renamable $z0_z1, renamable $p0, %stack.0, -8
    90: ST2D_IMM renamable $z0_z1, renamable $p0, %stack.0, 7
    [all …]
sve-ldN.mir:
    12: …- { id: 0, name: '', type: default, offset: 0, size: 512, alignment: 16, stack-id: scalable-vector…
    14: bb.0:
    83: renamable $z0_z1 = LD2B_IMM renamable $p0, %stack.0, -8
    84: renamable $z0_z1 = LD2B_IMM renamable $p0, %stack.0, 7
    85: renamable $z0_z1 = LD2H_IMM renamable $p0, %stack.0, -8
    86: renamable $z0_z1 = LD2H_IMM renamable $p0, %stack.0, 7
    87: renamable $z0_z1 = LD2W_IMM renamable $p0, %stack.0, -8
    88: renamable $z0_z1 = LD2W_IMM renamable $p0, %stack.0, 7
    89: renamable $z0_z1 = LD2D_IMM renamable $p0, %stack.0, -8
    90: renamable $z0_z1 = LD2D_IMM renamable $p0, %stack.0, 7
    [all …]
wide-scalar-shift-by-byte-multiple-legalization.ll:
    6: ; ALL: // %bb.0:
    22: ; ALL: // %bb.0:
    38: ; ALL: // %bb.0:
    55: ; ALL: // %bb.0:
    56: ; ALL-NEXT: ldr x8, [x1]
    58: ; ALL-NEXT: lsl x8, x8, #3
    59: ; ALL-NEXT: lsr x8, x9, x8
    60: ; ALL-NEXT: str x8, [x [all …]
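The lsl x8, x8, #3 scales a byte count to a bit count before the variable lsr, which is the point of the by-byte-multiple legalization. A sketch of the 8-byte case, assuming the test passes the value, shift amount, and result through pointers as the ldr/str above suggest:

    define void @lshr_8bytes(ptr %val, ptr %bytes, ptr %out) {
      %v = load i64, ptr %val
      %b = load i64, ptr %bytes
      %bits = shl i64 %b, 3    ; bytes to bits, the lsl #3 in the checks
      %r = lshr i64 %v, %bits
      store i64 %r, ptr %out
      ret void
    }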
i128_volatile_load_store.ll:
    5: @x = common dso_local global i128 0
    6: @y = common dso_local global i128 0
    10: ; CHECK: // %bb.0:
    11: ; CHECK-NEXT: adrp x8, x
    12: ; CHECK-NEXT: add x8, x8, :lo12:x
    15: ; CHECK-NEXT: ldp x8, x9, [x8]
    16: ; CHECK-NEXT: stp x8, x9, [x10]
    25: ; CHECK: // %bb.0:
    26: ; CHECK-NEXT: adrp x8, x
    27: ; CHECK-NEXT: add x8, x8, :lo12:x
    [all …]
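An i128 occupies two 64-bit registers on AArch64, so a volatile i128 copy becomes one ldp and one stp. A sketch consistent with the globals in the hits; the function name is hypothetical:

    @x = common dso_local global i128 0
    @y = common dso_local global i128 0

    define void @copy_x_to_y() {
      %v = load volatile i128, ptr @x
      store volatile i128 %v, ptr @y
      ret void
    }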
wide-scalar-shift-legalization.ll:
    5: ; ALL: // %bb.0:
    19: ; ALL: // %bb.0:
    33: ; ALL: // %bb.0:
    47: ; ALL: // %bb.0:
    48: ; ALL-NEXT: ldr x8, [x0]
    50: ; ALL-NEXT: lsr x8, x8, x9
    51: ; ALL-NEXT: str x8, [x2]
    61: ; ALL: // %bb.0:
    62: ; ALL-NEXT: ldr x8, [x [all …]
fpenv.ll:
    6: ; CHECK: // %bb.0:
    8: ; CHECK-NEXT: mrs x8, FPCR
    9: ; CHECK-NEXT: and x8, x8, #0xffffffffff3fffff
    11: ; CHECK-NEXT: and w9, w9, #0xc00000
    12: ; CHECK-NEXT: orr x8, x8, x9
    13: ; CHECK-NEXT: msr FPCR, x8
    21: ; CHECK: // %bb.0 [all …]
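The mrs/and/orr/msr sequence is a read-modify-write of the FPCR rounding-mode field (bits 22-23, hence the #0xc00000 mask). A sketch assuming the hits come from the llvm.set.rounding lowering:

    declare void @llvm.set.rounding(i32)

    define void @set_rounding(i32 %rm) {
      ; Only the RMode bits of FPCR change; everything else is preserved,
      ; which is what the two and-masks in the checks enforce.
      call void @llvm.set.rounding(i32 %rm)
      ret void
    }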
code-model-large.ll:
    5: @var8 = dso_local global i8 0
    6: @var16 = dso_local global i16 0
    7: @var32 = dso_local global i32 0
    8: @var64 = dso_local global i64 0
    13: ; STATIC: // %bb.0:
    24: ; PIC-NEXT: // %bb.0:
    35: ; STATIC: // %bb.0:
    36: ; STATIC-NEXT: movz x8, #:abs_g0_nc:var8
    37: ; STATIC-NEXT: movk x8, #:abs_g1_nc:var8
    38: ; STATIC-NEXT: movk x8, #:abs_g2_nc:var8
    [all …]
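Under the large code model a static global's full 64-bit address is built with a movz/movk chain of :abs_gN: relocations, one 16-bit chunk per instruction, instead of an adrp pair. A sketch of a function that would exercise this when compiled with -code-model=large:

    @var8 = dso_local global i8 0

    define i8 @load_var8() {
      ; With -code-model=large the address of @var8 is materialized
      ; via movz/movk rather than adrp/add.
      %v = load i8, ptr @var8
      ret i8 %v
    }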
urem-seteq-vec-nonsplat.ll:
    7: ; CHECK: // %bb.0:
    8: ; CHECK-NEXT: adrp x8, .LCPI0_0
    10: ; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI0_0]
    11: ; CHECK-NEXT: adrp x8, .LCPI0_1
    14: ; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI0_1]
    15: ; CHECK-NEXT: adrp x8, .LCPI0_3
    20: ; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI0_3]
    25: %cmp = icmp eq <4 x i32> %urem, <i32 0, i32 0, i32 0, i32 0>
    35: ; CHECK: // %bb.0:
    36: ; CHECK-NEXT: adrp x8, .LCPI1_0
    [all …]
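A urem by constants followed by a compare against zero is rewritten into multiplies against per-lane magic constants, which is why the checks load several constant-pool vectors (.LCPI0_0 and friends). A sketch of the IR shape; the divisors here are arbitrary, the point being that they differ per lane:

    define <4 x i1> @urem_nonsplat_eq0(<4 x i32> %X) {
      %urem = urem <4 x i32> %X, <i32 5, i32 6, i32 7, i32 9>   ; hypothetical divisors
      %cmp = icmp eq <4 x i32> %urem, <i32 0, i32 0, i32 0, i32 0>
      ret <4 x i1> %cmp
    }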
fold-global-offsets.ll:
    11: ; CHECK: // %bb.0:
    12: ; CHECK-NEXT: adrp x8, x1+16
    13: ; CHECK-NEXT: ldr x0, [x8, :lo12:x1+16]
    17: ; GISEL: // %bb.0:
    18: ; GISEL-NEXT: adrp x8, x1+16
    19: ; GISEL-NEXT: ldr x0, [x8, :lo12:x1+16]
    21: %l = load i64, ptr getelementptr ([2 x i64], ptr @x1, i64 0, i64 2)
    27: ; CHECK: // %bb.0:
    28: ; CHECK-NEXT: adrp x8, x1
    29: ; CHECK-NEXT: add x8, x8, :lo12:x1
    [all …]
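A constant GEP offset is folded directly into the relocation (adrp x8, x1+16 rather than a separate add). A sketch built around the IR line visible in the hits; the declaration of @x1 is assumed:

    @x1 = external dso_local global [2 x i64]

    define i64 @f1() {
      ; Offset 16 (element index 2) rides along in the adrp/:lo12: pair.
      %l = load i64, ptr getelementptr ([2 x i64], ptr @x1, i64 0, i64 2)
      ret i64 %l
    }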
sve-split-extract-elt.ll:
    8: ; CHECK: // %bb.0:
    10: ; CHECK-NEXT: whilels p0.d, xzr, x8
    20: ; CHECK: // %bb.0:
    23: ; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00…
    25: ; CHECK-NEXT: rdvl x8, #2
    28: ; CHECK-NEXT: sub x8, x8, #1
    29: ; CHECK-NEXT: cmp x9, x8
    32: ; CHECK-NEXT: csel x8, x9, x8, lo
    34: ; CHECK-NEXT: ldrb w0, [x9, x8]
    44: ; CHECK: // %bb.0:
    [all …]
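When an extractelement index may exceed one register's worth of elements, the vector is spilled to the stack and the index clamped (the rdvl/sub/cmp/csel ladder) before a scalar reload. A sketch of the kind of function involved; the i8 element type and 32-element count are inferred from the ldrb and rdvl #2 and may not match the actual test:

    define i8 @split_extract_i8(<vscale x 32 x i8> %a, i32 %idx) #0 {
      %e = extractelement <vscale x 32 x i8> %a, i32 %idx
      ret i8 %e
    }

    attributes #0 = { "target-features"="+sve" }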
memset-inline.ll:
    12: ; ALL: // %bb.0:
    15: tail call void @llvm.memset.inline.p0.i64(ptr %a, i8 %value, i64 1, i1 0)
    21: ; ALL: // %bb.0:
    25: tail call void @llvm.memset.inline.p0.i64(ptr %a, i8 %value, i64 2, i1 0)
    31: ; ALL: // %bb.0:
    33: ; ALL-NEXT: and w9, w1, #0xff
    37: tail call void @llvm.memset.inline.p0.i64(ptr %a, i8 %value, i64 4, i1 0)
    43: ; ALL: // %bb.0:
    45: ; ALL-NEXT: mov x8, #72340172838076673
    46: ; ALL-NEXT: and x9, x1, #0xff
    [all …]
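llvm.memset.inline is the never-a-libcall form of memset: the stores are always expanded inline, splatting the byte across a register via the 0x0101010101010101 multiplier (72340172838076673 above). A sketch of an 8-byte case in the same style as the calls in the hits:

    declare void @llvm.memset.inline.p0.i64(ptr, i8, i64, i1)

    define void @memset_8(ptr %a, i8 %value) {
      ; The trailing i1 0 marks the store as non-volatile.
      tail call void @llvm.memset.inline.p0.i64(ptr %a, i8 %value, i64 8, i1 0)
      ret void
    }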
split-vector-insert.ll:
    7: attributes #0 = {"target-features"="+sve" uwtable}
    12: define <vscale x 2 x i64> @test_nxv2i64_v8i64(<vscale x 2 x i64> %a, <8 x i64> %b) #0 {
    14: ; CHECK-LEGALIZATION: // %bb.0:
    19: ; CHECK-LEGALIZATION-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0…
    20: ; CHECK-LEGALIZATION-NEXT: cntd x8
    22: ; CHECK-LEGALIZATION-NEXT: mov w9, #2 // =0x2
    23: ; CHECK-LEGALIZATION-NEXT: sub x8, x8, #2
    26: ; CHECK-LEGALIZATION-NEXT: cmp x8, #2
    29: ; CHECK-LEGALIZATION-NEXT: csel x9, x8, x9, lo
    30: ; CHECK-LEGALIZATION-NEXT: cmp x8, #4
    [all …]
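Inserting a fixed <8 x i64> into a scalable <vscale x 2 x i64> must be legalized in register-sized chunks, with each chunk's position clamped against the runtime vector length (the cntd/cmp/csel ladder). The signature in the hits matches llvm.vector.insert; a sketch of a plausible body, assuming insertion at index 0:

    declare <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64>, <8 x i64>, i64)

    define <vscale x 2 x i64> @test_nxv2i64_v8i64(<vscale x 2 x i64> %a, <8 x i64> %b) #0 {
      %r = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> %a, <8 x i64> %b, i64 0)
      ret <vscale x 2 x i64> %r
    }

    attributes #0 = { "target-features"="+sve" uwtable }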
expand-movi-renamable.mir:
    7: # CHECK-LABEL: bb.0:
    8: # CHECK-NEXT: $sp = frame-setup SUBXri $sp, 32, 0
    9: # CHECK-NEXT: renamable $w8 = MOVZWi 4, 0
    10: # CHECK-NEXT: STRWui killed renamable $w8, $sp, 0
    11: # CHECK-NEXT: $w8 = MOVZWi 45926, 0
    14: # CHECK-NEXT: renamable $w8 = MOVZWi 10319, 0
    17: # CHECK-NEXT: renamable $w8 = MOVZWi 30600, 0
    20: # CHECK-NEXT: $sp = frame-destroy ADDXri $sp, 32, 0
    29: maxCallFrameSize: 0
    32: - { id: 0, offset: -4, size: 4, alignment: 4, local-offset: -4 }
    [all …]
/llvm-project/lld/test/ELF/
aarch64-tlsld-ldst.s:
    10: _start: mrs x8, TPIDR_EL0
    12: add x8, x8, :tprel_hi12:var0
    13: ldr q20, [x8, :tprel_lo12_nc:var0]
    15: add x8, x8, :tprel_hi12:var1
    16: ldr x0, [x8, :tprel_lo12_nc:var1]
    18: add x8, x8, :tprel_hi12:var2
    19: ldr w0, [x8, :tprel_lo12_nc:var2]
    21: add x8, x8, :tprel_hi12:var3
    22: ldrh w0, [x8, :tprel_lo12_nc:var3]
    24: add x8, x8, :tprel_hi12:var4
    [all …]
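The pattern is local-exec TLS addressing: read the thread pointer from TPIDR_EL0, add the high 12 bits of the variable's TP-relative offset (:tprel_hi12:), and fold the low 12 bits into the load, repeated at each access width from 128-bit q-register loads down to ldrh. LLVM IR that lowers to this shape might look like the following; the global and its type are assumptions:

    @var0 = thread_local(localexec) global <4 x i32> zeroinitializer

    define <4 x i32> @read_var0() {
      ; Lowers to mrs TPIDR_EL0 plus :tprel_hi12:/:tprel_lo12_nc: addressing.
      %v = load <4 x i32>, ptr @var0
      ret <4 x i32> %v
    }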
/llvm-project/llvm/test/CodeGen/AArch64/Atomics/
aarch64-atomicrmw-outline_atomics.ll:
    149: ; -O0: eor x8, x0, x8
    151: ; -O0: orr x8, x8, x9
    152: ; -O0: subs x8, x8, #0
    155: ; -O1: ldxp x8, x1, [x0]
    164: ; -O0: eor x8, x0, x8
    [all …]
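The aarch64-atomicrmw-*.ll files in this directory run the same atomicrmw inputs under different subtarget features at -O0 and -O1; register pairs in ldxp, casp, and swpp mark the 128-bit cases. A representative input, with the operation and ordering chosen arbitrarily:

    define i128 @atomic_xchg_i128(ptr %p, i128 %v) {
      ; Becomes an ldxp/stxp loop, a casp, or a swpp
      ; depending on +lse / +lse128 availability.
      %r = atomicrmw xchg ptr %p, i128 %v seq_cst, align 16
      ret i128 %r
    }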
aarch64-atomicrmw-v8_1a.ll:
    148: ; -O0: casp x0, x1, x2, x3, [x8]
    149: ; -O0: eor x8, x10, x8
    151: ; -O0: orr x8, x8, x11
    152: ; -O0: subs x8, x8, #0
    158: ; -O1: ccmp x4, x6, #0, eq
    165: ; -O0: caspa x0, x1, x2, x3, [x8]
    [all …]
aarch64-atomicrmw-rcpc3.ll:
    11: ; -O0: and w8, w9, #0xff
    26: ; -O0: and w8, w9, #0xff
    41: ; -O0: and w8, w9, #0xff
    56: ; -O0: and w8, w9, #0xff
    71: ; -O0: and w8, w9, #0xff
    159: ; -O1: ldxr w0, [x8]
    160: ; -O1: stxr w9, w1, [x8]
    173: ; -O1: ldaxr w0, [x8]
    174: ; -O1: stxr w9, w1, [x8]
    187: ; -O1: ldxr w0, [x8]
    [all …]
aarch64-atomicrmw-rcpc.ll:
    11: ; -O0: and w8, w9, #0xff
    26: ; -O0: and w8, w9, #0xff
    41: ; -O0: and w8, w9, #0xff
    56: ; -O0: and w8, w9, #0xff
    71: ; -O0: and w8, w9, #0xff
    159: ; -O1: ldxr w0, [x8]
    160: ; -O1: stxr w9, w1, [x8]
    173: ; -O1: ldaxr w0, [x8]
    174: ; -O1: stxr w9, w1, [x8]
    187: ; -O1: ldxr w0, [x8]
    [all …]
aarch64-atomicrmw-v8a.ll:
    11: ; -O0: and w8, w9, #0xff
    26: ; -O0: and w8, w9, #0xff
    41: ; -O0: and w8, w9, #0xff
    56: ; -O0: and w8, w9, #0xff
    71: ; -O0: and w8, w9, #0xff
    159: ; -O1: ldxr w0, [x8]
    160: ; -O1: stxr w9, w1, [x8]
    173: ; -O1: ldaxr w0, [x8]
    174: ; -O1: stxr w9, w1, [x8]
    187: ; -O1: ldxr w0, [x8]
    [all …]
aarch64-atomicrmw-lse2.ll:
    11: ; -O0: and w8, w9, #0xff
    26: ; -O0: and w8, w9, #0xff
    41: ; -O0: and w8, w9, #0xff
    56: ; -O0: and w8, w9, #0xff
    71: ; -O0: and w8, w9, #0xff
    159: ; -O1: ldxr w0, [x8]
    160: ; -O1: stxr w9, w1, [x8]
    173: ; -O1: ldaxr w0, [x8]
    174: ; -O1: stxr w9, w1, [x8]
    187: ; -O1: ldxr w0, [x8]
    [all …]
aarch64-atomicrmw-lse2_lse128.ll:
    148: ; -O0: swpp x0, x1, [x8]
    158: ; -O0: swppa x0, x1, [x8]
    168: ; -O0: swppl x0, x1, [x8]
    178: ; -O0: swppal x0, x1, [x8]
    188: ; -O0: swppal x0, x1, [x8]
    515: ; -O0: casp x0, x1, x2, x3, [x8]
    516: ; -O0: eor x8, x10, x8
    518: ; -O0: orr x8, x8, x1
    [all …]