# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -O0 -mtriple arm64-- -run-pass=regbankselect -verify-machineinstrs %s -o - | FileCheck %s
---
name:            build_vec_f16
alignment:       4
legalized:       true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $w0

    ; Check that s16 operands are assigned fpr as we don't have 16-bit gpr regs.
    ; (See the illustrative source-level note after this test.)
    ; CHECK-LABEL: name: build_vec_f16
    ; CHECK: liveins: $w0
    ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
    ; CHECK: [[TRUNC:%[0-9]+]]:gpr(s16) = G_TRUNC [[COPY]](s32)
    ; CHECK: [[COPY1:%[0-9]+]]:fpr(s16) = COPY [[TRUNC]](s16)
    ; CHECK: [[COPY2:%[0-9]+]]:fpr(s16) = COPY [[TRUNC]](s16)
    ; CHECK: [[COPY3:%[0-9]+]]:fpr(s16) = COPY [[TRUNC]](s16)
    ; CHECK: [[COPY4:%[0-9]+]]:fpr(s16) = COPY [[TRUNC]](s16)
    ; CHECK: [[COPY5:%[0-9]+]]:fpr(s16) = COPY [[TRUNC]](s16)
    ; CHECK: [[COPY6:%[0-9]+]]:fpr(s16) = COPY [[TRUNC]](s16)
    ; CHECK: [[COPY7:%[0-9]+]]:fpr(s16) = COPY [[TRUNC]](s16)
    ; CHECK: [[COPY8:%[0-9]+]]:fpr(s16) = COPY [[TRUNC]](s16)
    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:fpr(<8 x s16>) = G_BUILD_VECTOR [[COPY1]](s16), [[COPY2]](s16), [[COPY3]](s16), [[COPY4]](s16), [[COPY5]](s16), [[COPY6]](s16), [[COPY7]](s16), [[COPY8]](s16)
    ; CHECK: $q0 = COPY [[BUILD_VECTOR]](<8 x s16>)
    ; CHECK: RET_ReallyLR implicit $q0
    %0:_(s32) = COPY $w0
    %1:_(s16) = G_TRUNC %0(s32)
    %2:_(<8 x s16>) = G_BUILD_VECTOR %1(s16), %1(s16), %1(s16), %1(s16), %1(s16), %1(s16), %1(s16), %1(s16)
    $q0 = COPY %2(<8 x s16>)
    RET_ReallyLR implicit $q0

...
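# For illustration only (an assumed source, not necessarily what this test was
# generated from): a C-level splat like the one below lowers to a G_TRUNC of a GPR
# value feeding an <8 x s16> G_BUILD_VECTOR, which is why the per-lane gpr-to-fpr
# copies above are expected.
#
#   #include <arm_neon.h>
#   int16x8_t splat16(int x) {
#     return vdupq_n_s16((int16_t)x);  // truncate to 16 bits, splat into a Q register
#   }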
---
name:            g_constant_operands_on_gpr
alignment:       4
legalized:       true
tracksRegLiveness: true
body:             |
  bb.1:
    ; Check that we assign GPR to the operands even though they're < 32b in size.
    ; They're all constant, so we can select it via a constant-pool load if needed,
    ; and this form is more amenable to selection by patterns (without x-bank copies).
    ; (See the illustrative source-level note after this test.)
    ; CHECK-LABEL: name: g_constant_operands_on_gpr
    ; CHECK: [[C:%[0-9]+]]:gpr(s8) = G_CONSTANT i8 4
    ; CHECK: [[C1:%[0-9]+]]:gpr(s8) = G_CONSTANT i8 10
    ; CHECK: [[C2:%[0-9]+]]:gpr(s8) = G_CONSTANT i8 3
    ; CHECK: [[C3:%[0-9]+]]:gpr(s8) = G_CONSTANT i8 11
    ; CHECK: [[C4:%[0-9]+]]:gpr(s8) = G_CONSTANT i8 15
    ; CHECK: [[C5:%[0-9]+]]:gpr(s8) = G_CONSTANT i8 44
    ; CHECK: [[C6:%[0-9]+]]:gpr(s8) = G_CONSTANT i8 22
    ; CHECK: [[C7:%[0-9]+]]:gpr(s8) = G_CONSTANT i8 19
    ; CHECK: [[C8:%[0-9]+]]:gpr(s8) = G_CONSTANT i8 55
    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:fpr(<16 x s8>) = G_BUILD_VECTOR [[C]](s8), [[C1]](s8), [[C2]](s8), [[C3]](s8), [[C4]](s8), [[C]](s8), [[C1]](s8), [[C5]](s8), [[C6]](s8), [[C4]](s8), [[C]](s8), [[C7]](s8), [[C2]](s8), [[C3]](s8), [[C4]](s8), [[C8]](s8)
    ; CHECK: $q0 = COPY [[BUILD_VECTOR]](<16 x s8>)
    ; CHECK: RET_ReallyLR implicit $q0
    %1:_(s8) = G_CONSTANT i8 4
    %2:_(s8) = G_CONSTANT i8 10
    %3:_(s8) = G_CONSTANT i8 3
    %4:_(s8) = G_CONSTANT i8 11
    %5:_(s8) = G_CONSTANT i8 15
    %6:_(s8) = G_CONSTANT i8 44
    %7:_(s8) = G_CONSTANT i8 22
    %8:_(s8) = G_CONSTANT i8 19
    %9:_(s8) = G_CONSTANT i8 55
    %0:_(<16 x s8>) = G_BUILD_VECTOR %1(s8), %2(s8), %3(s8), %4(s8), %5(s8), %1(s8), %2(s8), %6(s8), %7(s8), %5(s8), %1(s8), %8(s8), %3(s8), %4(s8), %5(s8), %9(s8)
    $q0 = COPY %0(<16 x s8>)
    RET_ReallyLR implicit $q0

...
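# For illustration only (again an assumed source): an all-constant vector such as
# the one below keeps every lane as a GPR-bank G_CONSTANT, leaving the selector
# free to materialize the whole value with a single constant-pool load instead of
# sixteen gpr-to-fpr copies.
#
#   #include <arm_neon.h>
#   int8x16_t const_vec(void) {
#     return (int8x16_t){4, 10, 3, 11, 15, 4, 10, 44, 22, 15, 4, 19, 3, 11, 15, 55};
#   }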
---
name:            fed_by_fp_load
alignment:       4
legalized:       true
tracksRegLiveness: true
liveins:
  - { reg: '$x0' }
  - { reg: '$x1' }
  - { reg: '$x2' }
  - { reg: '$s0' }
frameInfo:
  maxAlignment:    1
body:             |
  bb.1:
    liveins: $s0, $x0, $x1, $x2

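    ; Check that loads feeding the build_vector are assigned fpr, so they feed
    ; G_BUILD_VECTOR directly and no cross-bank copies are needed.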
    ; CHECK-LABEL: name: fed_by_fp_load
    ; CHECK: liveins: $s0, $x0, $x1, $x2
    ; CHECK: [[COPY:%[0-9]+]]:gpr(p0) = COPY $x0
    ; CHECK: [[C:%[0-9]+]]:gpr(s64) = G_CONSTANT i64 328
    ; CHECK: [[PTR_ADD:%[0-9]+]]:gpr(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
    ; CHECK: [[C1:%[0-9]+]]:gpr(s64) = G_CONSTANT i64 344
    ; CHECK: [[PTR_ADD1:%[0-9]+]]:gpr(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
    ; CHECK: [[LOAD:%[0-9]+]]:fpr(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32))
    ; CHECK: [[LOAD1:%[0-9]+]]:fpr(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32))
    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:fpr(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32)
    ; CHECK: $d0 = COPY [[BUILD_VECTOR]](<2 x s32>)
    ; CHECK: RET_ReallyLR implicit $d0
    %0:_(p0) = COPY $x0
    %4:_(s64) = G_CONSTANT i64 328
    %5:_(p0) = G_PTR_ADD %0, %4(s64)
    %6:_(s64) = G_CONSTANT i64 344
    %7:_(p0) = G_PTR_ADD %0, %6(s64)
    %15:_(s32) = G_LOAD %5(p0) :: (load (s32))
    %20:_(s32) = G_LOAD %7(p0) :: (load (s32))
    %21:_(<2 x s32>) = G_BUILD_VECTOR %15(s32), %20(s32)
    $d0 = COPY %21(<2 x s32>)
    RET_ReallyLR implicit $d0

...