# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+sse2 -run-pass=legalizer %s -o - | FileCheck %s --check-prefixes=SSE
# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx -run-pass=legalizer %s -o - | FileCheck %s --check-prefixes=AVX
# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx2 -run-pass=legalizer %s -o - | FileCheck %s --check-prefixes=AVX

--- |
  define void @test_and_v32i8() {
    %ret = and <32 x i8> undef, undef
    ret void
  }

  define void @test_and_v16i16() {
    %ret = and <16 x i16> undef, undef
    ret void
  }

  define void @test_and_v8i32() {
    %ret = and <8 x i32> undef, undef
    ret void
  }

  define void @test_and_v4i64() {
    %ret = and <4 x i64> undef, undef
    ret void
  }
...
---
name:            test_and_v32i8
alignment:       16
legalized:       false
regBankSelected: false
registers:
  - { id: 0, class: _ }
  - { id: 1, class: _ }
  - { id: 2, class: _ }
body:             |
  bb.1 (%ir-block.0):
    liveins: $ymm0, $ymm1

    ; SSE-LABEL: name: test_and_v32i8
    ; SSE: liveins: $ymm0, $ymm1
    ; SSE-NEXT: {{  $}}
    ; SSE-NEXT: [[DEF:%[0-9]+]]:_(<32 x s8>) = IMPLICIT_DEF
    ; SSE-NEXT: [[DEF1:%[0-9]+]]:_(<32 x s8>) = IMPLICIT_DEF
    ; SSE-NEXT: [[UV:%[0-9]+]]:_(<16 x s8>), [[UV1:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[DEF]](<32 x s8>)
    ; SSE-NEXT: [[UV2:%[0-9]+]]:_(<16 x s8>), [[UV3:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[DEF1]](<32 x s8>)
    ; SSE-NEXT: [[AND:%[0-9]+]]:_(<16 x s8>) = G_AND [[UV]], [[UV2]]
    ; SSE-NEXT: [[AND1:%[0-9]+]]:_(<16 x s8>) = G_AND [[UV1]], [[UV3]]
    ; SSE-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<32 x s8>) = G_CONCAT_VECTORS [[AND]](<16 x s8>), [[AND1]](<16 x s8>)
    ; SSE-NEXT: $ymm0 = COPY [[CONCAT_VECTORS]](<32 x s8>)
    ; SSE-NEXT: RET 0
    ; AVX-LABEL: name: test_and_v32i8
    ; AVX: liveins: $ymm0, $ymm1
    ; AVX-NEXT: {{  $}}
    ; AVX-NEXT: [[DEF:%[0-9]+]]:_(<32 x s8>) = IMPLICIT_DEF
    ; AVX-NEXT: [[DEF1:%[0-9]+]]:_(<32 x s8>) = IMPLICIT_DEF
    ; AVX-NEXT: [[AND:%[0-9]+]]:_(<32 x s8>) = G_AND [[DEF]], [[DEF1]]
    ; AVX-NEXT: $ymm0 = COPY [[AND]](<32 x s8>)
    ; AVX-NEXT: RET 0
    %0(<32 x s8>) = IMPLICIT_DEF
    %1(<32 x s8>) = IMPLICIT_DEF
    %2(<32 x s8>) = G_AND %0, %1
    $ymm0 = COPY %2
    RET 0
...
---
name:            test_and_v16i16
alignment:       16
legalized:       false
regBankSelected: false
registers:
  - { id: 0, class: _ }
  - { id: 1, class: _ }
  - { id: 2, class: _ }
body:             |
  bb.1 (%ir-block.0):
    liveins: $ymm0, $ymm1

    ; SSE-LABEL: name: test_and_v16i16
    ; SSE: liveins: $ymm0, $ymm1
    ; SSE-NEXT: {{  $}}
    ; SSE-NEXT: [[DEF:%[0-9]+]]:_(<16 x s16>) = IMPLICIT_DEF
    ; SSE-NEXT: [[DEF1:%[0-9]+]]:_(<16 x s16>) = IMPLICIT_DEF
    ; SSE-NEXT: [[UV:%[0-9]+]]:_(<8 x s16>), [[UV1:%[0-9]+]]:_(<8 x s16>) = G_UNMERGE_VALUES [[DEF]](<16 x s16>)
    ; SSE-NEXT: [[UV2:%[0-9]+]]:_(<8 x s16>), [[UV3:%[0-9]+]]:_(<8 x s16>) = G_UNMERGE_VALUES [[DEF1]](<16 x s16>)
    ; SSE-NEXT: [[AND:%[0-9]+]]:_(<8 x s16>) = G_AND [[UV]], [[UV2]]
    ; SSE-NEXT: [[AND1:%[0-9]+]]:_(<8 x s16>) = G_AND [[UV1]], [[UV3]]
    ; SSE-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s16>) = G_CONCAT_VECTORS [[AND]](<8 x s16>), [[AND1]](<8 x s16>)
    ; SSE-NEXT: $ymm0 = COPY [[CONCAT_VECTORS]](<16 x s16>)
    ; SSE-NEXT: RET 0
    ; AVX-LABEL: name: test_and_v16i16
    ; AVX: liveins: $ymm0, $ymm1
    ; AVX-NEXT: {{  $}}
    ; AVX-NEXT: [[DEF:%[0-9]+]]:_(<16 x s16>) = IMPLICIT_DEF
    ; AVX-NEXT: [[DEF1:%[0-9]+]]:_(<16 x s16>) = IMPLICIT_DEF
    ; AVX-NEXT: [[AND:%[0-9]+]]:_(<16 x s16>) = G_AND [[DEF]], [[DEF1]]
    ; AVX-NEXT: $ymm0 = COPY [[AND]](<16 x s16>)
    ; AVX-NEXT: RET 0
    %0(<16 x s16>) = IMPLICIT_DEF
    %1(<16 x s16>) = IMPLICIT_DEF
    %2(<16 x s16>) = G_AND %0, %1
    $ymm0 = COPY %2
    RET 0
...
---
name:            test_and_v8i32
alignment:       16
legalized:       false
regBankSelected: false
registers:
  - { id: 0, class: _ }
  - { id: 1, class: _ }
  - { id: 2, class: _ }
body:             |
  bb.1 (%ir-block.0):
    liveins: $ymm0, $ymm1

    ; SSE-LABEL: name: test_and_v8i32
    ; SSE: liveins: $ymm0, $ymm1
    ; SSE-NEXT: {{  $}}
    ; SSE-NEXT: [[DEF:%[0-9]+]]:_(<8 x s32>) = IMPLICIT_DEF
    ; SSE-NEXT: [[DEF1:%[0-9]+]]:_(<8 x s32>) = IMPLICIT_DEF
    ; SSE-NEXT: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF]](<8 x s32>)
    ; SSE-NEXT: [[UV2:%[0-9]+]]:_(<4 x s32>), [[UV3:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF1]](<8 x s32>)
    ; SSE-NEXT: [[AND:%[0-9]+]]:_(<4 x s32>) = G_AND [[UV]], [[UV2]]
    ; SSE-NEXT: [[AND1:%[0-9]+]]:_(<4 x s32>) = G_AND [[UV1]], [[UV3]]
    ; SSE-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[AND]](<4 x s32>), [[AND1]](<4 x s32>)
    ; SSE-NEXT: $ymm0 = COPY [[CONCAT_VECTORS]](<8 x s32>)
    ; SSE-NEXT: RET 0
    ; AVX-LABEL: name: test_and_v8i32
    ; AVX: liveins: $ymm0, $ymm1
    ; AVX-NEXT: {{  $}}
    ; AVX-NEXT: [[DEF:%[0-9]+]]:_(<8 x s32>) = IMPLICIT_DEF
    ; AVX-NEXT: [[DEF1:%[0-9]+]]:_(<8 x s32>) = IMPLICIT_DEF
    ; AVX-NEXT: [[AND:%[0-9]+]]:_(<8 x s32>) = G_AND [[DEF]], [[DEF1]]
    ; AVX-NEXT: $ymm0 = COPY [[AND]](<8 x s32>)
    ; AVX-NEXT: RET 0
    %0(<8 x s32>) = IMPLICIT_DEF
    %1(<8 x s32>) = IMPLICIT_DEF
    %2(<8 x s32>) = G_AND %0, %1
    $ymm0 = COPY %2
    RET 0
...
---
name:            test_and_v4i64
alignment:       16
legalized:       false
regBankSelected: false
registers:
  - { id: 0, class: _ }
  - { id: 1, class: _ }
  - { id: 2, class: _ }
body:             |
  bb.1 (%ir-block.0):
    liveins: $ymm0, $ymm1

    ; SSE-LABEL: name: test_and_v4i64
    ; SSE: liveins: $ymm0, $ymm1
    ; SSE-NEXT: {{  $}}
    ; SSE-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = IMPLICIT_DEF
    ; SSE-NEXT: [[DEF1:%[0-9]+]]:_(<4 x s64>) = IMPLICIT_DEF
    ; SSE-NEXT: [[UV:%[0-9]+]]:_(<2 x s64>), [[UV1:%[0-9]+]]:_(<2 x s64>) = G_UNMERGE_VALUES [[DEF]](<4 x s64>)
    ; SSE-NEXT: [[UV2:%[0-9]+]]:_(<2 x s64>), [[UV3:%[0-9]+]]:_(<2 x s64>) = G_UNMERGE_VALUES [[DEF1]](<4 x s64>)
    ; SSE-NEXT: [[AND:%[0-9]+]]:_(<2 x s64>) = G_AND [[UV]], [[UV2]]
    ; SSE-NEXT: [[AND1:%[0-9]+]]:_(<2 x s64>) = G_AND [[UV1]], [[UV3]]
    ; SSE-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[AND]](<2 x s64>), [[AND1]](<2 x s64>)
    ; SSE-NEXT: $ymm0 = COPY [[CONCAT_VECTORS]](<4 x s64>)
    ; SSE-NEXT: RET 0
    ; AVX-LABEL: name: test_and_v4i64
    ; AVX: liveins: $ymm0, $ymm1
    ; AVX-NEXT: {{  $}}
    ; AVX-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = IMPLICIT_DEF
    ; AVX-NEXT: [[DEF1:%[0-9]+]]:_(<4 x s64>) = IMPLICIT_DEF
    ; AVX-NEXT: [[AND:%[0-9]+]]:_(<4 x s64>) = G_AND [[DEF]], [[DEF1]]
    ; AVX-NEXT: $ymm0 = COPY [[AND]](<4 x s64>)
    ; AVX-NEXT: RET 0
    %0(<4 x s64>) = IMPLICIT_DEF
    %1(<4 x s64>) = IMPLICIT_DEF
    %2(<4 x s64>) = G_AND %0, %1
    $ymm0 = COPY %2
    RET 0
...