! RUN: %flang_fc1 -flang-experimental-hlfir -triple powerpc64le-unknown-unknown -emit-llvm %s -o - | FileCheck --check-prefixes="LLVMIR","LLVMIR-LE" %s
! RUN: %flang_fc1 -flang-experimental-hlfir -triple powerpc64-unknown-unknown -emit-llvm %s -o - | FileCheck --check-prefixes="LLVMIR","LLVMIR-BE" %s
! REQUIRES: target=powerpc{{.*}}

! CHECK-LABEL: vec_perm_test_i1
subroutine vec_perm_test_i1(arg1, arg2, arg3)
  vector(integer(1)) :: arg1, arg2, r
  vector(unsigned(1)) :: arg3
  r = vec_perm(arg1, arg2, arg3)

! LLVMIR: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg3:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
! LLVMIR: %[[barg1:.*]] = bitcast <16 x i8> %[[arg1]] to <4 x i32>
! LLVMIR: %[[barg2:.*]] = bitcast <16 x i8> %[[arg2]] to <4 x i32>
! LLVMIR-LE: %[[xor:.*]] = xor <16 x i8> %[[arg3]], splat (i8 -1)
! LLVMIR-LE: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[barg2]], <4 x i32> %[[barg1]], <16 x i8> %[[xor]])
! LLVMIR-BE: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[barg1]], <4 x i32> %[[barg2]], <16 x i8> %[[arg3]])
! LLVMIR: %[[bcall:.*]] = bitcast <4 x i32> %[[call]] to <16 x i8>
! LLVMIR: store <16 x i8> %[[bcall]], ptr %{{.*}}, align 16
end subroutine vec_perm_test_i1

! CHECK-LABEL: vec_perm_test_i2
subroutine vec_perm_test_i2(arg1, arg2, arg3)
  vector(integer(2)) :: arg1, arg2, r
  vector(unsigned(1)) :: arg3
  r = vec_perm(arg1, arg2, arg3)

! LLVMIR: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg3:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
! LLVMIR: %[[barg1:.*]] = bitcast <8 x i16> %[[arg1]] to <4 x i32>
! LLVMIR: %[[barg2:.*]] = bitcast <8 x i16> %[[arg2]] to <4 x i32>
! LLVMIR-LE: %[[xor:.*]] = xor <16 x i8> %[[arg3]], splat (i8 -1)
! LLVMIR-LE: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[barg2]], <4 x i32> %[[barg1]], <16 x i8> %[[xor]])
! LLVMIR-BE: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[barg1]], <4 x i32> %[[barg2]], <16 x i8> %[[arg3]])
! LLVMIR: %[[bcall:.*]] = bitcast <4 x i32> %[[call]] to <8 x i16>
! LLVMIR: store <8 x i16> %[[bcall]], ptr %{{.*}}, align 16
end subroutine vec_perm_test_i2

! CHECK-LABEL: vec_perm_test_i4
subroutine vec_perm_test_i4(arg1, arg2, arg3)
  vector(integer(4)) :: arg1, arg2, r
  vector(unsigned(1)) :: arg3
  r = vec_perm(arg1, arg2, arg3)

! LLVMIR: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg3:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
! LLVMIR-LE: %[[xor:.*]] = xor <16 x i8> %[[arg3]], splat (i8 -1)
! LLVMIR-LE: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[arg2]], <4 x i32> %[[arg1]], <16 x i8> %[[xor]])
! LLVMIR-BE: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[arg1]], <4 x i32> %[[arg2]], <16 x i8> %[[arg3]])
! LLVMIR: store <4 x i32> %[[call]], ptr %{{.*}}, align 16
end subroutine vec_perm_test_i4

! CHECK-LABEL: vec_perm_test_i8
subroutine vec_perm_test_i8(arg1, arg2, arg3)
  vector(integer(8)) :: arg1, arg2, r
  vector(unsigned(1)) :: arg3
  r = vec_perm(arg1, arg2, arg3)

! LLVMIR: %[[arg1:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg3:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
! LLVMIR: %[[barg1:.*]] = bitcast <2 x i64> %[[arg1]] to <4 x i32>
! LLVMIR: %[[barg2:.*]] = bitcast <2 x i64> %[[arg2]] to <4 x i32>
! LLVMIR-LE: %[[xor:.*]] = xor <16 x i8> %[[arg3]], splat (i8 -1)
! LLVMIR-LE: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[barg2]], <4 x i32> %[[barg1]], <16 x i8> %[[xor]])
! LLVMIR-BE: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[barg1]], <4 x i32> %[[barg2]], <16 x i8> %[[arg3]])
! LLVMIR: %[[bcall:.*]] = bitcast <4 x i32> %[[call]] to <2 x i64>
! LLVMIR: store <2 x i64> %[[bcall]], ptr %{{.*}}, align 16
end subroutine vec_perm_test_i8

! CHECK-LABEL: vec_perm_test_u1
subroutine vec_perm_test_u1(arg1, arg2, arg3)
  vector(unsigned(1)) :: arg1, arg2, r
  vector(unsigned(1)) :: arg3
  r = vec_perm(arg1, arg2, arg3)

! LLVMIR: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg3:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
! LLVMIR: %[[barg1:.*]] = bitcast <16 x i8> %[[arg1]] to <4 x i32>
! LLVMIR: %[[barg2:.*]] = bitcast <16 x i8> %[[arg2]] to <4 x i32>
! LLVMIR-LE: %[[xor:.*]] = xor <16 x i8> %[[arg3]], splat (i8 -1)
! LLVMIR-LE: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[barg2]], <4 x i32> %[[barg1]], <16 x i8> %[[xor]])
! LLVMIR-BE: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[barg1]], <4 x i32> %[[barg2]], <16 x i8> %[[arg3]])
! LLVMIR: %[[bcall:.*]] = bitcast <4 x i32> %[[call]] to <16 x i8>
! LLVMIR: store <16 x i8> %[[bcall]], ptr %{{.*}}, align 16
end subroutine vec_perm_test_u1

! CHECK-LABEL: vec_perm_test_u2
subroutine vec_perm_test_u2(arg1, arg2, arg3)
  vector(unsigned(2)) :: arg1, arg2, r
  vector(unsigned(1)) :: arg3
  r = vec_perm(arg1, arg2, arg3)

! LLVMIR: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg3:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
! LLVMIR: %[[barg1:.*]] = bitcast <8 x i16> %[[arg1]] to <4 x i32>
! LLVMIR: %[[barg2:.*]] = bitcast <8 x i16> %[[arg2]] to <4 x i32>
! LLVMIR-LE: %[[xor:.*]] = xor <16 x i8> %[[arg3]], splat (i8 -1)
! LLVMIR-LE: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[barg2]], <4 x i32> %[[barg1]], <16 x i8> %[[xor]])
! LLVMIR-BE: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[barg1]], <4 x i32> %[[barg2]], <16 x i8> %[[arg3]])
! LLVMIR: %[[bcall:.*]] = bitcast <4 x i32> %[[call]] to <8 x i16>
! LLVMIR: store <8 x i16> %[[bcall]], ptr %{{.*}}, align 16
end subroutine vec_perm_test_u2

! CHECK-LABEL: vec_perm_test_u4
subroutine vec_perm_test_u4(arg1, arg2, arg3)
  vector(unsigned(4)) :: arg1, arg2, r
  vector(unsigned(1)) :: arg3
  r = vec_perm(arg1, arg2, arg3)

! LLVMIR: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg3:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
! LLVMIR-LE: %[[xor:.*]] = xor <16 x i8> %[[arg3]], splat (i8 -1)
! LLVMIR-LE: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[arg2]], <4 x i32> %[[arg1]], <16 x i8> %[[xor]])
! LLVMIR-BE: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[arg1]], <4 x i32> %[[arg2]], <16 x i8> %[[arg3]])
! LLVMIR: store <4 x i32> %[[call]], ptr %{{.*}}, align 16
end subroutine vec_perm_test_u4

! CHECK-LABEL: vec_perm_test_u8
subroutine vec_perm_test_u8(arg1, arg2, arg3)
  vector(unsigned(8)) :: arg1, arg2, r
  vector(unsigned(1)) :: arg3
  r = vec_perm(arg1, arg2, arg3)

! LLVMIR: %[[arg1:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg3:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
! LLVMIR: %[[barg1:.*]] = bitcast <2 x i64> %[[arg1]] to <4 x i32>
! LLVMIR: %[[barg2:.*]] = bitcast <2 x i64> %[[arg2]] to <4 x i32>
! LLVMIR-LE: %[[xor:.*]] = xor <16 x i8> %[[arg3]], splat (i8 -1)
! LLVMIR-LE: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[barg2]], <4 x i32> %[[barg1]], <16 x i8> %[[xor]])
! LLVMIR-BE: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[barg1]], <4 x i32> %[[barg2]], <16 x i8> %[[arg3]])
! LLVMIR: %[[bcall:.*]] = bitcast <4 x i32> %[[call]] to <2 x i64>
! LLVMIR: store <2 x i64> %[[bcall]], ptr %{{.*}}, align 16
end subroutine vec_perm_test_u8

! CHECK-LABEL: vec_perm_test_r4
subroutine vec_perm_test_r4(arg1, arg2, arg3)
  vector(real(4)) :: arg1, arg2, r
  vector(unsigned(1)) :: arg3
  r = vec_perm(arg1, arg2, arg3)

! LLVMIR: %[[arg1:.*]] = load <4 x float>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load <4 x float>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg3:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
! LLVMIR: %[[barg1:.*]] = bitcast <4 x float> %[[arg1]] to <4 x i32>
! LLVMIR: %[[barg2:.*]] = bitcast <4 x float> %[[arg2]] to <4 x i32>
! LLVMIR-LE: %[[xor:.*]] = xor <16 x i8> %[[arg3]], splat (i8 -1)
! LLVMIR-LE: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[barg2]], <4 x i32> %[[barg1]], <16 x i8> %[[xor]])
! LLVMIR-BE: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[barg1]], <4 x i32> %[[barg2]], <16 x i8> %[[arg3]])
! LLVMIR: %[[bcall:.*]] = bitcast <4 x i32> %[[call]] to <4 x float>
! LLVMIR: store <4 x float> %[[bcall]], ptr %{{.*}}, align 16
end subroutine vec_perm_test_r4

! CHECK-LABEL: vec_perm_test_r8
subroutine vec_perm_test_r8(arg1, arg2, arg3)
  vector(real(8)) :: arg1, arg2, r
  vector(unsigned(1)) :: arg3
  r = vec_perm(arg1, arg2, arg3)

! LLVMIR: %[[arg1:.*]] = load <2 x double>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load <2 x double>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg3:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
! LLVMIR: %[[barg1:.*]] = bitcast <2 x double> %[[arg1]] to <4 x i32>
! LLVMIR: %[[barg2:.*]] = bitcast <2 x double> %[[arg2]] to <4 x i32>
! LLVMIR-LE: %[[xor:.*]] = xor <16 x i8> %[[arg3]], splat (i8 -1)
! LLVMIR-LE: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[barg2]], <4 x i32> %[[barg1]], <16 x i8> %[[xor]])
! LLVMIR-BE: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[barg1]], <4 x i32> %[[barg2]], <16 x i8> %[[arg3]])
! LLVMIR: %[[bcall:.*]] = bitcast <4 x i32> %[[call]] to <2 x double>
! LLVMIR: store <2 x double> %[[bcall]], ptr %{{.*}}, align 16
end subroutine vec_perm_test_r8

! CHECK-LABEL: vec_permi_test_i8i1
subroutine vec_permi_test_i8i1(arg1, arg2, arg3)
  vector(integer(8)) :: arg1, arg2, r
  r = vec_permi(arg1, arg2, 3_1)

! LLVMIR: %[[arg1:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
! LLVMIR: %[[shuf:.*]] = shufflevector <2 x i64> %[[arg1]], <2 x i64> %[[arg2]], <2 x i32> <i32 1, i32 3>
! LLVMIR: store <2 x i64> %[[shuf]], ptr %{{.*}}, align 16
end subroutine vec_permi_test_i8i1

! CHECK-LABEL: vec_permi_test_i8i2
subroutine vec_permi_test_i8i2(arg1, arg2, arg3)
  vector(integer(8)) :: arg1, arg2, r
  r = vec_permi(arg1, arg2, 2_2)

! LLVMIR: %[[arg1:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
! LLVMIR: %[[shuf:.*]] = shufflevector <2 x i64> %[[arg1]], <2 x i64> %[[arg2]], <2 x i32> <i32 1, i32 2>
! LLVMIR: store <2 x i64> %[[shuf]], ptr %{{.*}}, align 16
end subroutine vec_permi_test_i8i2

! CHECK-LABEL: vec_permi_test_i8i4
subroutine vec_permi_test_i8i4(arg1, arg2, arg3)
  vector(integer(8)) :: arg1, arg2, r
  r = vec_permi(arg1, arg2, 1_4)

! LLVMIR: %[[arg1:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
! LLVMIR: %[[shuf:.*]] = shufflevector <2 x i64> %[[arg1]], <2 x i64> %[[arg2]], <2 x i32> <i32 0, i32 3>
! LLVMIR: store <2 x i64> %[[shuf]], ptr %{{.*}}, align 16
end subroutine vec_permi_test_i8i4

! CHECK-LABEL: vec_permi_test_i8i8
subroutine vec_permi_test_i8i8(arg1, arg2, arg3)
  vector(integer(8)) :: arg1, arg2, r
  r = vec_permi(arg1, arg2, 0_8)

! LLVMIR: %[[arg1:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
! LLVMIR: %[[shuf:.*]] = shufflevector <2 x i64> %[[arg1]], <2 x i64> %[[arg2]], <2 x i32> <i32 0, i32 2>
! LLVMIR: store <2 x i64> %[[shuf]], ptr %{{.*}}, align 16
end subroutine vec_permi_test_i8i8

! CHECK-LABEL: vec_permi_test_u8i1
subroutine vec_permi_test_u8i1(arg1, arg2, arg3)
  vector(unsigned(8)) :: arg1, arg2, r
  r = vec_permi(arg1, arg2, 3_1)

! LLVMIR: %[[arg1:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
! LLVMIR: %[[shuf:.*]] = shufflevector <2 x i64> %[[arg1]], <2 x i64> %[[arg2]], <2 x i32> <i32 1, i32 3>
! LLVMIR: store <2 x i64> %[[shuf]], ptr %{{.*}}, align 16
end subroutine vec_permi_test_u8i1

! CHECK-LABEL: vec_permi_test_u8i2
subroutine vec_permi_test_u8i2(arg1, arg2, arg3)
  vector(unsigned(8)) :: arg1, arg2, r
  r = vec_permi(arg1, arg2, 2_2)

! LLVMIR: %[[arg1:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
! LLVMIR: %[[shuf:.*]] = shufflevector <2 x i64> %[[arg1]], <2 x i64> %[[arg2]], <2 x i32> <i32 1, i32 2>
! LLVMIR: store <2 x i64> %[[shuf]], ptr %{{.*}}, align 16
end subroutine vec_permi_test_u8i2

! CHECK-LABEL: vec_permi_test_u8i4
subroutine vec_permi_test_u8i4(arg1, arg2, arg3)
  vector(unsigned(8)) :: arg1, arg2, r
  r = vec_permi(arg1, arg2, 1_4)

! LLVMIR: %[[arg1:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
! LLVMIR: %[[shuf:.*]] = shufflevector <2 x i64> %[[arg1]], <2 x i64> %[[arg2]], <2 x i32> <i32 0, i32 3>
! LLVMIR: store <2 x i64> %[[shuf]], ptr %{{.*}}, align 16
end subroutine vec_permi_test_u8i4

! CHECK-LABEL: vec_permi_test_u8i8
subroutine vec_permi_test_u8i8(arg1, arg2, arg3)
  vector(unsigned(8)) :: arg1, arg2, r
  r = vec_permi(arg1, arg2, 0_8)

! LLVMIR: %[[arg1:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
! LLVMIR: %[[shuf:.*]] = shufflevector <2 x i64> %[[arg1]], <2 x i64> %[[arg2]], <2 x i32> <i32 0, i32 2>
! LLVMIR: store <2 x i64> %[[shuf]], ptr %{{.*}}, align 16
end subroutine vec_permi_test_u8i8

! CHECK-LABEL: vec_permi_test_r4i1
subroutine vec_permi_test_r4i1(arg1, arg2, arg3)
  vector(real(4)) :: arg1, arg2, r
  r = vec_permi(arg1, arg2, 3_1)

! LLVMIR: %[[arg1:.*]] = load <4 x float>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load <4 x float>, ptr %{{.*}}, align 16
! LLVMIR: %[[barg1:.*]] = bitcast <4 x float> %[[arg1]] to <2 x double>
! LLVMIR: %[[barg2:.*]] = bitcast <4 x float> %[[arg2]] to <2 x double>
! LLVMIR: %[[shuf:.*]] = shufflevector <2 x double> %[[barg1]], <2 x double> %[[barg2]], <2 x i32> <i32 1, i32 3>
! LLVMIR: %[[bshuf:.*]] = bitcast <2 x double> %[[shuf]] to <4 x float>
! LLVMIR: store <4 x float> %[[bshuf]], ptr %{{.*}}, align 16
end subroutine vec_permi_test_r4i1

! CHECK-LABEL: vec_permi_test_r4i2
subroutine vec_permi_test_r4i2(arg1, arg2, arg3)
  vector(real(4)) :: arg1, arg2, r
  r = vec_permi(arg1, arg2, 2_2)

! LLVMIR: %[[arg1:.*]] = load <4 x float>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load <4 x float>, ptr %{{.*}}, align 16
! LLVMIR: %[[barg1:.*]] = bitcast <4 x float> %[[arg1]] to <2 x double>
! LLVMIR: %[[barg2:.*]] = bitcast <4 x float> %[[arg2]] to <2 x double>
! LLVMIR: %[[shuf:.*]] = shufflevector <2 x double> %[[barg1]], <2 x double> %[[barg2]], <2 x i32> <i32 1, i32 2>
! LLVMIR: %[[bshuf:.*]] = bitcast <2 x double> %[[shuf]] to <4 x float>
! LLVMIR: store <4 x float> %[[bshuf]], ptr %{{.*}}, align 16
end subroutine vec_permi_test_r4i2

! CHECK-LABEL: vec_permi_test_r4i4
subroutine vec_permi_test_r4i4(arg1, arg2, arg3)
  vector(real(4)) :: arg1, arg2, r
  r = vec_permi(arg1, arg2, 1_4)

! LLVMIR: %[[arg1:.*]] = load <4 x float>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load <4 x float>, ptr %{{.*}}, align 16
! LLVMIR: %[[barg1:.*]] = bitcast <4 x float> %[[arg1]] to <2 x double>
! LLVMIR: %[[barg2:.*]] = bitcast <4 x float> %[[arg2]] to <2 x double>
! LLVMIR: %[[shuf:.*]] = shufflevector <2 x double> %[[barg1]], <2 x double> %[[barg2]], <2 x i32> <i32 0, i32 3>
! LLVMIR: %[[bshuf:.*]] = bitcast <2 x double> %[[shuf]] to <4 x float>
! LLVMIR: store <4 x float> %[[bshuf]], ptr %{{.*}}, align 16
end subroutine vec_permi_test_r4i4

! CHECK-LABEL: vec_permi_test_r4i8
subroutine vec_permi_test_r4i8(arg1, arg2, arg3)
  vector(real(4)) :: arg1, arg2, r
  r = vec_permi(arg1, arg2, 0_8)

! LLVMIR: %[[arg1:.*]] = load <4 x float>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load <4 x float>, ptr %{{.*}}, align 16
! LLVMIR: %[[barg1:.*]] = bitcast <4 x float> %[[arg1]] to <2 x double>
! LLVMIR: %[[barg2:.*]] = bitcast <4 x float> %[[arg2]] to <2 x double>
! LLVMIR: %[[shuf:.*]] = shufflevector <2 x double> %[[barg1]], <2 x double> %[[barg2]], <2 x i32> <i32 0, i32 2>
! LLVMIR: %[[bshuf:.*]] = bitcast <2 x double> %[[shuf]] to <4 x float>
! LLVMIR: store <4 x float> %[[bshuf]], ptr %{{.*}}, align 16
end subroutine vec_permi_test_r4i8

! CHECK-LABEL: vec_permi_test_r8i1
subroutine vec_permi_test_r8i1(arg1, arg2, arg3)
  vector(real(8)) :: arg1, arg2, r
  r = vec_permi(arg1, arg2, 3_1)

! LLVMIR: %[[arg1:.*]] = load <2 x double>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load <2 x double>, ptr %{{.*}}, align 16
! LLVMIR: %[[shuf:.*]] = shufflevector <2 x double> %[[arg1]], <2 x double> %[[arg2]], <2 x i32> <i32 1, i32 3>
! LLVMIR: store <2 x double> %[[shuf]], ptr %{{.*}}, align 16
end subroutine vec_permi_test_r8i1

! CHECK-LABEL: vec_permi_test_r8i2
subroutine vec_permi_test_r8i2(arg1, arg2, arg3)
  vector(real(8)) :: arg1, arg2, r
  r = vec_permi(arg1, arg2, 2_2)

! LLVMIR: %[[arg1:.*]] = load <2 x double>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load <2 x double>, ptr %{{.*}}, align 16
! LLVMIR: %[[shuf:.*]] = shufflevector <2 x double> %[[arg1]], <2 x double> %[[arg2]], <2 x i32> <i32 1, i32 2>
! LLVMIR: store <2 x double> %[[shuf]], ptr %{{.*}}, align 16
end subroutine vec_permi_test_r8i2

! CHECK-LABEL: vec_permi_test_r8i4
subroutine vec_permi_test_r8i4(arg1, arg2, arg3)
  vector(real(8)) :: arg1, arg2, r
  r = vec_permi(arg1, arg2, 1_4)

! LLVMIR: %[[arg1:.*]] = load <2 x double>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load <2 x double>, ptr %{{.*}}, align 16
! LLVMIR: %[[shuf:.*]] = shufflevector <2 x double> %[[arg1]], <2 x double> %[[arg2]], <2 x i32> <i32 0, i32 3>
! LLVMIR: store <2 x double> %[[shuf]], ptr %{{.*}}, align 16
end subroutine vec_permi_test_r8i4

! CHECK-LABEL: vec_permi_test_r8i8
subroutine vec_permi_test_r8i8(arg1, arg2, arg3)
  vector(real(8)) :: arg1, arg2, r
  r = vec_permi(arg1, arg2, 0_8)

! LLVMIR: %[[arg1:.*]] = load <2 x double>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load <2 x double>, ptr %{{.*}}, align 16
! LLVMIR: %[[shuf:.*]] = shufflevector <2 x double> %[[arg1]], <2 x double> %[[arg2]], <2 x i32> <i32 0, i32 2>
! LLVMIR: store <2 x double> %[[shuf]], ptr %{{.*}}, align 16
end subroutine vec_permi_test_r8i8