; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt -S --passes=slp-vectorizer -mtriple=riscv64-unknown-linux-gnu -mattr="+v" < %s | FileCheck %s

@c = global [12 x i64] zeroinitializer

; Four loads of @c at elements 0, 3, 6 and 9 (a 24-byte stride) feed a chain of
; umax calls; SLP vectorization is expected to turn them into a single
; vp.strided.load and a umax reduction.
define i32 @test() {
; CHECK-LABEL: define i32 @test(
; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = call <4 x i64> @llvm.experimental.vp.strided.load.v4i64.p0.i64(ptr align 8 @c, i64 24, <4 x i1> splat (i1 true), i32 4)
; CHECK-NEXT:    [[TMP1:%.*]] = trunc <4 x i64> [[TMP0]] to <4 x i16>
; CHECK-NEXT:    [[TMP3:%.*]] = xor <4 x i16> [[TMP1]], splat (i16 -1)
; CHECK-NEXT:    [[TMP4:%.*]] = call i16 @llvm.vector.reduce.umax.v4i16(<4 x i16> [[TMP3]])
; CHECK-NEXT:    [[TMP5:%.*]] = zext i16 [[TMP4]] to i32
; CHECK-NEXT:    [[TMP6:%.*]] = call i32 @llvm.umax.i32(i32 [[TMP5]], i32 1)
; CHECK-NEXT:    ret i32 [[TMP6]]
;
entry:
  %0 = load i64, ptr @c, align 8
  %conv = trunc i64 %0 to i32
  %conv3 = and i32 %conv, 65535
  %conv4 = xor i32 %conv3, 65535
  %.conv4 = tail call i32 @llvm.umax.i32(i32 1, i32 %conv4)
  %1 = load i64, ptr getelementptr inbounds ([12 x i64], ptr @c, i64 0, i64 3), align 8
  %conv.1 = trunc i64 %1 to i32
  %conv3.1 = and i32 %conv.1, 65535
  %conv4.1 = xor i32 %conv3.1, 65535
  %.conv4.1 = tail call i32 @llvm.umax.i32(i32 %.conv4, i32 %conv4.1)
  %2 = load i64, ptr getelementptr inbounds ([12 x i64], ptr @c, i64 0, i64 6), align 8
  %conv.2 = trunc i64 %2 to i32
  %conv3.2 = and i32 %conv.2, 65535
  %conv4.2 = xor i32 %conv3.2, 65535
  %.conv4.2 = tail call i32 @llvm.umax.i32(i32 %.conv4.1, i32 %conv4.2)
  %3 = load i64, ptr getelementptr inbounds ([12 x i64], ptr @c, i64 0, i64 9), align 8
  %conv.3 = trunc i64 %3 to i32
  %conv3.3 = and i32 %conv.3, 65535
  %conv4.3 = xor i32 %conv3.3, 65535
  %.conv4.3 = tail call i32 @llvm.umax.i32(i32 %.conv4.2, i32 %conv4.3)
  ret i32 %.conv4.3
}