; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=slp-vectorizer -mtriple=riscv64 -mattr=+v \
; RUN:   -riscv-v-vector-bits-min=-1 -riscv-v-slp-max-vf=0 -S | FileCheck %s --check-prefixes=CHECK
; RUN: opt < %s -passes=slp-vectorizer -mtriple=riscv64 -mattr=+v -S | FileCheck %s --check-prefixes=DEFAULT
;
; Both RUN configurations are expected to SLP-vectorize the adjacent i16
; loads, the scalar binary ops (or min/max intrinsic calls), and the adjacent
; i16 stores in each function below into <2 x i16> operations.

define void @vec_add(ptr %dest, ptr %p) {
; CHECK-LABEL: @vec_add(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = load <2 x i16>, ptr [[P:%.*]], align 4
; CHECK-NEXT:    [[TMP1:%.*]] = add <2 x i16> [[TMP0]], splat (i16 1)
; CHECK-NEXT:    store <2 x i16> [[TMP1]], ptr [[DEST:%.*]], align 4
; CHECK-NEXT:    ret void
;
; DEFAULT-LABEL: @vec_add(
; DEFAULT-NEXT:  entry:
; DEFAULT-NEXT:    [[TMP0:%.*]] = load <2 x i16>, ptr [[P:%.*]], align 4
; DEFAULT-NEXT:    [[TMP1:%.*]] = add <2 x i16> [[TMP0]], splat (i16 1)
; DEFAULT-NEXT:    store <2 x i16> [[TMP1]], ptr [[DEST:%.*]], align 4
; DEFAULT-NEXT:    ret void
;
entry:
  %e0 = load i16, ptr %p, align 4
  %inc = getelementptr inbounds i16, ptr %p, i64 1
  %e1 = load i16, ptr %inc, align 2

  %a0 = add i16 %e0, 1
  %a1 = add i16 %e1, 1

  store i16 %a0, ptr %dest, align 4
  %inc2 = getelementptr inbounds i16, ptr %dest, i64 1
  store i16 %a1, ptr %inc2, align 2
  ret void
}

define void @vec_sub(ptr %dest, ptr %p) {
; CHECK-LABEL: @vec_sub(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = load <2 x i16>, ptr [[P:%.*]], align 4
; CHECK-NEXT:    [[TMP1:%.*]] = sub <2 x i16> [[TMP0]], splat (i16 17)
; CHECK-NEXT:    store <2 x i16> [[TMP1]], ptr [[DEST:%.*]], align 4
; CHECK-NEXT:    ret void
;
; DEFAULT-LABEL: @vec_sub(
; DEFAULT-NEXT:  entry:
; DEFAULT-NEXT:    [[TMP0:%.*]] = load <2 x i16>, ptr [[P:%.*]], align 4
; DEFAULT-NEXT:    [[TMP1:%.*]] = sub <2 x i16> [[TMP0]], splat (i16 17)
; DEFAULT-NEXT:    store <2 x i16> [[TMP1]], ptr [[DEST:%.*]], align 4
; DEFAULT-NEXT:    ret void
;
entry:
  %e0 = load i16, ptr %p, align 4
  %inc = getelementptr inbounds i16, ptr %p, i64 1
  %e1 = load i16, ptr %inc, align 2

  %a0 = sub i16 %e0, 17
  %a1 = sub i16 %e1, 17

  store i16 %a0, ptr %dest, align 4
  %inc2 = getelementptr inbounds i16, ptr %dest, i64 1
  store i16 %a1, ptr %inc2, align 2
  ret void
}

define void @vec_rsub(ptr %dest, ptr %p) {
; CHECK-LABEL: @vec_rsub(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = load <2 x i16>, ptr [[P:%.*]], align 4
; CHECK-NEXT:    [[TMP1:%.*]] = sub <2 x i16> splat (i16 29), [[TMP0]]
; CHECK-NEXT:    store <2 x i16> [[TMP1]], ptr [[DEST:%.*]], align 4
; CHECK-NEXT:    ret void
;
; DEFAULT-LABEL: @vec_rsub(
; DEFAULT-NEXT:  entry:
; DEFAULT-NEXT:    [[TMP0:%.*]] = load <2 x i16>, ptr [[P:%.*]], align 4
; DEFAULT-NEXT:    [[TMP1:%.*]] = sub <2 x i16> splat (i16 29), [[TMP0]]
; DEFAULT-NEXT:    store <2 x i16> [[TMP1]], ptr [[DEST:%.*]], align 4
; DEFAULT-NEXT:    ret void
;
entry:
  %e0 = load i16, ptr %p, align 4
  %inc = getelementptr inbounds i16, ptr %p, i64 1
  %e1 = load i16, ptr %inc, align 2

  %a0 = sub i16 29, %e0
  %a1 = sub i16 29, %e1

  store i16 %a0, ptr %dest, align 4
  %inc2 = getelementptr inbounds i16, ptr %dest, i64 1
  store i16 %a1, ptr %inc2, align 2
  ret void
}

define void @vec_mul(ptr %dest, ptr %p) {
; CHECK-LABEL: @vec_mul(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = load <2 x i16>, ptr [[P:%.*]], align 4
; CHECK-NEXT:    [[TMP1:%.*]] = mul <2 x i16> [[TMP0]], splat (i16 7)
; CHECK-NEXT:    store <2 x i16> [[TMP1]], ptr [[DEST:%.*]], align 4
; CHECK-NEXT:    ret void
;
; DEFAULT-LABEL: @vec_mul(
; DEFAULT-NEXT:  entry:
; DEFAULT-NEXT:    [[TMP0:%.*]] = load <2 x i16>, ptr [[P:%.*]], align 4
; DEFAULT-NEXT:    [[TMP1:%.*]] = mul <2 x i16> [[TMP0]], splat (i16 7)
; DEFAULT-NEXT:    store <2 x i16> [[TMP1]], ptr [[DEST:%.*]], align 4
; DEFAULT-NEXT:    ret void
;
entry:
  %e0 = load i16, ptr %p, align 4
  %inc = getelementptr inbounds i16, ptr %p, i64 1
  %e1 = load i16, ptr %inc, align 2

  %a0 = mul i16 %e0, 7
  %a1 = mul i16 %e1, 7

  store i16 %a0, ptr %dest, align 4
  %inc2 = getelementptr inbounds i16, ptr %dest, i64 1
  store i16 %a1, ptr %inc2, align 2
  ret void
}

define void @vec_sdiv(ptr %dest, ptr %p) {
; CHECK-LABEL: @vec_sdiv(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = load <2 x i16>, ptr [[P:%.*]], align 4
; CHECK-NEXT:    [[TMP1:%.*]] = sdiv <2 x i16> [[TMP0]], splat (i16 7)
; CHECK-NEXT:    store <2 x i16> [[TMP1]], ptr [[DEST:%.*]], align 4
; CHECK-NEXT:    ret void
;
; DEFAULT-LABEL: @vec_sdiv(
; DEFAULT-NEXT:  entry:
; DEFAULT-NEXT:    [[TMP0:%.*]] = load <2 x i16>, ptr [[P:%.*]], align 4
; DEFAULT-NEXT:    [[TMP1:%.*]] = sdiv <2 x i16> [[TMP0]], splat (i16 7)
; DEFAULT-NEXT:    store <2 x i16> [[TMP1]], ptr [[DEST:%.*]], align 4
; DEFAULT-NEXT:    ret void
;
entry:
  %e0 = load i16, ptr %p, align 4
  %inc = getelementptr inbounds i16, ptr %p, i64 1
  %e1 = load i16, ptr %inc, align 2

  %a0 = sdiv i16 %e0, 7
  %a1 = sdiv i16 %e1, 7

  store i16 %a0, ptr %dest, align 4
  %inc2 = getelementptr inbounds i16, ptr %dest, i64 1
  store i16 %a1, ptr %inc2, align 2
  ret void
}

define void @vec_and(ptr %dest, ptr %p, ptr %q) {
; CHECK-LABEL: @vec_and(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = load <2 x i16>, ptr [[P:%.*]], align 4
; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i16>, ptr [[Q:%.*]], align 4
; CHECK-NEXT:    [[TMP2:%.*]] = and <2 x i16> [[TMP0]], [[TMP1]]
; CHECK-NEXT:    store <2 x i16> [[TMP2]], ptr [[DEST:%.*]], align 4
; CHECK-NEXT:    ret void
;
; DEFAULT-LABEL: @vec_and(
; DEFAULT-NEXT:  entry:
; DEFAULT-NEXT:    [[TMP0:%.*]] = load <2 x i16>, ptr [[P:%.*]], align 4
; DEFAULT-NEXT:    [[TMP1:%.*]] = load <2 x i16>, ptr [[Q:%.*]], align 4
; DEFAULT-NEXT:    [[TMP2:%.*]] = and <2 x i16> [[TMP0]], [[TMP1]]
; DEFAULT-NEXT:    store <2 x i16> [[TMP2]], ptr [[DEST:%.*]], align 4
; DEFAULT-NEXT:    ret void
;
entry:
  %e0 = load i16, ptr %p, align 4
  %inc = getelementptr inbounds i16, ptr %p, i64 1
  %e1 = load i16, ptr %inc, align 2

  %f0 = load i16, ptr %q, align 4
  %inq = getelementptr inbounds i16, ptr %q, i64 1
  %f1 = load i16, ptr %inq, align 2

  %a0 = and i16 %e0, %f0
  %a1 = and i16 %e1, %f1

  store i16 %a0, ptr %dest, align 4
  %inc2 = getelementptr inbounds i16, ptr %dest, i64 1
  store i16 %a1, ptr %inc2, align 2
  ret void
}

define void @vec_or(ptr %dest, ptr %p, ptr %q) {
; CHECK-LABEL: @vec_or(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = load <2 x i16>, ptr [[P:%.*]], align 4
; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i16>, ptr [[Q:%.*]], align 4
; CHECK-NEXT:    [[TMP2:%.*]] = or <2 x i16> [[TMP0]], [[TMP1]]
; CHECK-NEXT:    store <2 x i16> [[TMP2]], ptr [[DEST:%.*]], align 4
; CHECK-NEXT:    ret void
;
; DEFAULT-LABEL: @vec_or(
; DEFAULT-NEXT:  entry:
; DEFAULT-NEXT:    [[TMP0:%.*]] = load <2 x i16>, ptr [[P:%.*]], align 4
; DEFAULT-NEXT:    [[TMP1:%.*]] = load <2 x i16>, ptr [[Q:%.*]], align 4
; DEFAULT-NEXT:    [[TMP2:%.*]] = or <2 x i16> [[TMP0]], [[TMP1]]
; DEFAULT-NEXT:    store <2 x i16> [[TMP2]], ptr [[DEST:%.*]], align 4
; DEFAULT-NEXT:    ret void
;
entry:
  %e0 = load i16, ptr %p, align 4
  %inc = getelementptr inbounds i16, ptr %p, i64 1
  %e1 = load i16, ptr %inc, align 2

  %f0 = load i16, ptr %q, align 4
  %inq = getelementptr inbounds i16, ptr %q, i64 1
  %f1 = load i16, ptr %inq, align 2

  %a0 = or i16 %e0, %f0
  %a1 = or i16 %e1, %f1

  store i16 %a0, ptr %dest, align 4
  %inc2 = getelementptr inbounds i16, ptr %dest, i64 1
  store i16 %a1, ptr %inc2, align 2
  ret void
}

define void @vec_sll(ptr %dest, ptr %p, ptr %q) {
; CHECK-LABEL: @vec_sll(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = load <2 x i16>, ptr [[P:%.*]], align 4
; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i16>, ptr [[Q:%.*]], align 4
; CHECK-NEXT:    [[TMP2:%.*]] = shl <2 x i16> [[TMP0]], [[TMP1]]
; CHECK-NEXT:    store <2 x i16> [[TMP2]], ptr [[DEST:%.*]], align 4
; CHECK-NEXT:    ret void
;
; DEFAULT-LABEL: @vec_sll(
; DEFAULT-NEXT:  entry:
; DEFAULT-NEXT:    [[TMP0:%.*]] = load <2 x i16>, ptr [[P:%.*]], align 4
; DEFAULT-NEXT:    [[TMP1:%.*]] = load <2 x i16>, ptr [[Q:%.*]], align 4
; DEFAULT-NEXT:    [[TMP2:%.*]] = shl <2 x i16> [[TMP0]], [[TMP1]]
; DEFAULT-NEXT:    store <2 x i16> [[TMP2]], ptr [[DEST:%.*]], align 4
; DEFAULT-NEXT:    ret void
;
entry:
  %e0 = load i16, ptr %p, align 4
  %inc = getelementptr inbounds i16, ptr %p, i64 1
  %e1 = load i16, ptr %inc, align 2

  %f0 = load i16, ptr %q, align 4
  %inq = getelementptr inbounds i16, ptr %q, i64 1
  %f1 = load i16, ptr %inq, align 2

  %a0 = shl i16 %e0, %f0
  %a1 = shl i16 %e1, %f1

  store i16 %a0, ptr %dest, align 4
  %inc2 = getelementptr inbounds i16, ptr %dest, i64 1
  store i16 %a1, ptr %inc2, align 2
  ret void
}

declare i16 @llvm.smin.i16(i16, i16)
define void @vec_smin(ptr %dest, ptr %p, ptr %q) {
; CHECK-LABEL: @vec_smin(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = load <2 x i16>, ptr [[P:%.*]], align 4
; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i16>, ptr [[Q:%.*]], align 4
; CHECK-NEXT:    [[TMP2:%.*]] = call <2 x i16> @llvm.smin.v2i16(<2 x i16> [[TMP0]], <2 x i16> [[TMP1]])
; CHECK-NEXT:    store <2 x i16> [[TMP2]], ptr [[DEST:%.*]], align 4
; CHECK-NEXT:    ret void
;
; DEFAULT-LABEL: @vec_smin(
; DEFAULT-NEXT:  entry:
; DEFAULT-NEXT:    [[TMP0:%.*]] = load <2 x i16>, ptr [[P:%.*]], align 4
; DEFAULT-NEXT:    [[TMP1:%.*]] = load <2 x i16>, ptr [[Q:%.*]], align 4
; DEFAULT-NEXT:    [[TMP2:%.*]] = call <2 x i16> @llvm.smin.v2i16(<2 x i16> [[TMP0]], <2 x i16> [[TMP1]])
; DEFAULT-NEXT:    store <2 x i16> [[TMP2]], ptr [[DEST:%.*]], align 4
; DEFAULT-NEXT:    ret void
;
entry:
  %e0 = load i16, ptr %p, align 4
  %inc = getelementptr inbounds i16, ptr %p, i64 1
  %e1 = load i16, ptr %inc, align 2

  %f0 = load i16, ptr %q, align 4
  %inq = getelementptr inbounds i16, ptr %q, i64 1
  %f1 = load i16, ptr %inq, align 2

  %a0 = tail call i16 @llvm.smin.i16(i16 %e0, i16 %f0)
  %a1 = tail call i16 @llvm.smin.i16(i16 %e1, i16 %f1)

  store i16 %a0, ptr %dest, align 4
  %inc2 = getelementptr inbounds i16, ptr %dest, i64 1
  store i16 %a1, ptr %inc2, align 2
  ret void
}

declare i16 @llvm.umax.i16(i16, i16)
define void @vec_umax(ptr %dest, ptr %p, ptr %q) {
; CHECK-LABEL: @vec_umax(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = load <2 x i16>, ptr [[P:%.*]], align 4
; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i16>, ptr [[Q:%.*]], align 4
; CHECK-NEXT:    [[TMP2:%.*]] = call <2 x i16> @llvm.umax.v2i16(<2 x i16> [[TMP0]], <2 x i16> [[TMP1]])
; CHECK-NEXT:    store <2 x i16> [[TMP2]], ptr [[DEST:%.*]], align 4
; CHECK-NEXT:    ret void
;
; DEFAULT-LABEL: @vec_umax(
; DEFAULT-NEXT:  entry:
; DEFAULT-NEXT:    [[TMP0:%.*]] = load <2 x i16>, ptr [[P:%.*]], align 4
; DEFAULT-NEXT:    [[TMP1:%.*]] = load <2 x i16>, ptr [[Q:%.*]], align 4
; DEFAULT-NEXT:    [[TMP2:%.*]] = call <2 x i16> @llvm.umax.v2i16(<2 x i16> [[TMP0]], <2 x i16> [[TMP1]])
; DEFAULT-NEXT:    store <2 x i16> [[TMP2]], ptr [[DEST:%.*]], align 4
; DEFAULT-NEXT:    ret void
;
entry:
  %e0 = load i16, ptr %p, align 4
  %inc = getelementptr inbounds i16, ptr %p, i64 1
  %e1 = load i16, ptr %inc, align 2

  %f0 = load i16, ptr %q, align 4
  %inq = getelementptr inbounds i16, ptr %q, i64 1
  %f1 = load i16, ptr %inq, align 2

  %a0 = tail call i16 @llvm.umax.i16(i16 %e0, i16 %f0)
  %a1 = tail call i16 @llvm.umax.i16(i16 %e1, i16 %f1)

  store i16 %a0, ptr %dest, align 4
  %inc2 = getelementptr inbounds i16, ptr %dest, i64 1
  store i16 %a1, ptr %inc2, align 2
  ret void
}