; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py
; RUN: opt < %s -passes="print<cost-model>" 2>&1 -disable-output -S -mtriple=riscv64 -mattr=+v | FileCheck %s

define void @smax() {
; CHECK-LABEL: 'smax'
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %1 = call i8 @llvm.smax.i8(i8 undef, i8 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %2 = call <2 x i8> @llvm.smax.v2i8(<2 x i8> undef, <2 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %3 = call <4 x i8> @llvm.smax.v4i8(<4 x i8> undef, <4 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %4 = call <8 x i8> @llvm.smax.v8i8(<8 x i8> undef, <8 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %5 = call <16 x i8> @llvm.smax.v16i8(<16 x i8> undef, <16 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 1 x i8> @llvm.smax.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 2 x i8> @llvm.smax.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %8 = call <vscale x 4 x i8> @llvm.smax.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %9 = call <vscale x 8 x i8> @llvm.smax.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %10 = call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %11 = call i16 @llvm.smax.i16(i16 undef, i16 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %12 = call <2 x i16> @llvm.smax.v2i16(<2 x i16> undef, <2 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %13 = call <4 x i16> @llvm.smax.v4i16(<4 x i16> undef, <4 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %14 = call <8 x i16> @llvm.smax.v8i16(<8 x i16> undef, <8 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %15 = call <16 x i16> @llvm.smax.v16i16(<16 x i16> undef, <16 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %16 = call <vscale x 1 x i16> @llvm.smax.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %17 = call <vscale x 2 x i16> @llvm.smax.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %18 = call <vscale x 4 x i16> @llvm.smax.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %19 = call <vscale x 8 x i16> @llvm.smax.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %20 = call <vscale x 16 x i16> @llvm.smax.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %21 = call i32 @llvm.smax.i32(i32 undef, i32 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %22 = call <2 x i32> @llvm.smax.v2i32(<2 x i32> undef, <2 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %23 = call <4 x i32> @llvm.smax.v4i32(<4 x i32> undef, <4 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %24 = call <8 x i32> @llvm.smax.v8i32(<8 x i32> undef, <8 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %25 = call <16 x i32> @llvm.smax.v16i32(<16 x i32> undef, <16 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %26 = call <vscale x 1 x i32> @llvm.smax.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %27 = call <vscale x 2 x i32> @llvm.smax.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %28 = call <vscale x 4 x i32> @llvm.smax.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %29 = call <vscale x 8 x i32> @llvm.smax.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %30 = call <vscale x 16 x i32> @llvm.smax.nxv16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %31 = call i64 @llvm.smax.i64(i64 undef, i64 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %32 = call <2 x i64> @llvm.smax.v2i64(<2 x i64> undef, <2 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %33 = call <4 x i64> @llvm.smax.v4i64(<4 x i64> undef, <4 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %34 = call <8 x i64> @llvm.smax.v8i64(<8 x i64> undef, <8 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %35 = call <16 x i64> @llvm.smax.v16i64(<16 x i64> undef, <16 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %36 = call <vscale x 1 x i64> @llvm.smax.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %37 = call <vscale x 2 x i64> @llvm.smax.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %38 = call <vscale x 4 x i64> @llvm.smax.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %39 = call <vscale x 8 x i64> @llvm.smax.nxv8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
  call i8 @llvm.smax.i8(i8 undef, i8 undef)
  call <2 x i8> @llvm.smax.v2i8(<2 x i8> undef, <2 x i8> undef)
  call <4 x i8> @llvm.smax.v4i8(<4 x i8> undef, <4 x i8> undef)
  call <8 x i8> @llvm.smax.v8i8(<8 x i8> undef, <8 x i8> undef)
  call <16 x i8> @llvm.smax.v16i8(<16 x i8> undef, <16 x i8> undef)
  call <vscale x 1 x i8> @llvm.smax.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef)
  call <vscale x 2 x i8> @llvm.smax.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef)
  call <vscale x 4 x i8> @llvm.smax.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef)
  call <vscale x 8 x i8> @llvm.smax.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef)
  call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
  call i16 @llvm.smax.i16(i16 undef, i16 undef)
  call <2 x i16> @llvm.smax.v2i16(<2 x i16> undef, <2 x i16> undef)
  call <4 x i16> @llvm.smax.v4i16(<4 x i16> undef, <4 x i16> undef)
  call <8 x i16> @llvm.smax.v8i16(<8 x i16> undef, <8 x i16> undef)
  call <16 x i16> @llvm.smax.v16i16(<16 x i16> undef, <16 x i16> undef)
  call <vscale x 1 x i16> @llvm.smax.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef)
  call <vscale x 2 x i16> @llvm.smax.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef)
  call <vscale x 4 x i16> @llvm.smax.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef)
  call <vscale x 8 x i16> @llvm.smax.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
  call <vscale x 16 x i16> @llvm.smax.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
  call i32 @llvm.smax.i32(i32 undef, i32 undef)
  call <2 x i32> @llvm.smax.v2i32(<2 x i32> undef, <2 x i32> undef)
  call <4 x i32> @llvm.smax.v4i32(<4 x i32> undef, <4 x i32> undef)
  call <8 x i32> @llvm.smax.v8i32(<8 x i32> undef, <8 x i32> undef)
  call <16 x i32> @llvm.smax.v16i32(<16 x i32> undef, <16 x i32> undef)
  call <vscale x 1 x i32> @llvm.smax.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef)
  call <vscale x 2 x i32> @llvm.smax.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef)
  call <vscale x 4 x i32> @llvm.smax.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
  call <vscale x 8 x i32> @llvm.smax.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
  call <vscale x 16 x i32> @llvm.smax.nxv16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
  call i64 @llvm.smax.i64(i64 undef, i64 undef)
  call <2 x i64> @llvm.smax.v2i64(<2 x i64> undef, <2 x i64> undef)
  call <4 x i64> @llvm.smax.v4i64(<4 x i64> undef, <4 x i64> undef)
  call <8 x i64> @llvm.smax.v8i64(<8 x i64> undef, <8 x i64> undef)
  call <16 x i64> @llvm.smax.v16i64(<16 x i64> undef, <16 x i64> undef)
  call <vscale x 1 x i64> @llvm.smax.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef)
  call <vscale x 2 x i64> @llvm.smax.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
  call <vscale x 4 x i64> @llvm.smax.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
  call <vscale x 8 x i64> @llvm.smax.nxv8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
  ret void
}

define void @smin() {
; CHECK-LABEL: 'smin'
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %1 = call i8 @llvm.smin.i8(i8 undef, i8 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %2 = call <2 x i8> @llvm.smin.v2i8(<2 x i8> undef, <2 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %3 = call <4 x i8> @llvm.smin.v4i8(<4 x i8> undef, <4 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %4 = call <8 x i8> @llvm.smin.v8i8(<8 x i8> undef, <8 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %5 = call <16 x i8> @llvm.smin.v16i8(<16 x i8> undef, <16 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 1 x i8> @llvm.smin.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 2 x i8> @llvm.smin.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %8 = call <vscale x 4 x i8> @llvm.smin.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %9 = call <vscale x 8 x i8> @llvm.smin.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %10 = call <vscale x 16 x i8> @llvm.smin.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %11 = call i16 @llvm.smin.i16(i16 undef, i16 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %12 = call <2 x i16> @llvm.smin.v2i16(<2 x i16> undef, <2 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %13 = call <4 x i16> @llvm.smin.v4i16(<4 x i16> undef, <4 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %14 = call <8 x i16> @llvm.smin.v8i16(<8 x i16> undef, <8 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %15 = call <16 x i16> @llvm.smin.v16i16(<16 x i16> undef, <16 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %16 = call <vscale x 1 x i16> @llvm.smin.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %17 = call <vscale x 2 x i16> @llvm.smin.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %18 = call <vscale x 4 x i16> @llvm.smin.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %19 = call <vscale x 8 x i16> @llvm.smin.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %20 = call <vscale x 16 x i16> @llvm.smin.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %21 = call i32 @llvm.smin.i32(i32 undef, i32 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %22 = call <2 x i32> @llvm.smin.v2i32(<2 x i32> undef, <2 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %23 = call <4 x i32> @llvm.smin.v4i32(<4 x i32> undef, <4 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %24 = call <8 x i32> @llvm.smin.v8i32(<8 x i32> undef, <8 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %25 = call <16 x i32> @llvm.smin.v16i32(<16 x i32> undef, <16 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %26 = call <vscale x 1 x i32> @llvm.smin.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %27 = call <vscale x 2 x i32> @llvm.smin.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %28 = call <vscale x 4 x i32> @llvm.smin.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %29 = call <vscale x 8 x i32> @llvm.smin.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %30 = call <vscale x 16 x i32> @llvm.smin.nxv16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %31 = call i64 @llvm.smin.i64(i64 undef, i64 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %32 = call <2 x i64> @llvm.smin.v2i64(<2 x i64> undef, <2 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %33 = call <4 x i64> @llvm.smin.v4i64(<4 x i64> undef, <4 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %34 = call <8 x i64> @llvm.smin.v8i64(<8 x i64> undef, <8 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %35 = call <16 x i64> @llvm.smin.v16i64(<16 x i64> undef, <16 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %36 = call <vscale x 1 x i64> @llvm.smin.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %37 = call <vscale x 2 x i64> @llvm.smin.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %38 = call <vscale x 4 x i64> @llvm.smin.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %39 = call <vscale x 8 x i64> @llvm.smin.nxv8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
  call i8 @llvm.smin.i8(i8 undef, i8 undef)
  call <2 x i8> @llvm.smin.v2i8(<2 x i8> undef, <2 x i8> undef)
  call <4 x i8> @llvm.smin.v4i8(<4 x i8> undef, <4 x i8> undef)
  call <8 x i8> @llvm.smin.v8i8(<8 x i8> undef, <8 x i8> undef)
  call <16 x i8> @llvm.smin.v16i8(<16 x i8> undef, <16 x i8> undef)
  call <vscale x 1 x i8> @llvm.smin.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef)
  call <vscale x 2 x i8> @llvm.smin.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef)
  call <vscale x 4 x i8> @llvm.smin.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef)
  call <vscale x 8 x i8> @llvm.smin.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef)
  call <vscale x 16 x i8> @llvm.smin.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
  call i16 @llvm.smin.i16(i16 undef, i16 undef)
  call <2 x i16> @llvm.smin.v2i16(<2 x i16> undef, <2 x i16> undef)
  call <4 x i16> @llvm.smin.v4i16(<4 x i16> undef, <4 x i16> undef)
  call <8 x i16> @llvm.smin.v8i16(<8 x i16> undef, <8 x i16> undef)
  call <16 x i16> @llvm.smin.v16i16(<16 x i16> undef, <16 x i16> undef)
  call <vscale x 1 x i16> @llvm.smin.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef)
  call <vscale x 2 x i16> @llvm.smin.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef)
  call <vscale x 4 x i16> @llvm.smin.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef)
  call <vscale x 8 x i16> @llvm.smin.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
  call <vscale x 16 x i16> @llvm.smin.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
  call i32 @llvm.smin.i32(i32 undef, i32 undef)
  call <2 x i32> @llvm.smin.v2i32(<2 x i32> undef, <2 x i32> undef)
  call <4 x i32> @llvm.smin.v4i32(<4 x i32> undef, <4 x i32> undef)
  call <8 x i32> @llvm.smin.v8i32(<8 x i32> undef, <8 x i32> undef)
  call <16 x i32> @llvm.smin.v16i32(<16 x i32> undef, <16 x i32> undef)
  call <vscale x 1 x i32> @llvm.smin.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef)
  call <vscale x 2 x i32> @llvm.smin.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef)
  call <vscale x 4 x i32> @llvm.smin.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
  call <vscale x 8 x i32> @llvm.smin.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
  call <vscale x 16 x i32> @llvm.smin.nxv16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
  call i64 @llvm.smin.i64(i64 undef, i64 undef)
  call <2 x i64> @llvm.smin.v2i64(<2 x i64> undef, <2 x i64> undef)
  call <4 x i64> @llvm.smin.v4i64(<4 x i64> undef, <4 x i64> undef)
  call <8 x i64> @llvm.smin.v8i64(<8 x i64> undef, <8 x i64> undef)
  call <16 x i64> @llvm.smin.v16i64(<16 x i64> undef, <16 x i64> undef)
  call <vscale x 1 x i64> @llvm.smin.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef)
  call <vscale x 2 x i64> @llvm.smin.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
  call <vscale x 4 x i64> @llvm.smin.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
  call <vscale x 8 x i64> @llvm.smin.nxv8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
  ret void
}

define void @umax() {
; CHECK-LABEL: 'umax'
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %1 = call i8 @llvm.umax.i8(i8 undef, i8 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %2 = call <2 x i8> @llvm.umax.v2i8(<2 x i8> undef, <2 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %3 = call <4 x i8> @llvm.umax.v4i8(<4 x i8> undef, <4 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %4 = call <8 x i8> @llvm.umax.v8i8(<8 x i8> undef, <8 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %5 = call <16 x i8> @llvm.umax.v16i8(<16 x i8> undef, <16 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 1 x i8> @llvm.umax.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 2 x i8> @llvm.umax.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %8 = call <vscale x 4 x i8> @llvm.umax.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %9 = call <vscale x 8 x i8> @llvm.umax.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %10 = call <vscale x 16 x i8> @llvm.umax.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %11 = call i16 @llvm.umax.i16(i16 undef, i16 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %12 = call <2 x i16> @llvm.umax.v2i16(<2 x i16> undef, <2 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %13 = call <4 x i16> @llvm.umax.v4i16(<4 x i16> undef, <4 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %14 = call <8 x i16> @llvm.umax.v8i16(<8 x i16> undef, <8 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %15 = call <16 x i16> @llvm.umax.v16i16(<16 x i16> undef, <16 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %16 = call <vscale x 1 x i16> @llvm.umax.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %17 = call <vscale x 2 x i16> @llvm.umax.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %18 = call <vscale x 4 x i16> @llvm.umax.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %19 = call <vscale x 8 x i16> @llvm.umax.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %20 = call <vscale x 16 x i16> @llvm.umax.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %21 = call i32 @llvm.umax.i32(i32 undef, i32 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %22 = call <2 x i32> @llvm.umax.v2i32(<2 x i32> undef, <2 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %23 = call <4 x i32> @llvm.umax.v4i32(<4 x i32> undef, <4 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %24 = call <8 x i32> @llvm.umax.v8i32(<8 x i32> undef, <8 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %25 = call <16 x i32> @llvm.umax.v16i32(<16 x i32> undef, <16 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %26 = call <vscale x 1 x i32> @llvm.umax.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %27 = call <vscale x 2 x i32> @llvm.umax.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %28 = call <vscale x 4 x i32> @llvm.umax.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %29 = call <vscale x 8 x i32> @llvm.umax.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %30 = call <vscale x 16 x i32> @llvm.umax.nxv16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %31 = call i64 @llvm.umax.i64(i64 undef, i64 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %32 = call <2 x i64> @llvm.umax.v2i64(<2 x i64> undef, <2 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %33 = call <4 x i64> @llvm.umax.v4i64(<4 x i64> undef, <4 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %34 = call <8 x i64> @llvm.umax.v8i64(<8 x i64> undef, <8 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %35 = call <16 x i64> @llvm.umax.v16i64(<16 x i64> undef, <16 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %36 = call <vscale x 1 x i64> @llvm.umax.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %37 = call <vscale x 2 x i64> @llvm.umax.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %38 = call <vscale x 4 x i64> @llvm.umax.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %39 = call <vscale x 8 x i64> @llvm.umax.nxv8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
  call i8 @llvm.umax.i8(i8 undef, i8 undef)
  call <2 x i8> @llvm.umax.v2i8(<2 x i8> undef, <2 x i8> undef)
  call <4 x i8> @llvm.umax.v4i8(<4 x i8> undef, <4 x i8> undef)
  call <8 x i8> @llvm.umax.v8i8(<8 x i8> undef, <8 x i8> undef)
  call <16 x i8> @llvm.umax.v16i8(<16 x i8> undef, <16 x i8> undef)
  call <vscale x 1 x i8> @llvm.umax.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef)
  call <vscale x 2 x i8> @llvm.umax.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef)
  call <vscale x 4 x i8> @llvm.umax.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef)
  call <vscale x 8 x i8> @llvm.umax.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef)
  call <vscale x 16 x i8> @llvm.umax.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
  call i16 @llvm.umax.i16(i16 undef, i16 undef)
  call <2 x i16> @llvm.umax.v2i16(<2 x i16> undef, <2 x i16> undef)
  call <4 x i16> @llvm.umax.v4i16(<4 x i16> undef, <4 x i16> undef)
  call <8 x i16> @llvm.umax.v8i16(<8 x i16> undef, <8 x i16> undef)
  call <16 x i16> @llvm.umax.v16i16(<16 x i16> undef, <16 x i16> undef)
  call <vscale x 1 x i16> @llvm.umax.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef)
  call <vscale x 2 x i16> @llvm.umax.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef)
  call <vscale x 4 x i16> @llvm.umax.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef)
  call <vscale x 8 x i16> @llvm.umax.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
  call <vscale x 16 x i16> @llvm.umax.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
  call i32 @llvm.umax.i32(i32 undef, i32 undef)
  call <2 x i32> @llvm.umax.v2i32(<2 x i32> undef, <2 x i32> undef)
  call <4 x i32> @llvm.umax.v4i32(<4 x i32> undef, <4 x i32> undef)
  call <8 x i32> @llvm.umax.v8i32(<8 x i32> undef, <8 x i32> undef)
  call <16 x i32> @llvm.umax.v16i32(<16 x i32> undef, <16 x i32> undef)
  call <vscale x 1 x i32> @llvm.umax.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef)
  call <vscale x 2 x i32> @llvm.umax.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef)
  call <vscale x 4 x i32> @llvm.umax.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
  call <vscale x 8 x i32> @llvm.umax.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
  call <vscale x 16 x i32> @llvm.umax.nxv16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
  call i64 @llvm.umax.i64(i64 undef, i64 undef)
  call <2 x i64> @llvm.umax.v2i64(<2 x i64> undef, <2 x i64> undef)
  call <4 x i64> @llvm.umax.v4i64(<4 x i64> undef, <4 x i64> undef)
  call <8 x i64> @llvm.umax.v8i64(<8 x i64> undef, <8 x i64> undef)
  call <16 x i64> @llvm.umax.v16i64(<16 x i64> undef, <16 x i64> undef)
  call <vscale x 1 x i64> @llvm.umax.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef)
  call <vscale x 2 x i64> @llvm.umax.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
  call <vscale x 4 x i64> @llvm.umax.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
  call <vscale x 8 x i64> @llvm.umax.nxv8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
  ret void
}

define void @umin() {
; CHECK-LABEL: 'umin'
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %1 = call i8 @llvm.umin.i8(i8 undef, i8 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %2 = call <2 x i8> @llvm.umin.v2i8(<2 x i8> undef, <2 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %3 = call <4 x i8> @llvm.umin.v4i8(<4 x i8> undef, <4 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %4 = call <8 x i8> @llvm.umin.v8i8(<8 x i8> undef, <8 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %5 = call <16 x i8> @llvm.umin.v16i8(<16 x i8> undef, <16 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 1 x i8> @llvm.umin.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 2 x i8> @llvm.umin.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %8 = call <vscale x 4 x i8> @llvm.umin.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %9 = call <vscale x 8 x i8> @llvm.umin.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %10 = call <vscale x 16 x i8> @llvm.umin.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %11 = call i16 @llvm.umin.i16(i16 undef, i16 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %12 = call <2 x i16> @llvm.umin.v2i16(<2 x i16> undef, <2 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %13 = call <4 x i16> @llvm.umin.v4i16(<4 x i16> undef, <4 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %14 = call <8 x i16> @llvm.umin.v8i16(<8 x i16> undef, <8 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %15 = call <16 x i16> @llvm.umin.v16i16(<16 x i16> undef, <16 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %16 = call <vscale x 1 x i16> @llvm.umin.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %17 = call <vscale x 2 x i16> @llvm.umin.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %18 = call <vscale x 4 x i16> @llvm.umin.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %19 = call <vscale x 8 x i16> @llvm.umin.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %20 = call <vscale x 16 x i16> @llvm.umin.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %21 = call i32 @llvm.umin.i32(i32 undef, i32 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %22 = call <2 x i32> @llvm.umin.v2i32(<2 x i32> undef, <2 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %23 = call <4 x i32> @llvm.umin.v4i32(<4 x i32> undef, <4 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %24 = call <8 x i32> @llvm.umin.v8i32(<8 x i32> undef, <8 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %25 = call <16 x i32> @llvm.umin.v16i32(<16 x i32> undef, <16 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %26 = call <vscale x 1 x i32> @llvm.umin.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %27 = call <vscale x 2 x i32> @llvm.umin.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %28 = call <vscale x 4 x i32> @llvm.umin.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %29 = call <vscale x 8 x i32> @llvm.umin.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %30 = call <vscale x 16 x i32> @llvm.umin.nxv16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %31 = call i64 @llvm.umin.i64(i64 undef, i64 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %32 = call <2 x i64> @llvm.umin.v2i64(<2 x i64> undef, <2 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %33 = call <4 x i64> @llvm.umin.v4i64(<4 x i64> undef, <4 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %34 = call <8 x i64> @llvm.umin.v8i64(<8 x i64> undef, <8 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %35 = call <16 x i64> @llvm.umin.v16i64(<16 x i64> undef, <16 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %36 = call <vscale x 1 x i64> @llvm.umin.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %37 = call <vscale x 2 x i64> @llvm.umin.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %38 = call <vscale x 4 x i64> @llvm.umin.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %39 = call <vscale x 8 x i64> @llvm.umin.nxv8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
  call i8 @llvm.umin.i8(i8 undef, i8 undef)
  call <2 x i8> @llvm.umin.v2i8(<2 x i8> undef, <2 x i8> undef)
  call <4 x i8> @llvm.umin.v4i8(<4 x i8> undef, <4 x i8> undef)
  call <8 x i8> @llvm.umin.v8i8(<8 x i8> undef, <8 x i8> undef)
  call <16 x i8> @llvm.umin.v16i8(<16 x i8> undef, <16 x i8> undef)
  call <vscale x 1 x i8> @llvm.umin.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef)
  call <vscale x 2 x i8> @llvm.umin.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef)
  call <vscale x 4 x i8> @llvm.umin.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef)
  call <vscale x 8 x i8> @llvm.umin.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef)
  call <vscale x 16 x i8> @llvm.umin.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
  call i16 @llvm.umin.i16(i16 undef, i16 undef)
  call <2 x i16> @llvm.umin.v2i16(<2 x i16> undef, <2 x i16> undef)
  call <4 x i16> @llvm.umin.v4i16(<4 x i16> undef, <4 x i16> undef)
  call <8 x i16> @llvm.umin.v8i16(<8 x i16> undef, <8 x i16> undef)
  call <16 x i16> @llvm.umin.v16i16(<16 x i16> undef, <16 x i16> undef)
  call <vscale x 1 x i16> @llvm.umin.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef)
  call <vscale x 2 x i16> @llvm.umin.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef)
  call <vscale x 4 x i16> @llvm.umin.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef)
  call <vscale x 8 x i16> @llvm.umin.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
  call <vscale x 16 x i16> @llvm.umin.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
  call i32 @llvm.umin.i32(i32 undef, i32 undef)
  call <2 x i32> @llvm.umin.v2i32(<2 x i32> undef, <2 x i32> undef)
  call <4 x i32> @llvm.umin.v4i32(<4 x i32> undef, <4 x i32> undef)
  call <8 x i32> @llvm.umin.v8i32(<8 x i32> undef, <8 x i32> undef)
  call <16 x i32> @llvm.umin.v16i32(<16 x i32> undef, <16 x i32> undef)
  call <vscale x 1 x i32> @llvm.umin.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef)
  call <vscale x 2 x i32> @llvm.umin.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef)
  call <vscale x 4 x i32> @llvm.umin.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
  call <vscale x 8 x i32> @llvm.umin.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
  call <vscale x 16 x i32> @llvm.umin.nxv16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
  call i64 @llvm.umin.i64(i64 undef, i64 undef)
  call <2 x i64> @llvm.umin.v2i64(<2 x i64> undef, <2 x i64> undef)
  call <4 x i64> @llvm.umin.v4i64(<4 x i64> undef, <4 x i64> undef)
  call <8 x i64> @llvm.umin.v8i64(<8 x i64> undef, <8 x i64> undef)
  call <16 x i64> @llvm.umin.v16i64(<16 x i64> undef, <16 x i64> undef)
  call <vscale x 1 x i64> @llvm.umin.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef)
  call <vscale x 2 x i64> @llvm.umin.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
  call <vscale x 4 x i64> @llvm.umin.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
  call <vscale x 8 x i64> @llvm.umin.nxv8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
  ret void
}

declare i8 @llvm.smax.i8(i8, i8)
declare <2 x i8> @llvm.smax.v2i8(<2 x i8>, <2 x i8>)
declare <4 x i8> @llvm.smax.v4i8(<4 x i8>, <4 x i8>)
declare <8 x i8> @llvm.smax.v8i8(<8 x i8>, <8 x i8>)
declare <16 x i8> @llvm.smax.v16i8(<16 x i8>, <16 x i8>)
declare <vscale x 1 x i8> @llvm.smax.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>)
declare <vscale x 2 x i8> @llvm.smax.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>)
declare <vscale x 4 x i8> @llvm.smax.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>)
declare <vscale x 8 x i8> @llvm.smax.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>)
declare <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
declare i16 @llvm.smax.i16(i16, i16)
declare <2 x i16> @llvm.smax.v2i16(<2 x i16>, <2 x i16>)
declare <4 x i16> @llvm.smax.v4i16(<4 x i16>, <4 x i16>)
declare <8 x i16> @llvm.smax.v8i16(<8 x i16>, <8 x i16>)
declare <16 x i16> @llvm.smax.v16i16(<16 x i16>, <16 x i16>)
declare <vscale x 1 x i16> @llvm.smax.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>)
declare <vscale x 2 x i16> @llvm.smax.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>)
declare <vscale x 4 x i16> @llvm.smax.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>)
declare <vscale x 8 x i16> @llvm.smax.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 16 x i16> @llvm.smax.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>)
declare i32 @llvm.smax.i32(i32, i32)
declare <2 x i32> @llvm.smax.v2i32(<2 x i32>, <2 x i32>)
declare <4 x i32> @llvm.smax.v4i32(<4 x i32>, <4 x i32>)
declare <8 x i32> @llvm.smax.v8i32(<8 x i32>, <8 x i32>)
declare <16 x i32> @llvm.smax.v16i32(<16 x i32>, <16 x i32>)
declare <vscale x 1 x i32> @llvm.smax.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>)
declare <vscale x 2 x i32> @llvm.smax.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>)
declare <vscale x 4 x i32> @llvm.smax.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 8 x i32> @llvm.smax.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>)
declare <vscale x 16 x i32> @llvm.smax.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>)
declare i64 @llvm.smax.i64(i64, i64)
declare <2 x i64> @llvm.smax.v2i64(<2 x i64>, <2 x i64>)
declare <4 x i64> @llvm.smax.v4i64(<4 x i64>, <4 x i64>)
declare <8 x i64> @llvm.smax.v8i64(<8 x i64>, <8 x i64>)
declare <16 x i64> @llvm.smax.v16i64(<16 x i64>, <16 x i64>)
declare <vscale x 1 x i64> @llvm.smax.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>)
declare <vscale x 2 x i64> @llvm.smax.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
declare <vscale x 4 x i64> @llvm.smax.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>)
declare <vscale x 8 x i64> @llvm.smax.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>)

declare i8 @llvm.smin.i8(i8, i8)
declare <2 x i8> @llvm.smin.v2i8(<2 x i8>, <2 x i8>)
declare <4 x i8> @llvm.smin.v4i8(<4 x i8>, <4 x i8>)
declare <8 x i8> @llvm.smin.v8i8(<8 x i8>, <8 x i8>)
declare <16 x i8> @llvm.smin.v16i8(<16 x i8>, <16 x i8>)
declare <vscale x 1 x i8> @llvm.smin.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>)
declare <vscale x 2 x i8> @llvm.smin.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>)
declare <vscale x 4 x i8> @llvm.smin.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>)
declare <vscale x 8 x i8> @llvm.smin.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>)
declare <vscale x 16 x i8> @llvm.smin.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
declare i16 @llvm.smin.i16(i16, i16)
declare <2 x i16> @llvm.smin.v2i16(<2 x i16>, <2 x i16>)
declare <4 x i16> @llvm.smin.v4i16(<4 x i16>, <4 x i16>)
declare <8 x i16> @llvm.smin.v8i16(<8 x i16>, <8 x i16>)
declare <16 x i16> @llvm.smin.v16i16(<16 x i16>, <16 x i16>)
declare <vscale x 1 x i16> @llvm.smin.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>)
declare <vscale x 2 x i16> @llvm.smin.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>)
declare <vscale x 4 x i16> @llvm.smin.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>)
declare <vscale x 8 x i16> @llvm.smin.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 16 x i16> @llvm.smin.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>)
declare i32 @llvm.smin.i32(i32, i32)
declare <2 x i32> @llvm.smin.v2i32(<2 x i32>, <2 x i32>)
declare <4 x i32> @llvm.smin.v4i32(<4 x i32>, <4 x i32>)
declare <8 x i32> @llvm.smin.v8i32(<8 x i32>, <8 x i32>)
declare <16 x i32> @llvm.smin.v16i32(<16 x i32>, <16 x i32>)
declare <vscale x 1 x i32> @llvm.smin.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>)
declare <vscale x 2 x i32> @llvm.smin.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>)
declare <vscale x 4 x i32> @llvm.smin.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 8 x i32> @llvm.smin.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>)
declare <vscale x 16 x i32> @llvm.smin.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>)
declare i64 @llvm.smin.i64(i64, i64)
declare <2 x i64> @llvm.smin.v2i64(<2 x i64>, <2 x i64>)
declare <4 x i64> @llvm.smin.v4i64(<4 x i64>, <4 x i64>)
declare <8 x i64> @llvm.smin.v8i64(<8 x i64>, <8 x i64>)
declare <16 x i64> @llvm.smin.v16i64(<16 x i64>, <16 x i64>)
declare <vscale x 1 x i64> @llvm.smin.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>)
declare <vscale x 2 x i64> @llvm.smin.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
declare <vscale x 4 x i64> @llvm.smin.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>)
declare <vscale x 8 x i64> @llvm.smin.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>)

declare i8 @llvm.umax.i8(i8, i8)
declare <2 x i8> @llvm.umax.v2i8(<2 x i8>, <2 x i8>)
declare <4 x i8> @llvm.umax.v4i8(<4 x i8>, <4 x i8>)
declare <8 x i8> @llvm.umax.v8i8(<8 x i8>, <8 x i8>)
declare <16 x i8> @llvm.umax.v16i8(<16 x i8>, <16 x i8>)
declare <vscale x 1 x i8> @llvm.umax.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>)
declare <vscale x 2 x i8> @llvm.umax.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>)
declare <vscale x 4 x i8> @llvm.umax.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>)
declare <vscale x 8 x i8> @llvm.umax.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>)
declare <vscale x 16 x i8> @llvm.umax.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
declare i16 @llvm.umax.i16(i16, i16)
declare <2 x i16> @llvm.umax.v2i16(<2 x i16>, <2 x i16>)
declare <4 x i16> @llvm.umax.v4i16(<4 x i16>, <4 x i16>)
declare <8 x i16> @llvm.umax.v8i16(<8 x i16>, <8 x i16>)
declare <16 x i16> @llvm.umax.v16i16(<16 x i16>, <16 x i16>)
declare <vscale x 1 x i16> @llvm.umax.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>)
declare <vscale x 2 x i16> @llvm.umax.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>)
declare <vscale x 4 x i16> @llvm.umax.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>)
declare <vscale x 8 x i16> @llvm.umax.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 16 x i16> @llvm.umax.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>)
declare i32 @llvm.umax.i32(i32, i32)
declare <2 x i32> @llvm.umax.v2i32(<2 x i32>, <2 x i32>)
declare <4 x i32> @llvm.umax.v4i32(<4 x i32>, <4 x i32>)
declare <8 x i32> @llvm.umax.v8i32(<8 x i32>, <8 x i32>)
declare <16 x i32> @llvm.umax.v16i32(<16 x i32>, <16 x i32>)
declare <vscale x 1 x i32> @llvm.umax.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>)
declare <vscale x 2 x i32> @llvm.umax.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>)
declare <vscale x 4 x i32> @llvm.umax.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 8 x i32> @llvm.umax.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>)
declare <vscale x 16 x i32> @llvm.umax.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>)
declare i64 @llvm.umax.i64(i64, i64)
declare <2 x i64> @llvm.umax.v2i64(<2 x i64>, <2 x i64>)
declare <4 x i64> @llvm.umax.v4i64(<4 x i64>, <4 x i64>)
declare <8 x i64> @llvm.umax.v8i64(<8 x i64>, <8 x i64>)
declare <16 x i64> @llvm.umax.v16i64(<16 x i64>, <16 x i64>)
declare <vscale x 1 x i64> @llvm.umax.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>)
declare <vscale x 2 x i64> @llvm.umax.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
declare <vscale x 4 x i64> @llvm.umax.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>)
declare <vscale x 8 x i64> @llvm.umax.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>)

declare i8 @llvm.umin.i8(i8, i8)
declare <2 x i8> @llvm.umin.v2i8(<2 x i8>, <2 x i8>)
declare <4 x i8> @llvm.umin.v4i8(<4 x i8>, <4 x i8>)
declare <8 x i8> @llvm.umin.v8i8(<8 x i8>, <8 x i8>)
declare <16 x i8> @llvm.umin.v16i8(<16 x i8>, <16 x i8>)
declare <vscale x 1 x i8> @llvm.umin.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>)
declare <vscale x 2 x i8> @llvm.umin.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>)
declare <vscale x 4 x i8> @llvm.umin.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>)
declare <vscale x 8 x i8> @llvm.umin.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>)
declare <vscale x 16 x i8> @llvm.umin.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
declare i16 @llvm.umin.i16(i16, i16)
declare <2 x i16> @llvm.umin.v2i16(<2 x i16>, <2 x i16>)
declare <4 x i16> @llvm.umin.v4i16(<4 x i16>, <4 x i16>)
declare <8 x i16> @llvm.umin.v8i16(<8 x i16>, <8 x i16>)
declare <16 x i16> @llvm.umin.v16i16(<16 x i16>, <16 x i16>)
declare <vscale x 1 x i16> @llvm.umin.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>)
declare <vscale x 2 x i16> @llvm.umin.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>)
declare <vscale x 4 x i16> @llvm.umin.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>)
declare <vscale x 8 x i16> @llvm.umin.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 16 x i16> @llvm.umin.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>)
declare i32 @llvm.umin.i32(i32, i32)
declare <2 x i32> @llvm.umin.v2i32(<2 x i32>, <2 x i32>)
declare <4 x i32> @llvm.umin.v4i32(<4 x i32>, <4 x i32>)
declare <8 x i32> @llvm.umin.v8i32(<8 x i32>, <8 x i32>)
declare <16 x i32> @llvm.umin.v16i32(<16 x i32>, <16 x i32>)
declare <vscale x 1 x i32> @llvm.umin.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>)
declare <vscale x 2 x i32> @llvm.umin.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>)
declare <vscale x 4 x i32> @llvm.umin.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 8 x i32> @llvm.umin.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>)
declare <vscale x 16 x i32> @llvm.umin.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>)
declare i64 @llvm.umin.i64(i64, i64)
declare <2 x i64> @llvm.umin.v2i64(<2 x i64>, <2 x i64>)
declare <4 x i64> @llvm.umin.v4i64(<4 x i64>, <4 x i64>)
declare <8 x i64> @llvm.umin.v8i64(<8 x i64>, <8 x i64>)
declare <16 x i64> @llvm.umin.v16i64(<16 x i64>, <16 x i64>)
declare <vscale x 1 x i64> @llvm.umin.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>)
declare <vscale x 2 x i64> @llvm.umin.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
declare <vscale x 4 x i64> @llvm.umin.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>)
declare <vscale x 8 x i64> @llvm.umin.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>)