; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=aarch64 -mattr=+fullfp16 | FileCheck %s
; RUN: llc < %s -mtriple=aarch64 -mattr=+fullfp16 -global-isel | FileCheck %s

; Tests that a vector `fcmp` whose both operands are `llvm.fabs` calls is
; selected to the AArch64 absolute-compare instructions FACGT (ogt) and
; FACGE (oge), covering f16/f32/f64 element types at 64- and 128-bit widths.
; For the olt/ole forms the CHECK lines show the same instructions with the
; source registers swapped (|a| < |b|  ==>  facgt |b|, |a|).
; +fullfp16 is required for the v4f16/v8f16 cases.

; |a| ogt |b| -> facgt v0, v0, v1
define <4 x i32> @gt_v4f32(<4 x float> %a, <4 x float> %b) {
; CHECK-LABEL: gt_v4f32:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: facgt v0.4s, v0.4s, v1.4s
; CHECK-NEXT: ret
entry:
  %vabs1.i = tail call <4 x float> @llvm.fabs.v4f32(<4 x float> %a)
  %vabs1.i2 = tail call <4 x float> @llvm.fabs.v4f32(<4 x float> %b)
  %cmp = fcmp ogt <4 x float> %vabs1.i, %vabs1.i2
  %sext = sext <4 x i1> %cmp to <4 x i32>
  ret <4 x i32> %sext
}

; |a| oge |b| -> facge v0, v0, v1
define <4 x i32> @ge_v4f32(<4 x float> %a, <4 x float> %b) {
; CHECK-LABEL: ge_v4f32:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: facge v0.4s, v0.4s, v1.4s
; CHECK-NEXT: ret
entry:
  %vabs1.i = tail call <4 x float> @llvm.fabs.v4f32(<4 x float> %a)
  %vabs1.i2 = tail call <4 x float> @llvm.fabs.v4f32(<4 x float> %b)
  %cmp = fcmp oge <4 x float> %vabs1.i, %vabs1.i2
  %sext = sext <4 x i1> %cmp to <4 x i32>
  ret <4 x i32> %sext
}

; |a| olt |b| -> facgt with operands swapped (facgt v0, v1, v0)
define <4 x i32> @lt_v4f32(<4 x float> %a, <4 x float> %b) {
; CHECK-LABEL: lt_v4f32:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: facgt v0.4s, v1.4s, v0.4s
; CHECK-NEXT: ret
entry:
  %vabs1.i = tail call <4 x float> @llvm.fabs.v4f32(<4 x float> %a)
  %vabs1.i2 = tail call <4 x float> @llvm.fabs.v4f32(<4 x float> %b)
  %cmp = fcmp olt <4 x float> %vabs1.i, %vabs1.i2
  %sext = sext <4 x i1> %cmp to <4 x i32>
  ret <4 x i32> %sext
}

; |a| ole |b| -> facge with operands swapped (facge v0, v1, v0)
define <4 x i32> @le_v4f32(<4 x float> %a, <4 x float> %b) {
; CHECK-LABEL: le_v4f32:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: facge v0.4s, v1.4s, v0.4s
; CHECK-NEXT: ret
entry:
  %vabs1.i = tail call <4 x float> @llvm.fabs.v4f32(<4 x float> %a)
  %vabs1.i2 = tail call <4 x float> @llvm.fabs.v4f32(<4 x float> %b)
  %cmp = fcmp ole <4 x float> %vabs1.i, %vabs1.i2
  %sext = sext <4 x i1> %cmp to <4 x i32>
  ret <4 x i32> %sext
}

; 64-bit (2 x f32) variants.
define <2 x i32> @gt_v2f32(<2 x float> %a, <2 x float> %b) {
; CHECK-LABEL: gt_v2f32:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: facgt v0.2s, v0.2s, v1.2s
; CHECK-NEXT: ret
entry:
  %vabs1.i = tail call <2 x float> @llvm.fabs.v2f32(<2 x float> %a)
  %vabs1.i2 = tail call <2 x float> @llvm.fabs.v2f32(<2 x float> %b)
  %cmp = fcmp ogt <2 x float> %vabs1.i, %vabs1.i2
  %sext = sext <2 x i1> %cmp to <2 x i32>
  ret <2 x i32> %sext
}

define <2 x i32> @ge_v2f32(<2 x float> %a, <2 x float> %b) {
; CHECK-LABEL: ge_v2f32:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: facge v0.2s, v0.2s, v1.2s
; CHECK-NEXT: ret
entry:
  %vabs1.i = tail call <2 x float> @llvm.fabs.v2f32(<2 x float> %a)
  %vabs1.i2 = tail call <2 x float> @llvm.fabs.v2f32(<2 x float> %b)
  %cmp = fcmp oge <2 x float> %vabs1.i, %vabs1.i2
  %sext = sext <2 x i1> %cmp to <2 x i32>
  ret <2 x i32> %sext
}

; Half-precision variants (need +fullfp16 from the RUN lines).
define <4 x i16> @gt_v4f16(<4 x half> %a, <4 x half> %b) {
; CHECK-LABEL: gt_v4f16:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: facgt v0.4h, v0.4h, v1.4h
; CHECK-NEXT: ret
entry:
  %vabs1.i = tail call <4 x half> @llvm.fabs.v4f16(<4 x half> %a)
  %vabs1.i2 = tail call <4 x half> @llvm.fabs.v4f16(<4 x half> %b)
  %cmp = fcmp ogt <4 x half> %vabs1.i, %vabs1.i2
  %sext = sext <4 x i1> %cmp to <4 x i16>
  ret <4 x i16> %sext
}

define <4 x i16> @ge_v4f16(<4 x half> %a, <4 x half> %b) {
; CHECK-LABEL: ge_v4f16:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: facge v0.4h, v0.4h, v1.4h
; CHECK-NEXT: ret
entry:
  %vabs1.i = tail call <4 x half> @llvm.fabs.v4f16(<4 x half> %a)
  %vabs1.i2 = tail call <4 x half> @llvm.fabs.v4f16(<4 x half> %b)
  %cmp = fcmp oge <4 x half> %vabs1.i, %vabs1.i2
  %sext = sext <4 x i1> %cmp to <4 x i16>
  ret <4 x i16> %sext
}

define <8 x i16> @gt_v8f16(<8 x half> %a, <8 x half> %b) {
; CHECK-LABEL: gt_v8f16:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: facgt v0.8h, v0.8h, v1.8h
; CHECK-NEXT: ret
entry:
  %vabs1.i = tail call <8 x half> @llvm.fabs.v8f16(<8 x half> %a)
  %vabs1.i2 = tail call <8 x half> @llvm.fabs.v8f16(<8 x half> %b)
  %cmp = fcmp ogt <8 x half> %vabs1.i, %vabs1.i2
  %sext = sext <8 x i1> %cmp to <8 x i16>
  ret <8 x i16> %sext
}

define <8 x i16> @ge_v8f16(<8 x half> %a, <8 x half> %b) {
; CHECK-LABEL: ge_v8f16:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: facge v0.8h, v0.8h, v1.8h
; CHECK-NEXT: ret
entry:
  %vabs1.i = tail call <8 x half> @llvm.fabs.v8f16(<8 x half> %a)
  %vabs1.i2 = tail call <8 x half> @llvm.fabs.v8f16(<8 x half> %b)
  %cmp = fcmp oge <8 x half> %vabs1.i, %vabs1.i2
  %sext = sext <8 x i1> %cmp to <8 x i16>
  ret <8 x i16> %sext
}

; Double-precision variants.
define <2 x i64> @gt_v2f64(<2 x double> %a, <2 x double> %b) {
; CHECK-LABEL: gt_v2f64:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: facgt v0.2d, v0.2d, v1.2d
; CHECK-NEXT: ret
entry:
  %vabs1.i = tail call <2 x double> @llvm.fabs.v2f64(<2 x double> %a)
  %vabs1.i2 = tail call <2 x double> @llvm.fabs.v2f64(<2 x double> %b)
  %cmp = fcmp ogt <2 x double> %vabs1.i, %vabs1.i2
  %sext = sext <2 x i1> %cmp to <2 x i64>
  ret <2 x i64> %sext
}

define <2 x i64> @ge_v2f64(<2 x double> %a, <2 x double> %b) {
; CHECK-LABEL: ge_v2f64:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: facge v0.2d, v0.2d, v1.2d
; CHECK-NEXT: ret
entry:
  %vabs1.i = tail call <2 x double> @llvm.fabs.v2f64(<2 x double> %a)
  %vabs1.i2 = tail call <2 x double> @llvm.fabs.v2f64(<2 x double> %b)
  %cmp = fcmp oge <2 x double> %vabs1.i, %vabs1.i2
  %sext = sext <2 x i1> %cmp to <2 x i64>
  ret <2 x i64> %sext
}

declare <8 x half> @llvm.fabs.v8f16(<8 x half>)
declare <4 x half> @llvm.fabs.v4f16(<4 x half>)
declare <4 x float> @llvm.fabs.v4f32(<4 x float>)
declare <2 x float> @llvm.fabs.v2f32(<2 x float>)
declare <2 x double> @llvm.fabs.v2f64(<2 x double>)