/llvm-project/polly/test/ScopInfo/
invariant_load_complex_condition.ll
  30: …floor((-1 + block_x)/8) <= -5 + block_x - 4o0) or (-3 <= block_y < 0 and block_x >= 0 and -3 + blo…

polly-timeout-parameter-bounds.ll
  13: ; CHECK-NEXT: [tmp] -> { Stmt_bb12[] : 1073741824*floor((536870912 + tmp)/1073741824) …
  20: ; CHECK-NEXT: [tmp] -> { Stmt_bb15[] : -268435455 + tmp <= 1073741824*floor((536870912…
  27: ; CHECK-NEXT: [tmp] -> { Stmt_bb18[] : -134217727 + tmp <= 1073741824*floor((536870912…
  34: ; CHECK-NEXT: [tmp] -> { Stmt_bb21[] : -67108863 + tmp <= 1073741824*floor((536870912 …
  41: ; CHECK-NEXT: [tmp] -> { Stmt_bb24[] : -33554431 + tmp <= 1073741824*floor((536870912 …
  48: ; CHECK-NEXT: [tmp] -> { Stmt_bb27[] : -16777215 + tmp <= 1073741824*floor((536870912 …
  55: ; CHECK-NEXT: [tmp] -> { Stmt_bb30[] : -8388607 + tmp <= 1073741824*floor((536870912 +…
  62: ; CHECK-NEXT: [tmp] -> { Stmt_bb33[] : -4194303 + tmp <= 1073741824*floor((536870912 +…
  69: ; CHECK-NEXT: [tmp] -> { Stmt_bb36[] : -2097151 + tmp <= 1073741824*floor((536870912 +…
  76: ; CHECK-NEXT: [tmp] -> { Stmt_bb39[] : -1048575 + tmp <= 1073741824*floor((536870912 +…
  [all …]

/llvm-project/llvm/test/Transforms/Attributor/
nofpclass-floor.ll
  4: declare float @llvm.floor.f32(float)
  5: declare ppc_fp128 @llvm.floor.ppcf128(ppc_fp128)
  10: ; CHECK-NEXT: [[CALL:%.*]] = call nofpclass(sub) float @llvm.floor.f32(float [[ARG0]]) #[[ATTR2:…
  13: %call = call float @llvm.floor.f32(float %arg0)
  20: ; CHECK-NEXT: [[CALL:%.*]] = call nofpclass(inf sub) float @llvm.floor.f32(float nofpclass(inf) …
  23: %call = call float @llvm.floor.f32(float %arg0)
  30: ; CHECK-NEXT: [[CALL:%.*]] = call nofpclass(pinf sub) float @llvm.floor.f32(float nofpclass(pinf…
  33: %call = call float @llvm.floor.f32(float %arg0)
  40: ; CHECK-NEXT: [[CALL:%.*]] = call nofpclass(ninf sub) float @llvm.floor.f32(float nofpclass(ninf…
  43: %call = call float @llvm.floor.f32(float %arg0)
  [all …]

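The deduction behind these checks: floor returns an integer-valued float, a signed zero, or passes inf/nan through, so its result can never be a subnormal, and the Attributor may tag the call nofpclass(sub); inf-related classes on the argument carry over to the result. A minimal sketch with an illustrative function name:

    ; Hypothetical reduction of the test: the Attributor is expected to
    ; infer nofpclass(sub) on the call result, since floor never yields
    ; a subnormal value.
    declare float @llvm.floor.f32(float)

    define float @floor_no_sub(float %arg0) {
      %call = call float @llvm.floor.f32(float %arg0)
      ret float %call
    }
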
/llvm-project/clang/test/CodeGenHLSL/builtins/
floor.hlsl
  8: using hlsl::floor;
  11: // NATIVE_HALF: call reassoc nnan ninf nsz arcp afn half @llvm.floor.f16(
  13: // NO_HALF: call reassoc nnan ninf nsz arcp afn float @llvm.floor.f32(float %0)
  14: half test_floor_half(half p0) { return floor(p0); }
  16: // NATIVE_HALF: call reassoc nnan ninf nsz arcp afn <2 x half> @llvm.floor.v2f16(
  18: // NO_HALF: call reassoc nnan ninf nsz arcp afn <2 x float> @llvm.floor.v2f32(
  19: half2 test_floor_half2(half2 p0) { return floor(p0); }
  21: // NATIVE_HALF: call reassoc nnan ninf nsz arcp afn <3 x half> @llvm.floor.v3f16(
  23: // NO_HALF: call reassoc nnan ninf nsz arcp afn <3 x float> @llvm.floor.v3f32(
  24: half3 test_floor_half3(half3 p0) { return floor(p…
  [all …]

/llvm-project/polly/lib/External/isl/test_inputs/codegen/
correlation.st
  5: …floor((j)/32))]; S_27[i, j, k] -> [(32*floor((i)/32))]; S_14[j] -> [(0)] }, { S_19[i, j] -> [(32*f…
  10: …floor((j)/32))]; S_27[i, j, k] -> [(i - 32*floor((i)/32))]; S_14[j] -> [(0)] }, { S_19[i, j] -> [(…

isolate7.st
  6: … [{ S_1[i, j] -> [(32*floor((i)/32))]; S_2[i] -> [(32*floor((i)/32))] }, { S_1[i, j] -> [(32*floor…
  8: …floor((i0)/32), e1 = floor((i1)/32): 32e0 = i0 and 32e1 = i1 and i0 >= 0 and i0 <= -32 + n and i1 …
  10: … j] -> [(i - 32*floor((i)/32))]; S_2[i] -> [(i - 32*floor((i)/32))] }, { S_1[i, j] -> [(j - 32*flo…

redundant.st
  2: …floor((4 + o2)/8), e1 = floor((5 + o2)/8), e2 = floor((4 + o2)/262144), e3, e4: o1 <= 1 and o1 >= …

/llvm-project/llvm/test/CodeGen/AMDGPU/
cvt_flr_i32_f32.ll
  6: declare float @llvm.floor.f32(float) #1
  14: %floor = call float @llvm.floor.f32(float %x) #1
  15: %cvt = fptosi float %floor to i32
  27: %floor = call float @llvm.floor.f32(float %fadd) #1
  28: %cvt = fptosi float %floor to i32
  40: %floor = call float @llvm.floor.f32(float %x.fabs) #1
  41: %cvt = fptosi float %floor to i32
  53: %floor = call float @llvm.floor.f32(float %x.fneg) #1
  54: %cvt = fptosi float %floor to i32
  67: %floor = call float @llvm.floor.f32(float %x.fabs.fneg) #1
  [all …]

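The idiom under test, reduced to a single function (the name is illustrative): converting floor(x) with fptosi is the pattern the AMDGPU backend can select as one v_cvt_flr_i32_f32 instruction when the NaN-handling conditions allow.

    declare float @llvm.floor.f32(float)

    ; floor-then-convert: a candidate for v_cvt_flr_i32_f32 selection.
    define i32 @cvt_flr_sketch(float %x) {
      %floor = call float @llvm.floor.f32(float %x)
      %cvt = fptosi float %floor to i32
      ret i32 %cvt
    }
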
fract-match.ll
  22: ; floor. We can fold in the nan check into the instruction, but the
  28: ; GFX6-IR-NEXT: [[FLOOR:%.*]] = tail call float @llvm.floor.f32(float [[X]])
  29: ; GFX6-IR-NEXT: [[SUB:%.*]] = fsub float [[X]], [[FLOOR]]
  36: ; GFX6-IR-NEXT: store float [[FLOOR]], ptr addrspace(1) [[IP]], align 4
  42: ; IR-FRACT-NEXT: [[FLOOR:%.*]] = tail call float @llvm.floor.f32(float [[X]])
  47: ; IR-FRACT-NEXT: store float [[FLOOR]], ptr addrspace(1) [[IP]], align 4
  123: %floor = tail call float @llvm.floor…
  [all …]

fract.ll
  8: declare float @llvm.floor.f32(float) #0
  17: %floor.x = call float @llvm.floor.f32(float %x)
  18: %fract = fsub float %x, %floor.x
  30: %floor.x.neg = call float @llvm.floor.f32(float %x.neg)
  31: %fract = fsub float %x.neg, %floor.x.neg
  44: %floor.neg.abs.x = call float @llvm.floor.f32(float %neg.abs.x)
  45: %fract = fsub float %neg.abs.x, %floor.neg.abs.x
  51: ; GCN-DAG: v_floor_f32_e32 [[FLOOR:v[0-9]+]], [[INPUT:v[0-9]+]]
  54: ; GCN: buffer_store_dword [[FLOOR]]
  58: %floor.x = call float @llvm.floor.f32(float %x)
  [all …]

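In isolation, the fract pattern these matches come from looks like the sketch below (hypothetical function name): x - floor(x), which the backend can select as v_fract_f32 under suitable fast-math assumptions.

    declare float @llvm.floor.f32(float)

    ; x - floor(x): the canonical fract idiom matched by the backend.
    define float @fract_sketch(float %x) {
      %floor.x = call float @llvm.floor.f32(float %x)
      %fract = fsub float %x, %floor.x
      ret float %fract
    }
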
cvt_rpi_i32_f32.ll
  6: declare float @llvm.floor.f32(float) #1
  14: %floor = call float @llvm.floor.f32(float %fadd) #1
  15: %cvt = fptosi float %floor to i32
  27: %floor = call float @llvm.floor.f32(float %fadd) #1
  28: %cvt = fptosi float %floor to i32
  43: %floor = call float @llvm.floor.f32(float %fadd) #1
  44: %cvt = fptosi float %floor to i32
  62: %floor = call float @llvm.floor.f32(float %fadd) #1
  63: %cvt = fptosi float %floor to i32
  76: %floor = call float @llvm.floor.f32(float %fadd) #1
  [all …]

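Here the convert works on floor(x + 0.5), i.e. round-half-up. A reduced sketch (illustrative name) of the shape that can become v_cvt_rpi_i32_f32:

    declare float @llvm.floor.f32(float)

    ; floor(x + 0.5) then convert: candidate for v_cvt_rpi_i32_f32.
    define i32 @cvt_rpi_sketch(float %x) {
      %fadd = fadd float %x, 0.5
      %floor = call float @llvm.floor.f32(float %fadd)
      %cvt = fptosi float %floor to i32
      ret i32 %cvt
    }
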
ffloor.ll
  7: ; R600: FLOOR
  9: %tmp = call float @llvm.floor.f32(float %in) #0
  19: %tmp = call <2 x float> @llvm.floor.v2f32(<2 x float> %in) #0
  30: ; R600: FLOOR
  31: ; R600: FLOOR
  32: ; R600: FLOOR
  33: ; R600: FLOOR
  35: %tmp = call <4 x float> @llvm.floor.v4f32(<4 x float> %in) #0
  41: declare float @llvm.floor.f32(float) #0
  44: declare <2 x float> @llvm.floor.v2f32(<2 x float>) #0
  [all …]

ffloor.f64.ll
  6: declare double @llvm.floor.f64(double) nounwind readnone
  7: declare <2 x double> @llvm.floor.v2f64(<2 x double>) nounwind readnone
  8: declare <3 x double> @llvm.floor.v3f64(<3 x double>) nounwind readnone
  9: declare <4 x double> @llvm.floor.v4f64(<4 x double>) nounwind readnone
  10: declare <8 x double> @llvm.floor.v8f64(<8 x double>) nounwind readnone
  11: declare <16 x double> @llvm.floor.v16f64(<16 x double>) nounwind readnone
  23: %y = call fast double @llvm.floor.f64(double %x) nounwind readnone
  39: %y = call fast double @llvm.floor.f64(double %neg) nounwind readnone
  56: %y = call fast double @llvm.floor.f64(double %neg) nounwind readnone
  65: %y = call fast <2 x double> @llvm.floor.v2f64(<2 x double> %x) nounwind readnone
  [all …]

fract.f64.ll
  9: declare double @llvm.floor.f64(double) #0
  29: %floor.x = call double @llvm.floor.f64(double %x)
  30: %fract = fsub double %x, %floor.x
  54: %floor.neg.x = call double @llvm.floor.f64(double %neg.x)
  55: %fract = fsub double %neg.x, %floor.neg.x
  80: %floor.neg.abs.x = call double @llvm.floor.f64(double %neg.abs.x)
  81: %fract = fsub double %neg.abs.x, %floor.neg.abs.x
  89: %floor.x = call double @llvm.floor.f64(double %x)
  90: %fract = fsub double %x, %floor.x
  91: store volatile double %floor.x, ptr addrspace(1) %out

amdgpu-simplify-libcall-floor.ll
  30: ; CHECK-NEXT: [[RINT:%.*]] = tail call float @llvm.floor.f32(float [[ARG]])
  40: ; CHECK-NEXT: [[RINT:%.*]] = tail call <2 x float> @llvm.floor.v2f32(<2 x float> [[ARG]])
  50: ; CHECK-NEXT: [[RINT:%.*]] = tail call <3 x float> @llvm.floor.v3f32(<3 x float> [[ARG]])
  60: ; CHECK-NEXT: [[RINT:%.*]] = tail call <4 x float> @llvm.floor.v4f32(<4 x float> [[ARG]])
  70: ; CHECK-NEXT: [[RINT:%.*]] = tail call <8 x float> @llvm.floor.v8f32(<8 x float> [[ARG]])
  80: ; CHECK-NEXT: [[RINT:%.*]] = tail call <16 x float> @llvm.floor.v16f32(<16 x float> [[ARG]])
  90: ; CHECK-NEXT: [[RINT:%.*]] = tail call double @llvm.floor.f64(double [[ARG]])
  100: ; CHECK-NEXT: [[RINT:%.*]] = tail call <2 x double> @llvm.floor.v2f64(<2 x double> [[ARG]])
  110: ; CHECK-NEXT: [[RINT:%.*]] = tail call <3 x double> @llvm.floor.v3f64(<3 x double> [[ARG]])
  120: ; CHECK-NEXT: [[RINT:%.*]] = tail call <4 x double> @llvm.floor.v4f64(<4 x double> [[ARG]])
  [all …]

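These checks verify that a device-library floor call is folded into the generic intrinsic (the [[RINT]] capture is just the test's FileCheck variable name). A hedged sketch, assuming the Itanium-mangled float overload _Z5floorf is the library entry point; the test's actual symbol may differ:

    ; Assumed library symbol, for illustration only.
    declare float @_Z5floorf(float)

    define float @libcall_floor_sketch(float %x) {
      ; expected after simplification:
      ;   %r = tail call float @llvm.floor.f32(float %x)
      %r = tail call float @_Z5floorf(float %x)
      ret float %r
    }
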
/llvm-project/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/
floor.ll
  8: ; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] Floor %[[#]]
  9: %elt.floor = call float @llvm.floor.f32(float %a)
  10: ret float %elt.floor
  15: ; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] Floor %[[#]]
  16: %elt.floor = call half @llvm.floor.f16(half %a)
  17: ret half %elt.floor
  20: declare half @llvm.floor.f16(half)
  21: declare float @llvm.floor…
  [all …]

/llvm-project/llvm/test/CodeGen/DirectX/
floor.ll
  3: ; Make sure dxil operation function calls for floor are generated for float and half.
  8: %elt.floor = call float @llvm.floor.f32(float %a)
  9: ret float %elt.floor
  15: %elt.floor = call half @llvm.floor.f16(half %a)
  16: ret half %elt.floor
  33: %2 = call <4 x float> @llvm.floor.v4f32(<4 x float> %a)
  39: declare half @llvm.floor.f16(half)
  40: declare float @llvm.floor…
  [all …]

/llvm-project/llvm/test/CodeGen/ARM/
tail-call-builtin.ll
  5: ; CHECK: bl floor
  8: %val = tail call double @floor(double %in)
  15: ; CHECK: b floor
  17: %val = tail call double @floor(double %in)
  23: ; CHECK-NOT: bl floor
  25: %val = tail call double @floor(double %in)
  31: ; CHECK: bl floor
  33: %val = tail call double @floor(double %in)
  37: declare double @floor(double) nounwind readonly

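The distinction being tested: a tail call to the floor libcall can lower to a plain branch (b floor) when the caller needs no epilogue work of its own, and otherwise falls back to an ordinary call (bl floor). A reduced sketch of the tail-call case, with an illustrative function name:

    declare double @floor(double) nounwind readonly

    ; With a compatible frame, this may lower to "b floor" on ARM.
    define double @tail_floor_sketch(double %in) {
      %val = tail call double @floor(double %in)
      ret double %val
    }
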
/llvm-project/polly/lib/External/isl/test_inputs/codegen/cloog/
mxm-shared.st
  1: …floor((g1)/128), e1 = floor((128b1 - g1)/4096), e2 = floor((-8b0 + g0)/128), e3 = floor((-t0 + i1)…
  3: … g1, g2, g3, g4, t0, t1] -> { [] : exists (e0 = floor((g0)/8), e1 = floor((-128b1 + g1)/4096), e2 …

/llvm-project/llvm/test/CodeGen/RISCV/rvv/
ffloor-sdnode.ll
  34: %a = call <vscale x 1 x bfloat> @llvm.floor.nxv1bf16(<vscale x 1 x bfloat> %x)
  37: declare <vscale x 1 x bfloat> @llvm.floor.nxv1bf16(<vscale x 1 x bfloat>)
  58: %a = call <vscale x 2 x bfloat> @llvm.floor.nxv2bf16(<vscale x 2 x bfloat> %x)
  61: declare <vscale x 2 x bfloat> @llvm.floor.nxv2bf16(<vscale x 2 x bfloat>)
  82: %a = call <vscale x 4 x bfloat> @llvm.floor.nxv4bf16(<vscale x 4 x bfloat> %x)
  85: declare <vscale x 4 x bfloat> @llvm.floor.nxv4bf16(<vscale x 4 x bfloat>)
  106: %a = call <vscale x 8 x bfloat> @llvm.floor.nxv8bf16(<vscale x 8 x bfloat> %x)
  109: declare <vscale x 8 x bfloat> @llvm.floor.nxv8bf16(<vscale x 8 x bfloat>)
  130: %a = call <vscale x 16 x bfloat> @llvm.floor.nxv16bf16(<vscale x 16 x bfloat> %x)
  133: declare <vscale x 16 x bfloat> @llvm.floor…
  [all …]

fixed-vectors-ffloor-constrained-sdnode.ll
  25: %a = call <1 x half> @llvm.experimental.constrained.floor.v1f16(<1 x half> %x, metadata !"fpexcept.strict")
  28: declare <1 x half> @llvm.experimental.constrained.floor.v1f16(<1 x half>, metadata)
  48: %a = call <2 x half> @llvm.experimental.constrained.floor.v2f16(<2 x half> %x, metadata !"fpexcept.strict")
  51: declare <2 x half> @llvm.experimental.constrained.floor.v2f16(<2 x half>, metadata)
  71: %a = call <4 x half> @llvm.experimental.constrained.floor.v4f16(<4 x half> %x, metadata !"fpexcept.strict")
  74: declare <4 x half> @llvm.experimental.constrained.floor.v4f16(<4 x half>, metadata)
  94: %a = call <8 x half> @llvm.experimental.constrained.floor.v8f16(<8 x half> %x, metadata !"fpexcept.strict")
  97: declare <8 x half> @llvm.experimental.constrained.floor.v8f16(<8 x half>, metadata)
  117: %a = call <16 x half> @llvm.experimental.constrained.floor.v16f16(<16 x half> %x, metadata !"fpexcept.strict")
  120: declare <16 x half> @llvm.experimental.constrained.floor…
  [all …]

ffloor-constrained-sdnode.ll
  25: %a = call <vscale x 1 x half> @llvm.experimental.constrained.floor.nxv1f16(<vscale x 1 x half> %x, metadata !"fpexcept.strict")
  28: declare <vscale x 1 x half> @llvm.experimental.constrained.floor.nxv1f16(<vscale x 1 x half>, metadata)
  48: %a = call <vscale x 2 x half> @llvm.experimental.constrained.floor.nxv2f16(<vscale x 2 x half> %x, metadata !"fpexcept.strict")
  51: declare <vscale x 2 x half> @llvm.experimental.constrained.floor.nxv2f16(<vscale x 2 x half>, metadata)
  71: %a = call <vscale x 4 x half> @llvm.experimental.constrained.floor.nxv4f16(<vscale x 4 x half> %x, metadata !"fpexcept.strict")
  74: declare <vscale x 4 x half> @llvm.experimental.constrained.floor.nxv4f16(<vscale x 4 x half>, metadata)
  94: %a = call <vscale x 8 x half> @llvm.experimental.constrained.floor.nxv8f16(<vscale x 8 x half> %x, metadata !"fpexcept.strict")
  97: declare <vscale x 8 x half> @llvm.experimental.constrained.floor.nxv8f16(<vscale x 8 x half>, metadata)
  117: %a = call <vscale x 16 x half> @llvm.experimental.constrained.floor.nxv16f16(<vscale x 16 x half> %x, metadata !"fpexcept.strict")
  120: declare <vscale x 16 x half> @llvm.experimental.constrained.floor…
  [all …]

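Both constrained-floor tests use the same strict-FP call shape: the metadata operand pins the exception behavior, and constrained floor takes no rounding-mode operand since its result is exact. A scalar reduction of that shape (the tests themselves are all vector), assuming the usual strictfp attributes:

    declare float @llvm.experimental.constrained.floor.f32(float, metadata)

    ; Strict-FP form: FP exception semantics must be preserved.
    define float @strict_floor_sketch(float %x) strictfp {
      %a = call float @llvm.experimental.constrained.floor.f32(float %x, metadata !"fpexcept.strict") strictfp
      ret float %a
    }
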
floor-vp.ll
  15: declare <vscale x 1 x bfloat> @llvm.vp.floor.nxv1bf16(<vscale x 1 x bfloat>, <vscale x 1 x i1>, i32)
  42: %v = call <vscale x 1 x bfloat> @llvm.vp.floor.nxv1bf16(<vscale x 1 x bfloat> %va, <vscale x 1 x i1> %m, i32 %evl)
  65: %v = call <vscale x 1 x bfloat> @llvm.vp.floor.nxv1bf16(<vscale x 1 x bfloat> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  69: declare <vscale x 2 x bfloat> @llvm.vp.floor.nxv2bf16(<vscale x 2 x bfloat>, <vscale x 2 x i1>, i32)
  96: %v = call <vscale x 2 x bfloat> @llvm.vp.floor.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> %m, i32 %evl)
  119: %v = call <vscale x 2 x bfloat> @llvm.vp.floor.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  123: declare <vscale x 4 x bfloat> @llvm.vp.floor.nxv4bf16(<vscale x 4 x bfloat>, <vscale x 4 x i1>, i32)
  150: %v = call <vscale x 4 x bfloat> @llvm.vp.floor.nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x i1> %m, i32 %evl)
  173: %v = call <vscale x 4 x bfloat> @llvm.vp.floor.nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  177: declare <vscale x 8 x bfloat> @llvm.vp.floor…
  [all …]

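The vector-predicated form adds a per-lane mask and an explicit vector length. A reduced f32 sketch (the test's own cases are bfloat and half):

    declare <vscale x 1 x float> @llvm.vp.floor.nxv1f32(<vscale x 1 x float>, <vscale x 1 x i1>, i32)

    ; Lanes past %evl or with a false mask bit are poison per VP semantics.
    define <vscale x 1 x float> @vp_floor_sketch(<vscale x 1 x float> %va, <vscale x 1 x i1> %m, i32 %evl) {
      %v = call <vscale x 1 x float> @llvm.vp.floor.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> %m, i32 %evl)
      ret <vscale x 1 x float> %v
    }
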
/llvm-project/llvm/test/Analysis/CostModel/X86/
fround.ll
  79: define i32 @floor(i32 %arg) {
  80: ; SSE2-LABEL: 'floor'
  81: …odel: Found an estimated cost of 10 for instruction: %F32 = call float @llvm.floor.f32(float undef)
  82: … an estimated cost of 21 for instruction: %V2F32 = call <2 x float> @llvm.floor.v2f32(<2 x float> …
  83: … an estimated cost of 43 for instruction: %V4F32 = call <4 x float> @llvm.floor.v4f32(<4 x float> …
  84: … an estimated cost of 86 for instruction: %V8F32 = call <8 x float> @llvm.floor.v8f32(<8 x float> …
  85: … estimated cost of 172 for instruction: %V16F32 = call <16 x float> @llvm.floor.v16f32(<16 x float…
  86: …el: Found an estimated cost of 10 for instruction: %F64 = call double @llvm.floor.f64(double undef)
  87: …an estimated cost of 21 for instruction: %V2F64 = call <2 x double> @llvm.floor.v2f64(<2 x double>…
  88: …an estimated cost of 42 for instruction: %V4F64 = call <4 x double> @llvm.floor.v4f64(<4 x double>…
  [all …]

/llvm-project/flang/test/Lower/math-lowering/
floor.f90
  10: test_real4 = floor(x)
  14: ! FAST: {{%[A-Za-z0-9._]+}} = math.floor {{%[A-Za-z0-9._]+}} {{.*}}: f32
  15: ! RELAXED: {{%[A-Za-z0-9._]+}} = math.floor {{%[A-Za-z0-9._]+}} {{.*}}: f32
  20: test_real8 = floor(x)
  24: ! FAST: {{%[A-Za-z0-9._]+}} = math.floor {{%[A-Za-z0-9._]+}} {{.*}}: f64
  25: ! RELAXED: {{%[A-Za-z0-9._]+}} = math.floor {{%[A-Za-z0-9._]+}} {{.*}}: f64
  26: ! PRECISE: {{%[A-Za-z0-9._]+}} = fir.call @floor({{%[A-Za-z0-9._]+}}) {{.*}}: (f64) -> f64
  29: ! PRECISE-DAG: func.func private @floor(f64) -> f64 attributes {fir.bindc_name = "floor", fir.runti…