//=- AMDGPUCombine.td - Define AMDGPU Combine Rules ----------*- tablegen -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

include "llvm/Target/GlobalISel/Combine.td"

// TODO: This really belongs after legalization after scalarization.

def fmin_fmax_legacy_matchdata : GIDefMatchData<"FMinFMaxLegacyInfo">;

let Predicates = [HasFminFmaxLegacy] in
def fcmp_select_to_fmin_fmax_legacy : GICombineRule<
  (defs root:$select, fmin_fmax_legacy_matchdata:$matchinfo),
  (match (G_FCMP $cond, $pred, $lhs, $rhs):$fcmp,
         (G_SELECT f32:$dst, $cond, $true, $false):$select,
         [{ return matchFMinFMaxLegacy(*${select}, *${fcmp}, ${matchinfo}); }]),
  (apply [{ applySelectFCmpToFMinFMaxLegacy(*${select}, ${matchinfo}); }])>;


def uchar_to_float : GICombineRule<
  (defs root:$itofp),
  (match (wip_match_opcode G_UITOFP, G_SITOFP):$itofp,
         [{ return matchUCharToFloat(*${itofp}); }]),
  (apply [{ applyUCharToFloat(*${itofp}); }])>;


def rcp_sqrt_to_rsq : GICombineRule<
  (defs root:$rcp, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_INTRINSIC, G_FSQRT):$rcp,
         [{ return matchRcpSqrtToRsq(*${rcp}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFn(*${rcp}, ${matchinfo}); }])>;

def fdiv_by_sqrt_to_rsq_f16 : GICombineRule<
  (defs root:$root),
  (match (G_FSQRT f16:$sqrt, $x, (MIFlags FmContract)),
         (G_FDIV f16:$dst, $y, $sqrt, (MIFlags FmContract)):$root,
         [{ return matchFDivSqrtToRsqF16(*${root}); }]),
  (apply [{ applyFDivSqrtToRsqF16(*${root}, ${x}.getReg()); }])>;

def cvt_f32_ubyteN_matchdata : GIDefMatchData<"CvtF32UByteMatchInfo">;

def cvt_f32_ubyteN : GICombineRule<
  (defs root:$cvt_f32_ubyteN, cvt_f32_ubyteN_matchdata:$matchinfo),
  (match (wip_match_opcode G_AMDGPU_CVT_F32_UBYTE0,
                           G_AMDGPU_CVT_F32_UBYTE1,
                           G_AMDGPU_CVT_F32_UBYTE2,
                           G_AMDGPU_CVT_F32_UBYTE3):$cvt_f32_ubyteN,
         [{ return matchCvtF32UByteN(*${cvt_f32_ubyteN}, ${matchinfo}); }]),
  (apply [{ applyCvtF32UByteN(*${cvt_f32_ubyteN}, ${matchinfo}); }])>;

def clamp_i64_to_i16_matchdata : GIDefMatchData<"ClampI64ToI16MatchInfo">;

def clamp_i64_to_i16 : GICombineRule<
  (defs root:$clamp_i64_to_i16, clamp_i64_to_i16_matchdata:$matchinfo),
  (match (wip_match_opcode G_TRUNC):$clamp_i64_to_i16,
         [{ return matchClampI64ToI16(*${clamp_i64_to_i16}, MRI, MF, ${matchinfo}); }]),
  (apply [{ applyClampI64ToI16(*${clamp_i64_to_i16}, ${matchinfo}); }])>;

def med3_matchdata : GIDefMatchData<"Med3MatchInfo">;

def int_minmax_to_med3 : GICombineRule<
  (defs root:$min_or_max, med3_matchdata:$matchinfo),
  (match (wip_match_opcode G_SMAX,
                           G_SMIN,
                           G_UMAX,
                           G_UMIN):$min_or_max,
         [{ return matchIntMinMaxToMed3(*${min_or_max}, ${matchinfo}); }]),
  (apply [{ applyMed3(*${min_or_max}, ${matchinfo}); }])>;

def fp_minmax_to_med3 : GICombineRule<
  (defs root:$min_or_max, med3_matchdata:$matchinfo),
  (match (wip_match_opcode G_FMAXNUM,
                           G_FMINNUM,
                           G_FMAXNUM_IEEE,
                           G_FMINNUM_IEEE):$min_or_max,
         [{ return matchFPMinMaxToMed3(*${min_or_max}, ${matchinfo}); }]),
  (apply [{ applyMed3(*${min_or_max}, ${matchinfo}); }])>;

def fp_minmax_to_clamp : GICombineRule<
  (defs root:$min_or_max, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_FMAXNUM,
                           G_FMINNUM,
                           G_FMAXNUM_IEEE,
                           G_FMINNUM_IEEE):$min_or_max,
         [{ return matchFPMinMaxToClamp(*${min_or_max}, ${matchinfo}); }]),
  (apply [{ applyClamp(*${min_or_max}, ${matchinfo}); }])>;

def fmed3_intrinsic_to_clamp : GICombineRule<
  (defs root:$fmed3, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_AMDGPU_FMED3):$fmed3,
         [{ return matchFPMed3ToClamp(*${fmed3}, ${matchinfo}); }]),
  (apply [{ applyClamp(*${fmed3}, ${matchinfo}); }])>;

def remove_fcanonicalize : GICombineRule<
  (defs root:$fcanonicalize, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_FCANONICALIZE):$fcanonicalize,
         [{ return matchRemoveFcanonicalize(*${fcanonicalize}, ${matchinfo}); }]),
  (apply [{ Helper.replaceSingleDefInstWithReg(*${fcanonicalize}, ${matchinfo}); }])>;

def foldable_fneg_matchdata : GIDefMatchData<"MachineInstr *">;

def foldable_fneg : GICombineRule<
  (defs root:$ffn, foldable_fneg_matchdata:$matchinfo),
  (match (wip_match_opcode G_FNEG):$ffn,
         [{ return Helper.matchFoldableFneg(*${ffn}, ${matchinfo}); }]),
  (apply [{ Helper.applyFoldableFneg(*${ffn}, ${matchinfo}); }])>;

// Detects s_mul_u64 instructions whose higher bits are zero/sign extended.
def smulu64 : GICombineRule<
  (defs root:$smul, unsigned_matchinfo:$matchinfo),
  (match (wip_match_opcode G_MUL):$smul,
         [{ return matchCombine_s_mul_u64(*${smul}, ${matchinfo}); }]),
  (apply [{ Helper.replaceOpcodeWith(*${smul}, ${matchinfo}); }])>;

def sign_exension_in_reg_matchdata : GIDefMatchData<"std::pair<MachineInstr *, unsigned>">;

def sign_extension_in_reg : GICombineRule<
  (defs root:$sign_inreg, sign_exension_in_reg_matchdata:$matchinfo),
  (match (wip_match_opcode G_SEXT_INREG):$sign_inreg,
         [{ return matchCombineSignExtendInReg(*${sign_inreg}, ${matchinfo}); }]),
  (apply [{ applyCombineSignExtendInReg(*${sign_inreg}, ${matchinfo}); }])>;

// Do the following combines:
// fmul x, select(y, A, B) -> fldexp (x, select i32 (y, a, b))
// fmul x, select(y, -A, -B) -> fldexp ((fneg x), select i32 (y, a, b))
def combine_fmul_with_select_to_fldexp : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (G_FMUL $dst, $x, $select):$root,
         (G_SELECT $select, $y, $A, $B):$sel,
         [{ return Helper.matchCombineFmulWithSelectToFldexp(*${root}, *${sel}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;
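
// Illustrative sketch only (constants invented for the example, not matched by
// the rule text above): the rewrite is exact when the selected values are
// power-of-two constants, i.e. A == 2^a and B == 2^b, for example
//   fmul %x, (select %y, 2.0, 8.0)  -->  fldexp %x, (select %y, i32 1, i32 3)
// The actual power-of-two and sign checks are assumed to live in the C++
// matcher Helper.matchCombineFmulWithSelectToFldexp referenced above.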

let Predicates = [Has16BitInsts, NotHasMed3_16] in {
// For gfx8, expand f16-fmed3-as-f32 into a min/max f16 sequence. This
// saves one instruction compared to the promotion.
//
// FIXME: Should have ComplexPattern like in/out matchers
//
// FIXME: We should be able to match either G_AMDGPU_FMED3 or
// G_INTRINSIC @llvm.amdgcn.fmed3. Currently the legalizer will
// replace the intrinsic with G_AMDGPU_FMED3 since we can't write a
// pattern to match it.
def expand_promoted_fmed3 : GICombineRule<
  (defs root:$fptrunc_dst),
  (match (G_FPTRUNC $fptrunc_dst, $fmed3_dst):$fptrunc,
         (G_AMDGPU_FMED3 $fmed3_dst, $src0, $src1, $src2),
         [{ return Helper.matchExpandPromotedF16FMed3(*${fptrunc}, ${src0}.getReg(), ${src1}.getReg(), ${src2}.getReg()); }]),
  (apply [{ Helper.applyExpandPromotedF16FMed3(*${fptrunc}, ${src0}.getReg(), ${src1}.getReg(), ${src2}.getReg()); }])
>;

} // End Predicates = [Has16BitInsts, NotHasMed3_16]
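
// Illustrative identity only (the exact sequence is built by
// Helper.applyExpandPromotedF16FMed3 and may differ, e.g. in NaN handling):
// a median of three can be formed from min/max as
//   med3(a, b, c) == max(min(a, b), min(max(a, b), c))
// Computing this directly on the f16 operands avoids the f32 promotion and the
// trailing G_FPTRUNC matched above, which is how the expansion can come out
// ahead of the promoted form.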

// Combines which should only apply on SI/CI
def gfx6gfx7_combines : GICombineGroup<[fcmp_select_to_fmin_fmax_legacy]>;

// Combines which should only apply on VI
def gfx8_combines : GICombineGroup<[expand_promoted_fmed3]>;

def AMDGPUPreLegalizerCombiner: GICombiner<
  "AMDGPUPreLegalizerCombinerImpl",
  [all_combines, combine_fmul_with_select_to_fldexp, clamp_i64_to_i16, foldable_fneg]> {
  let CombineAllMethodName = "tryCombineAllImpl";
}

def AMDGPUPostLegalizerCombiner: GICombiner<
  "AMDGPUPostLegalizerCombinerImpl",
  [all_combines, gfx6gfx7_combines, gfx8_combines, combine_fmul_with_select_to_fldexp,
   uchar_to_float, cvt_f32_ubyteN, remove_fcanonicalize, foldable_fneg,
   rcp_sqrt_to_rsq, fdiv_by_sqrt_to_rsq_f16, sign_extension_in_reg, smulu64]> {
  let CombineAllMethodName = "tryCombineAllImpl";
}

def AMDGPURegBankCombiner : GICombiner<
  "AMDGPURegBankCombinerImpl",
  [unmerge_merge, unmerge_cst, unmerge_undef,
   zext_trunc_fold, int_minmax_to_med3, ptr_add_immed_chain,
   fp_minmax_to_clamp, fp_minmax_to_med3, fmed3_intrinsic_to_clamp,
   redundant_and]> {
}
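
// Informal note (a summary of how these defs are consumed, not content of this
// file): each GICombiner def names the C++ class that the GlobalISel combiner
// TableGen backend generates (e.g. AMDGPUPreLegalizerCombinerImpl) along with
// the rule set its generated dispatch tries. The CombineAllMethodName override
// renames that generated entry point to tryCombineAllImpl, leaving the
// hand-written combiner source free to provide its own tryCombineAll wrapper
// around it.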