Lines Matching defs:s64
59 LLT s64 = LLT::scalar(64);
60 LLT v2s64 = LLT::fixed_vector(2, s64);
61 LLT v4s64 = LLT::fixed_vector(4, s64);
64 B.buildSplatBuildVector(v4s64, B.buildConstant(s64, 42));
73 auto ICst = B.buildConstant(s64, 15).getReg(0);
115 LLT s64 = LLT::scalar(64);
117 auto MIBAdd = B.buildAdd(s64, Copies[0], Copies[1]);
130 auto MIBMul = B.buildMul(s64, MIBAdd, Copies[2]);
148 auto MIBMul2 = B.buildMul(s64, Copies[0], B.buildConstant(s64, 42));
159 auto MIBSub = B.buildSub(s64, Copies[0], B.buildConstant(s64, 42));
164 auto MIBFMul = B.buildInstr(TargetOpcode::G_FMUL, {s64},
165 {Copies[0], B.buildConstant(s64, 42)});
174 auto MIBFSub = B.buildInstr(TargetOpcode::G_FSUB, {s64},
175 {Copies[0], B.buildConstant(s64, 42)});
182 auto MIBAnd = B.buildAnd(s64, Copies[0], Copies[1]);
191 auto MIBOr = B.buildOr(s64, Copies[0], Copies[1]);
201 auto LShr = B.buildLShr(s64, Copies[0], TruncCopy1);
209 auto Shl = B.buildShl(s64, Copies[0], TruncCopy1);
223 auto MIBCst = B.buildConstant(s64, 42);
224 auto MIBAddCst = B.buildAdd(s64, MIBCst, Copies[0]);
225 auto MIBUnmerge = B.buildUnmerge({s32, s32}, B.buildConstant(s64, 42));
228 auto SMin = B.buildSMin(s64, Copies[2], MIBAdd);
234 auto SMax = B.buildSMax(s64, Copies[2], MIBAdd);
240 auto UMin = B.buildUMin(s64, Copies[2], MIBAdd);
246 auto UMax = B.buildUMax(s64, Copies[2], MIBAdd);
421 // Truncate s64 to s32.
452 LLT s64 = LLT::scalar(64);
453 auto MIBFCst64 = B.buildFConstant(s64, .5);
483 LLT s64 = LLT::scalar(64);
487 auto MIBAExt = B.buildAnyExt(s64, MIBTrunc);
488 auto MIBZExt = B.buildZExt(s64, MIBTrunc);
489 auto MIBSExt = B.buildSExt(s64, MIBTrunc);
531 LLT s64 = LLT::scalar(64);
533 auto MIBAdd = B.buildAdd(s64, Copies[0], Copies[1]);
537 m_GAdd(m_SpecificType(s64), m_Reg())));
547 mi_match(MIBCast.getReg(1), *MRI, m_SpecificType(s64)));
552 auto MIBPtrToInt = B.buildCast(s64, MIBIntToPtr);
567 LLT s64 = LLT::scalar(64);
569 auto MIBAdd = B.buildAdd(s64, Copies[0], Copies[1]);
573 m_all_of(m_SpecificType(s64), m_GAdd(m_Reg(Src0), m_Reg(Src1))));
601 LLT s64 = LLT::scalar(64);
602 auto MIBAdd = B.buildAdd(s64, Copies[0], Copies[1]);
608 EXPECT_EQ(Ty, s64);
639 LLT s64 = LLT::scalar(64);
640 auto MIBAdd = B.buildAdd(s64, Copies[0], FortyTwo);
655 LLT s64 = LLT::scalar(64);
656 LLT v4s64 = LLT::fixed_vector(4, s64);
659 B.buildSplatBuildVector(v4s64, B.buildConstant(s64, 42));
660 MachineInstrBuilder FortyTwo = B.buildConstant(s64, 42);
676 MachineInstrBuilder Add = B.buildAdd(s64, Copies[0], FortyTwo);
685 LLT s64 = LLT::scalar(64);
686 LLT v4s64 = LLT::fixed_vector(4, s64);
689 B.buildSplatBuildVector(v4s64, B.buildConstant(s64, 42));
690 MachineInstrBuilder FortyTwo = B.buildConstant(s64, 42);
707 MachineInstrBuilder Add = B.buildAdd(s64, Copies[0], FortyTwo);
757 LLT s64 = LLT::scalar(64);
761 Register FPOne = B.buildFConstant(s64, 1.0).getReg(0);
762 Register FPZero = B.buildFConstant(s64, 0.0).getReg(0);
763 Register Undef = B.buildUndef(s64).getReg(0);
847 LLT s64 = LLT::scalar(64);
849 auto NegInst = B.buildSub(s64, Zero, Copies[0]);
857 auto NotNegInst1 = B.buildSub(s64, Copies[0], Zero);
862 auto NotNegInst2 = B.buildSub(s64, FortyTwo, Copies[0]);
868 auto AddInst = B.buildAdd(s64, Copies[1], NegInst);
879 LLT s64 = LLT::scalar(64);
881 auto NotInst1 = B.buildXor(s64, Copies[0], AllOnes);
889 auto NotInst2 = B.buildXor(s64, AllOnes, Copies[1]);
895 auto WrongCst = B.buildXor(s64, Copies[0], FortyTwo);
901 auto AddInst = B.buildAdd(s64, Copies[1], NotInst1);
927 auto s64 = LLT::scalar(64);
930 auto Cst1 = B.buildConstant(s64, 42);
931 auto Cst2 = B.buildConstant(s64, 314);
932 auto Add = B.buildAdd(s64, Cst1, Cst2);
933 auto Sub = B.buildSub(s64, Add, Cst1);
948 auto Add2 = B.buildAdd(s64, Sub, Cst1);
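
A minimal sketch, assuming the test fixture's usual setup, of how the s64 definitions and uses listed above fit together: define the scalar type, build a G_ADD with MachineIRBuilder, then match it with mi_match, m_GAdd, and m_SpecificType (as on the lines around 533-537 and 569-573). The fixture pieces here (a configured MachineIRBuilder B, MachineRegisterInfo MRI, and the Copies array of input registers) come from the surrounding test harness and are not part of the listing; include paths assume a recent LLVM tree.

#include "llvm/ADT/ArrayRef.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"

using namespace llvm;
using namespace MIPatternMatch;

// Build an s64 add from two fixture-provided registers and check that it
// matches as a G_ADD of the specific s64 type, binding its two sources.
static bool matchAddOfS64(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                          ArrayRef<Register> Copies) {
  LLT s64 = LLT::scalar(64);                            // line 59-style definition
  auto MIBAdd = B.buildAdd(s64, Copies[0], Copies[1]);  // line 117-style use

  Register Src0, Src1;
  return mi_match(MIBAdd.getReg(0), MRI,
                  m_all_of(m_SpecificType(s64),
                           m_GAdd(m_Reg(Src0), m_Reg(Src1))));
}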