//===- MipsLegalizerInfo.cpp ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the MachineLegalizer class for Mips.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "MipsLegalizerInfo.h"
#include "MipsTargetMachine.h"
#include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
#include "llvm/IR/IntrinsicsMips.h"

using namespace llvm;

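// Describes one legal (value type, pointer type, memory size) combination for
// a load or store, and whether the subtarget supports unaligned accesses of
// that size.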
struct TypesAndMemOps {
  LLT ValTy;
  LLT PtrTy;
  unsigned MemSize;
  bool SystemSupportsUnalignedAccess;
};

// Assumes power of 2 memory size. Subtargets that have only naturally-aligned
// memory access need to perform additional legalization here.
static bool isUnalignedMemoryAccess(uint64_t MemSize, uint64_t AlignInBits) {
  assert(isPowerOf2_64(MemSize) && "Expected power of 2 memory size");
  assert(isPowerOf2_64(AlignInBits) && "Expected power of 2 align");
  if (MemSize > AlignInBits)
    return true;
  return false;
}

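// Returns true if the {Types[0], Types[1], MemSize} combination from Query
// matches one of SupportedValues; for entries whose subtarget cannot perform
// unaligned accesses, the access must also be naturally aligned.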
static bool
CheckTy0Ty1MemSizeAlign(const LegalityQuery &Query,
                        std::initializer_list<TypesAndMemOps> SupportedValues) {
  unsigned QueryMemSize = Query.MMODescrs[0].SizeInBits;

  // Non-power-of-2 memory access is never legal.
  if (!isPowerOf2_64(QueryMemSize))
    return false;

  for (auto &Val : SupportedValues) {
    if (Val.ValTy != Query.Types[0])
      continue;
    if (Val.PtrTy != Query.Types[1])
      continue;
    if (Val.MemSize != QueryMemSize)
      continue;
    if (!Val.SystemSupportsUnalignedAccess &&
        isUnalignedMemoryAccess(QueryMemSize, Query.MMODescrs[0].AlignInBits))
      return false;
    return true;
  }
  return false;
}

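// Returns true if Query.Types[N] is one of SupportedValues.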
static bool CheckTyN(unsigned N, const LegalityQuery &Query,
                     std::initializer_list<LLT> SupportedValues) {
  return llvm::is_contained(SupportedValues, Query.Types[N]);
}

MipsLegalizerInfo::MipsLegalizerInfo(const MipsSubtarget &ST) {
  using namespace TargetOpcode;

  const LLT s1 = LLT::scalar(1);
  const LLT s32 = LLT::scalar(32);
  const LLT s64 = LLT::scalar(64);
  const LLT v16s8 = LLT::vector(16, 8);
  const LLT v8s16 = LLT::vector(8, 16);
  const LLT v4s32 = LLT::vector(4, 32);
  const LLT v2s64 = LLT::vector(2, 64);
  const LLT p0 = LLT::pointer(0, 32);

  getActionDefinitionsBuilder({G_ADD, G_SUB, G_MUL})
      .legalIf([=, &ST](const LegalityQuery &Query) {
        if (CheckTyN(0, Query, {s32}))
          return true;
        if (ST.hasMSA() && CheckTyN(0, Query, {v16s8, v8s16, v4s32, v2s64}))
          return true;
        return false;
      })
      .clampScalar(0, s32, s32);

  getActionDefinitionsBuilder({G_UADDO, G_UADDE, G_USUBO, G_USUBE, G_UMULO})
      .lowerFor({{s32, s1}});

  getActionDefinitionsBuilder(G_UMULH)
      .legalFor({s32})
      .maxScalar(0, s32);

  // MIPS32r6 has no alignment restrictions on memory access.
  // For MIPS32r5 and older, memory access must be naturally aligned, i.e.
  // aligned to at least the access size. There is, however, a two-instruction
  // combination that performs a 4-byte unaligned access (lwr/lwl and swl/swr),
  // so 4-byte loads and stores are legal and use NoAlignRequirements.
  bool NoAlignRequirements = true;

  getActionDefinitionsBuilder({G_LOAD, G_STORE})
      .legalIf([=, &ST](const LegalityQuery &Query) {
        if (CheckTy0Ty1MemSizeAlign(
                Query, {{s32, p0, 8, NoAlignRequirements},
                        {s32, p0, 16, ST.systemSupportsUnalignedAccess()},
                        {s32, p0, 32, NoAlignRequirements},
                        {p0, p0, 32, NoAlignRequirements},
                        {s64, p0, 64, ST.systemSupportsUnalignedAccess()}}))
          return true;
        if (ST.hasMSA() && CheckTy0Ty1MemSizeAlign(
                               Query, {{v16s8, p0, 128, NoAlignRequirements},
                                       {v8s16, p0, 128, NoAlignRequirements},
                                       {v4s32, p0, 128, NoAlignRequirements},
                                       {v2s64, p0, 128, NoAlignRequirements}}))
          return true;
        return false;
      })
      // Custom lower scalar memory access, up to 8 bytes, for:
      // - non-power-of-2 MemSizes
      // - unaligned 2 or 8 byte MemSizes for MIPS32r5 and older
      .customIf([=, &ST](const LegalityQuery &Query) {
        if (!Query.Types[0].isScalar() || Query.Types[1] != p0 ||
            Query.Types[0] == s1)
          return false;

        unsigned Size = Query.Types[0].getSizeInBits();
        unsigned QueryMemSize = Query.MMODescrs[0].SizeInBits;
        assert(QueryMemSize <= Size && "Scalar can't hold MemSize");

        if (Size > 64 || QueryMemSize > 64)
          return false;

        if (!isPowerOf2_64(Query.MMODescrs[0].SizeInBits))
          return true;

        if (!ST.systemSupportsUnalignedAccess() &&
            isUnalignedMemoryAccess(QueryMemSize,
                                    Query.MMODescrs[0].AlignInBits)) {
          assert(QueryMemSize != 32 && "4 byte load and store are legal");
          return true;
        }

        return false;
      })
      .minScalar(0, s32);

  getActionDefinitionsBuilder(G_IMPLICIT_DEF)
      .legalFor({s32, s64});

  getActionDefinitionsBuilder(G_UNMERGE_VALUES)
      .legalFor({{s32, s64}});

  getActionDefinitionsBuilder(G_MERGE_VALUES)
      .legalFor({{s64, s32}});

  getActionDefinitionsBuilder({G_ZEXTLOAD, G_SEXTLOAD})
      .legalForTypesWithMemDesc({{s32, p0, 8, 8},
                                 {s32, p0, 16, 8}})
      .clampScalar(0, s32, s32);

  getActionDefinitionsBuilder({G_ZEXT, G_SEXT, G_ANYEXT})
      .legalIf([](const LegalityQuery &Query) { return false; })
      .maxScalar(0, s32);

  getActionDefinitionsBuilder(G_TRUNC)
      .legalIf([](const LegalityQuery &Query) { return false; })
      .maxScalar(1, s32);

  getActionDefinitionsBuilder(G_SELECT)
      .legalForCartesianProduct({p0, s32, s64}, {s32})
      .minScalar(0, s32)
      .minScalar(1, s32);

  getActionDefinitionsBuilder(G_BRCOND)
      .legalFor({s32})
      .minScalar(0, s32);

  getActionDefinitionsBuilder(G_BRJT)
      .legalFor({{p0, s32}});

  getActionDefinitionsBuilder(G_BRINDIRECT)
      .legalFor({p0});

  getActionDefinitionsBuilder(G_PHI)
      .legalFor({p0, s32, s64})
      .minScalar(0, s32);

  getActionDefinitionsBuilder({G_AND, G_OR, G_XOR})
      .legalFor({s32})
      .clampScalar(0, s32, s32);

  getActionDefinitionsBuilder({G_SDIV, G_SREM, G_UDIV, G_UREM})
      .legalIf([=, &ST](const LegalityQuery &Query) {
        if (CheckTyN(0, Query, {s32}))
          return true;
        if (ST.hasMSA() && CheckTyN(0, Query, {v16s8, v8s16, v4s32, v2s64}))
          return true;
        return false;
      })
      .minScalar(0, s32)
      .libcallFor({s64});

  getActionDefinitionsBuilder({G_SHL, G_ASHR, G_LSHR})
      .legalFor({{s32, s32}})
      .clampScalar(1, s32, s32)
      .clampScalar(0, s32, s32);

  getActionDefinitionsBuilder(G_ICMP)
      .legalForCartesianProduct({s32}, {s32, p0})
      .clampScalar(1, s32, s32)
      .minScalar(0, s32);

  getActionDefinitionsBuilder(G_CONSTANT)
      .legalFor({s32})
      .clampScalar(0, s32, s32);

  getActionDefinitionsBuilder({G_PTR_ADD, G_INTTOPTR})
      .legalFor({{p0, s32}});

  getActionDefinitionsBuilder(G_PTRTOINT)
      .legalFor({{s32, p0}});

  getActionDefinitionsBuilder(G_FRAME_INDEX)
      .legalFor({p0});

  getActionDefinitionsBuilder({G_GLOBAL_VALUE, G_JUMP_TABLE})
      .legalFor({p0});

  getActionDefinitionsBuilder(G_DYN_STACKALLOC)
      .lowerFor({{p0, s32}});

  getActionDefinitionsBuilder(G_VASTART)
      .legalFor({p0});

  getActionDefinitionsBuilder(G_BSWAP)
      .legalIf([=, &ST](const LegalityQuery &Query) {
        if (ST.hasMips32r2() && CheckTyN(0, Query, {s32}))
          return true;
        return false;
      })
      .lowerIf([=, &ST](const LegalityQuery &Query) {
        if (!ST.hasMips32r2() && CheckTyN(0, Query, {s32}))
          return true;
        return false;
      })
      .maxScalar(0, s32);

  getActionDefinitionsBuilder(G_BITREVERSE)
      .lowerFor({s32})
      .maxScalar(0, s32);

  getActionDefinitionsBuilder(G_CTLZ)
      .legalFor({{s32, s32}})
      .maxScalar(0, s32)
      .maxScalar(1, s32);
  getActionDefinitionsBuilder(G_CTLZ_ZERO_UNDEF)
      .lowerFor({{s32, s32}});

  getActionDefinitionsBuilder(G_CTTZ)
      .lowerFor({{s32, s32}})
      .maxScalar(0, s32)
      .maxScalar(1, s32);
  getActionDefinitionsBuilder(G_CTTZ_ZERO_UNDEF)
      .lowerFor({{s32, s32}, {s64, s64}});

  getActionDefinitionsBuilder(G_CTPOP)
      .lowerFor({{s32, s32}})
      .clampScalar(0, s32, s32)
      .clampScalar(1, s32, s32);

  // FP instructions
  getActionDefinitionsBuilder(G_FCONSTANT)
      .legalFor({s32, s64});

  getActionDefinitionsBuilder({G_FADD, G_FSUB, G_FMUL, G_FDIV, G_FABS, G_FSQRT})
      .legalIf([=, &ST](const LegalityQuery &Query) {
        if (CheckTyN(0, Query, {s32, s64}))
          return true;
        if (ST.hasMSA() && CheckTyN(0, Query, {v16s8, v8s16, v4s32, v2s64}))
          return true;
        return false;
      });

  getActionDefinitionsBuilder(G_FCMP)
      .legalFor({{s32, s32}, {s32, s64}})
      .minScalar(0, s32);

  getActionDefinitionsBuilder({G_FCEIL, G_FFLOOR})
      .libcallFor({s32, s64});

  getActionDefinitionsBuilder(G_FPEXT)
      .legalFor({{s64, s32}});

  getActionDefinitionsBuilder(G_FPTRUNC)
      .legalFor({{s32, s64}});

  // FP to int conversion instructions
  getActionDefinitionsBuilder(G_FPTOSI)
      .legalForCartesianProduct({s32}, {s64, s32})
      .libcallForCartesianProduct({s64}, {s64, s32})
      .minScalar(0, s32);

  getActionDefinitionsBuilder(G_FPTOUI)
      .libcallForCartesianProduct({s64}, {s64, s32})
      .lowerForCartesianProduct({s32}, {s64, s32})
      .minScalar(0, s32);

  // Int to FP conversion instructions
  getActionDefinitionsBuilder(G_SITOFP)
      .legalForCartesianProduct({s64, s32}, {s32})
      .libcallForCartesianProduct({s64, s32}, {s64})
      .minScalar(1, s32);

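  // MIPS32 has no unsigned-to-FP conversion instruction, so an s32 source is
  // custom lowered (see the G_UITOFP case in legalizeCustom below).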
  getActionDefinitionsBuilder(G_UITOFP)
      .libcallForCartesianProduct({s64, s32}, {s64})
      .customForCartesianProduct({s64, s32}, {s32})
      .minScalar(1, s32);

  getActionDefinitionsBuilder(G_SEXT_INREG).lower();

  getActionDefinitionsBuilder({G_MEMCPY, G_MEMMOVE, G_MEMSET}).libcall();

  computeTables();
  verify(*ST.getInstrInfo());
}

bool MipsLegalizerInfo::legalizeCustom(LegalizerHelper &Helper,
                                       MachineInstr &MI) const {
  using namespace TargetOpcode;

  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
  MachineRegisterInfo &MRI = *MIRBuilder.getMRI();

  const LLT s32 = LLT::scalar(32);
  const LLT s64 = LLT::scalar(64);

  switch (MI.getOpcode()) {
  case G_LOAD:
  case G_STORE: {
    unsigned MemSize = (**MI.memoperands_begin()).getSize();
    Register Val = MI.getOperand(0).getReg();
    unsigned Size = MRI.getType(Val).getSizeInBits();

    MachineMemOperand *MMOBase = *MI.memoperands_begin();

    assert(MemSize <= 8 && "MemSize is too large");
    assert(Size <= 64 && "Scalar size is too large");

    // Split MemSize into two parts: P2HalfMemSize is the largest power of two
    // smaller than MemSize, e.g. 8 = 4 + 4, 6 = 4 + 2, 3 = 2 + 1.
    unsigned P2HalfMemSize, RemMemSize;
    if (isPowerOf2_64(MemSize)) {
      P2HalfMemSize = RemMemSize = MemSize / 2;
    } else {
      P2HalfMemSize = 1 << Log2_32(MemSize);
      RemMemSize = MemSize - P2HalfMemSize;
    }

    Register BaseAddr = MI.getOperand(1).getReg();
    LLT PtrTy = MRI.getType(BaseAddr);
    MachineFunction &MF = MIRBuilder.getMF();

    auto P2HalfMemOp = MF.getMachineMemOperand(MMOBase, 0, P2HalfMemSize);
    auto RemMemOp = MF.getMachineMemOperand(MMOBase, P2HalfMemSize, RemMemSize);

    if (MI.getOpcode() == G_STORE) {
      // Widen Val to s32 or s64 in order to create legal G_LSHR or G_UNMERGE.
      if (Size < 32)
        Val = MIRBuilder.buildAnyExt(s32, Val).getReg(0);
      if (Size > 32 && Size < 64)
        Val = MIRBuilder.buildAnyExt(s64, Val).getReg(0);

      auto C_P2HalfMemSize = MIRBuilder.buildConstant(s32, P2HalfMemSize);
      auto Addr = MIRBuilder.buildPtrAdd(PtrTy, BaseAddr, C_P2HalfMemSize);

      if (MI.getOpcode() == G_STORE && MemSize <= 4) {
        MIRBuilder.buildStore(Val, BaseAddr, *P2HalfMemOp);
        auto C_P2Half_InBits = MIRBuilder.buildConstant(s32, P2HalfMemSize * 8);
        auto Shift = MIRBuilder.buildLShr(s32, Val, C_P2Half_InBits);
        MIRBuilder.buildStore(Shift, Addr, *RemMemOp);
      } else {
        auto Unmerge = MIRBuilder.buildUnmerge(s32, Val);
        MIRBuilder.buildStore(Unmerge.getReg(0), BaseAddr, *P2HalfMemOp);
        MIRBuilder.buildStore(Unmerge.getReg(1), Addr, *RemMemOp);
      }
    }

    if (MI.getOpcode() == G_LOAD) {
      if (MemSize <= 4) {
        // This is an any-extending load; use a 4-byte lwr/lwl pair.
        auto *Load4MMO = MF.getMachineMemOperand(MMOBase, 0, 4);

        if (Size == 32)
          MIRBuilder.buildLoad(Val, BaseAddr, *Load4MMO);
        else {
          auto Load = MIRBuilder.buildLoad(s32, BaseAddr, *Load4MMO);
          MIRBuilder.buildTrunc(Val, Load.getReg(0));
        }
      } else {
        auto C_P2HalfMemSize = MIRBuilder.buildConstant(s32, P2HalfMemSize);
        auto Addr = MIRBuilder.buildPtrAdd(PtrTy, BaseAddr, C_P2HalfMemSize);

        auto Load_P2Half = MIRBuilder.buildLoad(s32, BaseAddr, *P2HalfMemOp);
        auto Load_Rem = MIRBuilder.buildLoad(s32, Addr, *RemMemOp);

        if (Size == 64)
          MIRBuilder.buildMerge(Val, {Load_P2Half, Load_Rem});
        else {
          auto Merge = MIRBuilder.buildMerge(s64, {Load_P2Half, Load_Rem});
          MIRBuilder.buildTrunc(Val, Merge);
        }
      }
    }
    MI.eraseFromParent();
    break;
  }
  case G_UITOFP: {
    Register Dst = MI.getOperand(0).getReg();
    Register Src = MI.getOperand(1).getReg();
    LLT DstTy = MRI.getType(Dst);
    LLT SrcTy = MRI.getType(Src);

    if (SrcTy != s32)
      return false;
    if (DstTy != s32 && DstTy != s64)
      return false;

    // Let 0xABCDEFGH be the unsigned value in MI.getOperand(1). First, convert
    // unsigned to double. The mantissa has 52 bits, so we use the following
    // trick: build the floating point bit pattern 0x43300000ABCDEFGH, which
    // represents 2^52 * 0x1.00000ABCDEFGH, i.e. 0x100000ABCDEFGH.0. Next,
    // subtract 2^52 * 0x1.0000000000000, i.e. 0x10000000000000.0, from it.
    // Done. Truncate the double to float if needed.
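    // For example, Src = 1 yields the pattern 0x4330000000000001, which as a
    // double is 2^52 + 1; subtracting 2^52 leaves exactly 1.0.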

    auto C_HiMask = MIRBuilder.buildConstant(s32, UINT32_C(0x43300000));
    auto Bitcast = MIRBuilder.buildMerge(s64, {Src, C_HiMask.getReg(0)});

    MachineInstrBuilder TwoP52FP = MIRBuilder.buildFConstant(
        s64, BitsToDouble(UINT64_C(0x4330000000000000)));

    if (DstTy == s64)
      MIRBuilder.buildFSub(Dst, Bitcast, TwoP52FP);
    else {
      MachineInstrBuilder ResF64 = MIRBuilder.buildFSub(s64, Bitcast, TwoP52FP);
      MIRBuilder.buildFPTrunc(Dst, ResF64);
    }

    MI.eraseFromParent();
    break;
  }
  default:
    return false;
  }

  return true;
}

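// Selects an MSA intrinsic with one def and two uses directly to the target
// instruction Opcode, constraining its register operands to the required
// register classes.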
static bool SelectMSA3OpIntrinsic(MachineInstr &MI, unsigned Opcode,
                                  MachineIRBuilder &MIRBuilder,
                                  const MipsSubtarget &ST) {
  assert(ST.hasMSA() && "MSA intrinsic not supported on target without MSA.");
  if (!MIRBuilder.buildInstr(Opcode)
           .add(MI.getOperand(0))
           .add(MI.getOperand(2))
           .add(MI.getOperand(3))
           .constrainAllUses(MIRBuilder.getTII(), *ST.getRegisterInfo(),
                             *ST.getRegBankInfo()))
    return false;
  MI.eraseFromParent();
  return true;
}

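// Rewrites an MSA intrinsic with one def and two uses as the equivalent
// generic instruction Opcode, to be legalized and selected like any other
// generic operation.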
static bool MSA3OpIntrinsicToGeneric(MachineInstr &MI, unsigned Opcode,
                                     MachineIRBuilder &MIRBuilder,
                                     const MipsSubtarget &ST) {
  assert(ST.hasMSA() && "MSA intrinsic not supported on target without MSA.");
  MIRBuilder.buildInstr(Opcode)
      .add(MI.getOperand(0))
      .add(MI.getOperand(2))
      .add(MI.getOperand(3));
  MI.eraseFromParent();
  return true;
}

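// Rewrites an MSA intrinsic with one def and one use as the equivalent generic
// instruction Opcode.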
static bool MSA2OpIntrinsicToGeneric(MachineInstr &MI, unsigned Opcode,
                                     MachineIRBuilder &MIRBuilder,
                                     const MipsSubtarget &ST) {
  assert(ST.hasMSA() && "MSA intrinsic not supported on target without MSA.");
  MIRBuilder.buildInstr(Opcode)
      .add(MI.getOperand(0))
      .add(MI.getOperand(2));
  MI.eraseFromParent();
  return true;
}

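// Lowers Mips intrinsics: trap and vacopy are expanded here, while MSA
// intrinsics are mapped either to generic opcodes or directly to MSA
// instructions.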
bool MipsLegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
                                          MachineInstr &MI) const {
  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
  const MipsSubtarget &ST =
      static_cast<const MipsSubtarget &>(MI.getMF()->getSubtarget());
  const MipsInstrInfo &TII = *ST.getInstrInfo();
  const MipsRegisterInfo &TRI = *ST.getRegisterInfo();
  const RegisterBankInfo &RBI = *ST.getRegBankInfo();

  switch (MI.getIntrinsicID()) {
  case Intrinsic::trap: {
    MachineInstr *Trap = MIRBuilder.buildInstr(Mips::TRAP);
    MI.eraseFromParent();
    return constrainSelectedInstRegOperands(*Trap, TII, TRI, RBI);
  }
  case Intrinsic::vacopy: {
    MachinePointerInfo MPO;
    auto Tmp =
        MIRBuilder.buildLoad(LLT::pointer(0, 32), MI.getOperand(2),
                             *MI.getMF()->getMachineMemOperand(
                                 MPO, MachineMemOperand::MOLoad, 4, Align(4)));
    MIRBuilder.buildStore(Tmp, MI.getOperand(1),
                          *MI.getMF()->getMachineMemOperand(
                              MPO, MachineMemOperand::MOStore, 4, Align(4)));
    MI.eraseFromParent();
    return true;
  }
  case Intrinsic::mips_addv_b:
  case Intrinsic::mips_addv_h:
  case Intrinsic::mips_addv_w:
  case Intrinsic::mips_addv_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_ADD, MIRBuilder, ST);
  case Intrinsic::mips_addvi_b:
    return SelectMSA3OpIntrinsic(MI, Mips::ADDVI_B, MIRBuilder, ST);
  case Intrinsic::mips_addvi_h:
    return SelectMSA3OpIntrinsic(MI, Mips::ADDVI_H, MIRBuilder, ST);
  case Intrinsic::mips_addvi_w:
    return SelectMSA3OpIntrinsic(MI, Mips::ADDVI_W, MIRBuilder, ST);
  case Intrinsic::mips_addvi_d:
    return SelectMSA3OpIntrinsic(MI, Mips::ADDVI_D, MIRBuilder, ST);
  case Intrinsic::mips_subv_b:
  case Intrinsic::mips_subv_h:
  case Intrinsic::mips_subv_w:
  case Intrinsic::mips_subv_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_SUB, MIRBuilder, ST);
  case Intrinsic::mips_subvi_b:
    return SelectMSA3OpIntrinsic(MI, Mips::SUBVI_B, MIRBuilder, ST);
  case Intrinsic::mips_subvi_h:
    return SelectMSA3OpIntrinsic(MI, Mips::SUBVI_H, MIRBuilder, ST);
  case Intrinsic::mips_subvi_w:
    return SelectMSA3OpIntrinsic(MI, Mips::SUBVI_W, MIRBuilder, ST);
  case Intrinsic::mips_subvi_d:
    return SelectMSA3OpIntrinsic(MI, Mips::SUBVI_D, MIRBuilder, ST);
  case Intrinsic::mips_mulv_b:
  case Intrinsic::mips_mulv_h:
  case Intrinsic::mips_mulv_w:
  case Intrinsic::mips_mulv_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_MUL, MIRBuilder, ST);
  case Intrinsic::mips_div_s_b:
  case Intrinsic::mips_div_s_h:
  case Intrinsic::mips_div_s_w:
  case Intrinsic::mips_div_s_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_SDIV, MIRBuilder, ST);
  case Intrinsic::mips_mod_s_b:
  case Intrinsic::mips_mod_s_h:
  case Intrinsic::mips_mod_s_w:
  case Intrinsic::mips_mod_s_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_SREM, MIRBuilder, ST);
  case Intrinsic::mips_div_u_b:
  case Intrinsic::mips_div_u_h:
  case Intrinsic::mips_div_u_w:
  case Intrinsic::mips_div_u_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_UDIV, MIRBuilder, ST);
  case Intrinsic::mips_mod_u_b:
  case Intrinsic::mips_mod_u_h:
  case Intrinsic::mips_mod_u_w:
  case Intrinsic::mips_mod_u_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_UREM, MIRBuilder, ST);
  case Intrinsic::mips_fadd_w:
  case Intrinsic::mips_fadd_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_FADD, MIRBuilder, ST);
  case Intrinsic::mips_fsub_w:
  case Intrinsic::mips_fsub_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_FSUB, MIRBuilder, ST);
  case Intrinsic::mips_fmul_w:
  case Intrinsic::mips_fmul_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_FMUL, MIRBuilder, ST);
  case Intrinsic::mips_fdiv_w:
  case Intrinsic::mips_fdiv_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_FDIV, MIRBuilder, ST);
  case Intrinsic::mips_fmax_a_w:
    return SelectMSA3OpIntrinsic(MI, Mips::FMAX_A_W, MIRBuilder, ST);
  case Intrinsic::mips_fmax_a_d:
    return SelectMSA3OpIntrinsic(MI, Mips::FMAX_A_D, MIRBuilder, ST);
  case Intrinsic::mips_fsqrt_w:
  case Intrinsic::mips_fsqrt_d:
    return MSA2OpIntrinsicToGeneric(MI, TargetOpcode::G_FSQRT, MIRBuilder, ST);
  default:
    break;
  }
  return true;
}