//===-- Target.cpp --------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Coding style: https://mlir.llvm.org/getting_started/DeveloperGuide/
//
//===----------------------------------------------------------------------===//

#include "flang/Optimizer/CodeGen/Target.h"
#include "flang/Optimizer/Builder/Todo.h"
#include "flang/Optimizer/Dialect/FIRType.h"
#include "flang/Optimizer/Dialect/Support/KindMapping.h"
#include "flang/Optimizer/Support/FatalError.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/TypeRange.h"

#define DEBUG_TYPE "flang-codegen-target"

using namespace fir;

namespace fir::details {
llvm::StringRef Attributes::getIntExtensionAttrName() const {
  // The attribute names are available via LLVM dialect interfaces
  // like getZExtAttrName(), getByValAttrName(), etc., so it would be
  // better to use those helpers than string literals.
  if (isZeroExt())
    return "llvm.zeroext";
  else if (isSignExt())
    return "llvm.signext";
  return {};
}
} // namespace fir::details

// Reduce a REAL/float type to the floating point semantics.
static const llvm::fltSemantics &floatToSemantics(const KindMapping &kindMap,
                                                  mlir::Type type) {
  assert(isa_real(type));
  if (auto ty = type.dyn_cast<fir::RealType>())
    return kindMap.getFloatSemantics(ty.getFKind());
  return type.cast<mlir::FloatType>().getFloatSemantics();
}

namespace {
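// GenericTarget provides target-independent defaults for the CodeGenSpecifics
// interface. Concrete targets derive from it via CRTP, supply the default
// integer/index width as S::defaultWidth, and override the complex-number
// marshalling hooks below to match their ABI.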
template <typename S>
struct GenericTarget : public CodeGenSpecifics {
  using CodeGenSpecifics::CodeGenSpecifics;
  using AT = CodeGenSpecifics::Attributes;

  mlir::Type complexMemoryType(mlir::Type eleTy) const override {
    assert(fir::isa_real(eleTy));
    // Use a type that will be translated into LLVM as:
    // { t, t }   struct of 2 eleTy
    return mlir::TupleType::get(eleTy.getContext(),
                                mlir::TypeRange{eleTy, eleTy});
  }

  mlir::Type boxcharMemoryType(mlir::Type eleTy) const override {
    auto idxTy = mlir::IntegerType::get(eleTy.getContext(), S::defaultWidth);
    auto ptrTy = fir::ReferenceType::get(eleTy);
    // Use a type that will be translated into LLVM as:
    // { t*, index }
    return mlir::TupleType::get(eleTy.getContext(),
                                mlir::TypeRange{ptrTy, idxTy});
  }

  Marshalling boxcharArgumentType(mlir::Type eleTy, bool sret) const override {
    CodeGenSpecifics::Marshalling marshal;
    auto idxTy = mlir::IntegerType::get(eleTy.getContext(), S::defaultWidth);
    auto ptrTy = fir::ReferenceType::get(eleTy);
    marshal.emplace_back(ptrTy, AT{});
    // Return value arguments are grouped as a pair. Others are passed in a
    // split format with all pointers first (in the declared position) and all
    // LEN arguments appended after all of the dummy arguments.
    // NB: Other conventions/ABIs can/should be supported via options.
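    // For example, under this convention a call to a routine with two
    // CHARACTER dummy arguments is lowered roughly as
    //   sub(ptr1, ptr2, len1, len2)
    // whereas a character result keeps its (ptr, len) pair grouped together
    // (illustrative sketch, not verbatim generated code).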
    marshal.emplace_back(idxTy, AT{/*alignment=*/0, /*byval=*/false,
                                   /*sret=*/sret, /*append=*/!sret});
    return marshal;
  }

  CodeGenSpecifics::Marshalling
  integerArgumentType(mlir::Location loc,
                      mlir::IntegerType argTy) const override {
    CodeGenSpecifics::Marshalling marshal;
    AT::IntegerExtension intExt = AT::IntegerExtension::None;
    if (argTy.getWidth() < getCIntTypeWidth()) {
      // The isSigned() and isUnsigned() branches below are currently dead
      // code. If needed, we can generate calls with signed/unsigned argument
      // types to match the C side more precisely (e.g. for Fortran runtime
      // functions taking 'unsigned short' arguments).
      if (argTy.isSigned())
        intExt = AT::IntegerExtension::Sign;
      else if (argTy.isUnsigned())
        intExt = AT::IntegerExtension::Zero;
      else if (argTy.isSignless()) {
        // Zero extend for 'i1' and sign extend for other types.
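        // Note: Fortran integers are lowered to signless MLIR integer types,
        // so in practice this is the branch that is taken.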
        if (argTy.getWidth() == 1)
          intExt = AT::IntegerExtension::Zero;
        else
          intExt = AT::IntegerExtension::Sign;
      }
    }

    marshal.emplace_back(argTy, AT{/*alignment=*/0, /*byval=*/false,
                                   /*sret=*/false, /*append=*/false,
                                   /*intExt=*/intExt});
    return marshal;
  }

  CodeGenSpecifics::Marshalling
  integerReturnType(mlir::Location loc,
                    mlir::IntegerType argTy) const override {
    return integerArgumentType(loc, argTy);
  }

  // The width of the 'int' type is 32 bits for almost all targets, except
  // for AVR and MSP430 (see the TargetInfo initializations
  // in clang/lib/Basic/Targets).
  unsigned char getCIntTypeWidth() const override { return 32; }
};
} // namespace

//===----------------------------------------------------------------------===//
// i386 (x86 32 bit) linux target specifics.
//===----------------------------------------------------------------------===//

namespace {
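// Complex arguments are passed indirectly (byval) on i386. A single-precision
// complex result is packed into a 64-bit integer, and a double-precision one
// is returned through an sret pointer (summary of the handlers below).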
struct TargetI386 : public GenericTarget<TargetI386> {
  using GenericTarget::GenericTarget;

  static constexpr int defaultWidth = 32;

  CodeGenSpecifics::Marshalling
  complexArgumentType(mlir::Location, mlir::Type eleTy) const override {
    assert(fir::isa_real(eleTy));
    CodeGenSpecifics::Marshalling marshal;
    // Use a type that will be translated into LLVM as:
    // { t, t }   struct of 2 eleTy, byval, align 4
    auto structTy =
        mlir::TupleType::get(eleTy.getContext(), mlir::TypeRange{eleTy, eleTy});
    marshal.emplace_back(fir::ReferenceType::get(structTy),
                         AT{/*alignment=*/4, /*byval=*/true});
    return marshal;
  }

  CodeGenSpecifics::Marshalling
  complexReturnType(mlir::Location loc, mlir::Type eleTy) const override {
    assert(fir::isa_real(eleTy));
    CodeGenSpecifics::Marshalling marshal;
    const auto *sem = &floatToSemantics(kindMap, eleTy);
    if (sem == &llvm::APFloat::IEEEsingle()) {
      // i64   pack both floats in a 64-bit GPR
      marshal.emplace_back(mlir::IntegerType::get(eleTy.getContext(), 64),
                           AT{});
    } else if (sem == &llvm::APFloat::IEEEdouble()) {
      // Use a type that will be translated into LLVM as:
      // { t, t }   struct of 2 eleTy, sret, align 4
      auto structTy = mlir::TupleType::get(eleTy.getContext(),
                                           mlir::TypeRange{eleTy, eleTy});
      marshal.emplace_back(fir::ReferenceType::get(structTy),
                           AT{/*alignment=*/4, /*byval=*/false, /*sret=*/true});
    } else {
      TODO(loc, "complex for this precision");
    }
    return marshal;
  }
};
} // namespace

//===----------------------------------------------------------------------===//
// x86_64 (x86 64 bit) linux target specifics.
//===----------------------------------------------------------------------===//

namespace {
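// The cases below mirror the x86-64 System V classification clang applies to
// _Complex values: single precision in one SSE register, double precision as
// two separate scalars, and quad precision in memory (informal summary, not
// an authoritative ABI statement).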
struct TargetX86_64 : public GenericTarget<TargetX86_64> {
  using GenericTarget::GenericTarget;

  static constexpr int defaultWidth = 64;

  CodeGenSpecifics::Marshalling
  complexArgumentType(mlir::Location loc, mlir::Type eleTy) const override {
    CodeGenSpecifics::Marshalling marshal;
    const auto *sem = &floatToSemantics(kindMap, eleTy);
    if (sem == &llvm::APFloat::IEEEsingle()) {
      // <2 x t>   vector of 2 eleTy
      marshal.emplace_back(fir::VectorType::get(2, eleTy), AT{});
    } else if (sem == &llvm::APFloat::IEEEdouble()) {
      // two distinct double arguments
      marshal.emplace_back(eleTy, AT{});
      marshal.emplace_back(eleTy, AT{});
    } else if (sem == &llvm::APFloat::IEEEquad()) {
      // Use a type that will be translated into LLVM as:
      // { fp128, fp128 }   struct of 2 fp128, byval, align 16
      marshal.emplace_back(
          fir::ReferenceType::get(mlir::TupleType::get(
              eleTy.getContext(), mlir::TypeRange{eleTy, eleTy})),
          AT{/*align=*/16, /*byval=*/true});
    } else {
      TODO(loc, "complex for this precision");
    }
    return marshal;
  }

  CodeGenSpecifics::Marshalling
  complexReturnType(mlir::Location loc, mlir::Type eleTy) const override {
    CodeGenSpecifics::Marshalling marshal;
    const auto *sem = &floatToSemantics(kindMap, eleTy);
    if (sem == &llvm::APFloat::IEEEsingle()) {
      // <2 x t>   vector of 2 eleTy
      marshal.emplace_back(fir::VectorType::get(2, eleTy), AT{});
    } else if (sem == &llvm::APFloat::IEEEdouble()) {
      // Use a type that will be translated into LLVM as:
      // { double, double }   struct of 2 double
      marshal.emplace_back(mlir::TupleType::get(eleTy.getContext(),
                                                mlir::TypeRange{eleTy, eleTy}),
                           AT{});
    } else if (sem == &llvm::APFloat::IEEEquad()) {
      // Use a type that will be translated into LLVM as:
      // { fp128, fp128 }   struct of 2 fp128, sret, align 16
      marshal.emplace_back(
          fir::ReferenceType::get(mlir::TupleType::get(
              eleTy.getContext(), mlir::TypeRange{eleTy, eleTy})),
          AT{/*align=*/16, /*byval=*/false, /*sret=*/true});
    } else {
      TODO(loc, "complex for this precision");
    }
    return marshal;
  }
};
} // namespace

//===----------------------------------------------------------------------===//
// AArch64 linux target specifics.
//===----------------------------------------------------------------------===//

namespace {
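// Under AAPCS64, a single- or double-precision complex value forms a
// two-element homogeneous floating-point aggregate, hence the [2 x t] array
// used for arguments below (informal note, not an ABI reference).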
struct TargetAArch64 : public GenericTarget<TargetAArch64> {
  using GenericTarget::GenericTarget;

  static constexpr int defaultWidth = 64;

  CodeGenSpecifics::Marshalling
  complexArgumentType(mlir::Location loc, mlir::Type eleTy) const override {
    CodeGenSpecifics::Marshalling marshal;
    const auto *sem = &floatToSemantics(kindMap, eleTy);
    if (sem == &llvm::APFloat::IEEEsingle() ||
        sem == &llvm::APFloat::IEEEdouble()) {
      // [2 x t]   array of 2 eleTy
      marshal.emplace_back(fir::SequenceType::get({2}, eleTy), AT{});
    } else {
      TODO(loc, "complex for this precision");
    }
    return marshal;
  }

  CodeGenSpecifics::Marshalling
  complexReturnType(mlir::Location loc, mlir::Type eleTy) const override {
    CodeGenSpecifics::Marshalling marshal;
    const auto *sem = &floatToSemantics(kindMap, eleTy);
    if (sem == &llvm::APFloat::IEEEsingle() ||
        sem == &llvm::APFloat::IEEEdouble()) {
      // Use a type that will be translated into LLVM as:
      // { t, t }   struct of 2 eleTy
      marshal.emplace_back(mlir::TupleType::get(eleTy.getContext(),
                                                mlir::TypeRange{eleTy, eleTy}),
                           AT{});
    } else {
      TODO(loc, "complex for this precision");
    }
    return marshal;
  }
};
} // namespace

//===----------------------------------------------------------------------===//
// PPC64 (AIX 64 bit) target specifics.
//===----------------------------------------------------------------------===//

namespace {
struct TargetPPC64 : public GenericTarget<TargetPPC64> {
  using GenericTarget::GenericTarget;

  static constexpr int defaultWidth = 64;

  CodeGenSpecifics::Marshalling
  complexArgumentType(mlir::Location, mlir::Type eleTy) const override {
    CodeGenSpecifics::Marshalling marshal;
    // two distinct element type arguments (re, im)
    marshal.emplace_back(eleTy, AT{});
    marshal.emplace_back(eleTy, AT{});
    return marshal;
  }

  CodeGenSpecifics::Marshalling
  complexReturnType(mlir::Location, mlir::Type eleTy) const override {
    CodeGenSpecifics::Marshalling marshal;
    // Use a type that will be translated into LLVM as:
    // { t, t }   struct of 2 element type
    marshal.emplace_back(
        mlir::TupleType::get(eleTy.getContext(), mlir::TypeRange{eleTy, eleTy}),
        AT{});
    return marshal;
  }
};
} // namespace

//===----------------------------------------------------------------------===//
// PPC64le linux target specifics.
//===----------------------------------------------------------------------===//

namespace {
struct TargetPPC64le : public GenericTarget<TargetPPC64le> {
  using GenericTarget::GenericTarget;

  static constexpr int defaultWidth = 64;

  CodeGenSpecifics::Marshalling
  complexArgumentType(mlir::Location, mlir::Type eleTy) const override {
    CodeGenSpecifics::Marshalling marshal;
    // two distinct element type arguments (re, im)
    marshal.emplace_back(eleTy, AT{});
    marshal.emplace_back(eleTy, AT{});
    return marshal;
  }

  CodeGenSpecifics::Marshalling
  complexReturnType(mlir::Location, mlir::Type eleTy) const override {
    CodeGenSpecifics::Marshalling marshal;
    // Use a type that will be translated into LLVM as:
    // { t, t }   struct of 2 element type
    marshal.emplace_back(
        mlir::TupleType::get(eleTy.getContext(), mlir::TypeRange{eleTy, eleTy}),
        AT{});
    return marshal;
  }
};
} // namespace

//===----------------------------------------------------------------------===//
// sparc (sparc 32 bit) target specifics.
//===----------------------------------------------------------------------===//

namespace {
struct TargetSparc : public GenericTarget<TargetSparc> {
  using GenericTarget::GenericTarget;

  static constexpr int defaultWidth = 32;

  CodeGenSpecifics::Marshalling
  complexArgumentType(mlir::Location, mlir::Type eleTy) const override {
    assert(fir::isa_real(eleTy));
    CodeGenSpecifics::Marshalling marshal;
    // Use a type that will be translated into LLVM as:
    // { t, t }   struct of 2 eleTy
    auto structTy =
        mlir::TupleType::get(eleTy.getContext(), mlir::TypeRange{eleTy, eleTy});
    marshal.emplace_back(fir::ReferenceType::get(structTy), AT{});
    return marshal;
  }

  CodeGenSpecifics::Marshalling
  complexReturnType(mlir::Location loc, mlir::Type eleTy) const override {
    assert(fir::isa_real(eleTy));
    CodeGenSpecifics::Marshalling marshal;
    // Use a type that will be translated into LLVM as:
    // { t, t }   struct of 2 eleTy, byval
    auto structTy =
        mlir::TupleType::get(eleTy.getContext(), mlir::TypeRange{eleTy, eleTy});
    marshal.emplace_back(fir::ReferenceType::get(structTy),
                         AT{/*alignment=*/0, /*byval=*/true});
    return marshal;
  }
};
} // namespace

//===----------------------------------------------------------------------===//
// sparcv9 (sparc 64 bit) target specifics.
//===----------------------------------------------------------------------===//

namespace {
struct TargetSparcV9 : public GenericTarget<TargetSparcV9> {
  using GenericTarget::GenericTarget;

  static constexpr int defaultWidth = 64;

  CodeGenSpecifics::Marshalling
  complexArgumentType(mlir::Location loc, mlir::Type eleTy) const override {
    CodeGenSpecifics::Marshalling marshal;
    const auto *sem = &floatToSemantics(kindMap, eleTy);
    if (sem == &llvm::APFloat::IEEEsingle() ||
        sem == &llvm::APFloat::IEEEdouble()) {
      // two distinct float or double arguments
      marshal.emplace_back(eleTy, AT{});
      marshal.emplace_back(eleTy, AT{});
    } else if (sem == &llvm::APFloat::IEEEquad()) {
      // Use a type that will be translated into LLVM as:
      // { fp128, fp128 }   struct of 2 fp128, byval, align 16
      marshal.emplace_back(
          fir::ReferenceType::get(mlir::TupleType::get(
              eleTy.getContext(), mlir::TypeRange{eleTy, eleTy})),
          AT{/*align=*/16, /*byval=*/true});
    } else {
      TODO(loc, "complex for this precision");
    }
    return marshal;
  }

  CodeGenSpecifics::Marshalling
  complexReturnType(mlir::Location loc, mlir::Type eleTy) const override {
    CodeGenSpecifics::Marshalling marshal;
    // Use a type that will be translated into LLVM as:
    // { eleTy, eleTy }   struct of 2 eleTy
    marshal.emplace_back(
        mlir::TupleType::get(eleTy.getContext(), mlir::TypeRange{eleTy, eleTy}),
        AT{});
    return marshal;
  }
};
} // namespace

//===----------------------------------------------------------------------===//
// RISCV64 linux target specifics.
//===----------------------------------------------------------------------===//

namespace {
struct TargetRISCV64 : public GenericTarget<TargetRISCV64> {
  using GenericTarget::GenericTarget;

  static constexpr int defaultWidth = 64;

  CodeGenSpecifics::Marshalling
  complexArgumentType(mlir::Location loc, mlir::Type eleTy) const override {
    CodeGenSpecifics::Marshalling marshal;
    const auto *sem = &floatToSemantics(kindMap, eleTy);
    if (sem == &llvm::APFloat::IEEEsingle() ||
        sem == &llvm::APFloat::IEEEdouble()) {
      // Two distinct element type arguments (re, im)
      marshal.emplace_back(eleTy, AT{});
      marshal.emplace_back(eleTy, AT{});
    } else {
      TODO(loc, "complex for this precision");
    }
    return marshal;
  }

  CodeGenSpecifics::Marshalling
  complexReturnType(mlir::Location loc, mlir::Type eleTy) const override {
    CodeGenSpecifics::Marshalling marshal;
    const auto *sem = &floatToSemantics(kindMap, eleTy);
    if (sem == &llvm::APFloat::IEEEsingle() ||
        sem == &llvm::APFloat::IEEEdouble()) {
      // Use a type that will be translated into LLVM as:
      // { t, t }   struct of 2 eleTy, byval
      marshal.emplace_back(mlir::TupleType::get(eleTy.getContext(),
                                                mlir::TypeRange{eleTy, eleTy}),
                           AT{/*alignment=*/0, /*byval=*/true});
    } else {
      TODO(loc, "complex for this precision");
    }
    return marshal;
  }
};
} // namespace

//===----------------------------------------------------------------------===//
// AMDGPU linux target specifics.
//===----------------------------------------------------------------------===//

namespace {
struct TargetAMDGPU : public GenericTarget<TargetAMDGPU> {
  using GenericTarget::GenericTarget;

  // Default size (in bits) of the index type for strings.
  static constexpr int defaultWidth = 64;

  CodeGenSpecifics::Marshalling
  complexArgumentType(mlir::Location loc, mlir::Type eleTy) const override {
    CodeGenSpecifics::Marshalling marshal;
    TODO(loc, "handle complex argument types");
    return marshal;
  }

  CodeGenSpecifics::Marshalling
  complexReturnType(mlir::Location loc, mlir::Type eleTy) const override {
    CodeGenSpecifics::Marshalling marshal;
    TODO(loc, "handle complex return types");
    return marshal;
  }
};
} // namespace

//===----------------------------------------------------------------------===//
// LoongArch64 linux target specifics.
//===----------------------------------------------------------------------===//

namespace {
struct TargetLoongArch64 : public GenericTarget<TargetLoongArch64> {
  using GenericTarget::GenericTarget;

  static constexpr int defaultWidth = 64;

  CodeGenSpecifics::Marshalling
  complexArgumentType(mlir::Location loc, mlir::Type eleTy) const override {
    CodeGenSpecifics::Marshalling marshal;
    const auto *sem = &floatToSemantics(kindMap, eleTy);
    if (sem == &llvm::APFloat::IEEEsingle() ||
        sem == &llvm::APFloat::IEEEdouble()) {
      // Two distinct element type arguments (re, im)
      marshal.emplace_back(eleTy, AT{});
      marshal.emplace_back(eleTy, AT{});
    } else {
      TODO(loc, "complex for this precision");
    }
    return marshal;
  }

  CodeGenSpecifics::Marshalling
  complexReturnType(mlir::Location loc, mlir::Type eleTy) const override {
    CodeGenSpecifics::Marshalling marshal;
    const auto *sem = &floatToSemantics(kindMap, eleTy);
    if (sem == &llvm::APFloat::IEEEsingle() ||
        sem == &llvm::APFloat::IEEEdouble()) {
      // Use a type that will be translated into LLVM as:
      // { t, t }   struct of 2 eleTy, byval
      marshal.emplace_back(mlir::TupleType::get(eleTy.getContext(),
                                                mlir::TypeRange{eleTy, eleTy}),
                           AT{/*alignment=*/0, /*byval=*/true});
    } else {
      TODO(loc, "complex for this precision");
    }
    return marshal;
  }
};
} // namespace

// Instantiate the target-specific CodeGenSpecifics instance that matches the
// triple's architecture.
// TODO: Add other targets to this file as needed.
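// Example use (sketch; assumes the usual FIR helpers fir::getTargetTriple and
// fir::getKindMapping to query the module):
//   auto specifics = fir::CodeGenSpecifics::get(
//       mod.getContext(), fir::getTargetTriple(mod), fir::getKindMapping(mod));
//   auto marshalling = specifics->complexArgumentType(loc, eleTy);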
std::unique_ptr<fir::CodeGenSpecifics>
fir::CodeGenSpecifics::get(mlir::MLIRContext *ctx, llvm::Triple &&trp,
                           KindMapping &&kindMap) {
  switch (trp.getArch()) {
  default:
    break;
  case llvm::Triple::ArchType::x86:
    return std::make_unique<TargetI386>(ctx, std::move(trp),
                                        std::move(kindMap));
  case llvm::Triple::ArchType::x86_64:
    return std::make_unique<TargetX86_64>(ctx, std::move(trp),
                                          std::move(kindMap));
  case llvm::Triple::ArchType::aarch64:
    return std::make_unique<TargetAArch64>(ctx, std::move(trp),
                                           std::move(kindMap));
  case llvm::Triple::ArchType::ppc64:
    return std::make_unique<TargetPPC64>(ctx, std::move(trp),
                                         std::move(kindMap));
  case llvm::Triple::ArchType::ppc64le:
    return std::make_unique<TargetPPC64le>(ctx, std::move(trp),
                                           std::move(kindMap));
  case llvm::Triple::ArchType::sparc:
    return std::make_unique<TargetSparc>(ctx, std::move(trp),
                                         std::move(kindMap));
  case llvm::Triple::ArchType::sparcv9:
    return std::make_unique<TargetSparcV9>(ctx, std::move(trp),
                                           std::move(kindMap));
  case llvm::Triple::ArchType::riscv64:
    return std::make_unique<TargetRISCV64>(ctx, std::move(trp),
                                           std::move(kindMap));
  case llvm::Triple::ArchType::amdgcn:
    return std::make_unique<TargetAMDGPU>(ctx, std::move(trp),
                                          std::move(kindMap));
  case llvm::Triple::ArchType::loongarch64:
    return std::make_unique<TargetLoongArch64>(ctx, std::move(trp),
                                               std::move(kindMap));
  }
  TODO(mlir::UnknownLoc::get(ctx), "target not implemented");
}