//===-- Target.cpp --------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Coding style: https://mlir.llvm.org/getting_started/DeveloperGuide/
//
//===----------------------------------------------------------------------===//

#include "flang/Optimizer/CodeGen/Target.h"
#include "flang/Optimizer/Builder/Todo.h"
#include "flang/Optimizer/Dialect/FIRType.h"
#include "flang/Optimizer/Dialect/Support/KindMapping.h"
#include "flang/Optimizer/Support/FatalError.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/TypeRange.h"

#define DEBUG_TYPE "flang-codegen-target"

using namespace fir;

namespace fir::details {
llvm::StringRef Attributes::getIntExtensionAttrName() const {
  // The attribute names are available via LLVM dialect interfaces
  // like getZExtAttrName(), getByValAttrName(), etc., so it is better
  // to use them rather than string literals.
  if (isZeroExt())
    return "llvm.zeroext";
  else if (isSignExt())
    return "llvm.signext";
  return {};
}
} // namespace fir::details

// Reduce a REAL/float type to the floating point semantics.
static const llvm::fltSemantics &floatToSemantics(const KindMapping &kindMap,
                                                  mlir::Type type) {
  assert(isa_real(type));
  if (auto ty = type.dyn_cast<fir::RealType>())
    return kindMap.getFloatSemantics(ty.getFKind());
  return type.cast<mlir::FloatType>().getFloatSemantics();
}

namespace {
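// Note: GenericTarget uses the CRTP pattern: each concrete target below
// derives as GenericTarget<TargetXyz> and provides S::defaultWidth (the
// width, in bits, used for boxchar LEN/index values) in addition to the
// complex-number ABI hooks it overrides from CodeGenSpecifics.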
template <typename S>
struct GenericTarget : public CodeGenSpecifics {
  using CodeGenSpecifics::CodeGenSpecifics;
  using AT = CodeGenSpecifics::Attributes;

  mlir::Type complexMemoryType(mlir::Type eleTy) const override {
    assert(fir::isa_real(eleTy));
    // Use a type that will be translated into LLVM as:
    // { t, t }   struct of 2 eleTy
    return mlir::TupleType::get(eleTy.getContext(),
                                mlir::TypeRange{eleTy, eleTy});
  }

  mlir::Type boxcharMemoryType(mlir::Type eleTy) const override {
    auto idxTy = mlir::IntegerType::get(eleTy.getContext(), S::defaultWidth);
    auto ptrTy = fir::ReferenceType::get(eleTy);
    // Use a type that will be translated into LLVM as:
    // { t*, index }
    return mlir::TupleType::get(eleTy.getContext(),
                                mlir::TypeRange{ptrTy, idxTy});
  }

  Marshalling boxcharArgumentType(mlir::Type eleTy, bool sret) const override {
    CodeGenSpecifics::Marshalling marshal;
    auto idxTy = mlir::IntegerType::get(eleTy.getContext(), S::defaultWidth);
    auto ptrTy = fir::ReferenceType::get(eleTy);
    marshal.emplace_back(ptrTy, AT{});
    // Return value arguments are grouped as a pair. Others are passed in a
    // split format with all pointers first (in the declared position) and all
    // LEN arguments appended after all of the dummy arguments.
    // NB: Other conventions/ABIs can/should be supported via options.
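    // For illustration only (hypothetical Fortran signature): a call with
    // two CHARACTER dummies 'a' and 'b' is lowered with the character
    // pointers in their declared positions and the LEN values appended,
    //   (ptr_a, ptr_b, <other dummies...>, len_a, len_b)
    // whereas a boxchar used as the sret result stays a (ptr, len) pair.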
    marshal.emplace_back(idxTy, AT{/*alignment=*/0, /*byval=*/false,
                                   /*sret=*/sret, /*append=*/!sret});
    return marshal;
  }

  CodeGenSpecifics::Marshalling
  integerArgumentType(mlir::Location loc,
                      mlir::IntegerType argTy) const override {
    CodeGenSpecifics::Marshalling marshal;
    AT::IntegerExtension intExt = AT::IntegerExtension::None;
    if (argTy.getWidth() < getCIntTypeWidth()) {
      // isSigned() and isUnsigned() branches below are dead code currently.
      // If needed, we can generate calls with signed/unsigned argument types
      // to more precisely match C side (e.g. for Fortran runtime functions
      // with 'unsigned short' arguments).
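      // For example, with the default 32-bit C 'int' width below: a
      // signless i16 argument is marked 'llvm.signext', an i1 argument is
      // marked 'llvm.zeroext', and an i32 (or wider) argument gets no
      // extension attribute.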
      if (argTy.isSigned())
        intExt = AT::IntegerExtension::Sign;
      else if (argTy.isUnsigned())
        intExt = AT::IntegerExtension::Zero;
      else if (argTy.isSignless()) {
        // Zero extend for 'i1' and sign extend for other types.
        if (argTy.getWidth() == 1)
          intExt = AT::IntegerExtension::Zero;
        else
          intExt = AT::IntegerExtension::Sign;
      }
    }

    marshal.emplace_back(argTy, AT{/*alignment=*/0, /*byval=*/false,
                                   /*sret=*/false, /*append=*/false,
                                   /*intExt=*/intExt});
    return marshal;
  }

  CodeGenSpecifics::Marshalling
  integerReturnType(mlir::Location loc,
                    mlir::IntegerType argTy) const override {
    return integerArgumentType(loc, argTy);
  }

  // Width of the 'int' type is 32 bits for almost all targets, except
  // for AVR and MSP430 (see TargetInfo initializations
  // in clang/lib/Basic/Targets).
  unsigned char getCIntTypeWidth() const override { return 32; }
};
} // namespace

//===----------------------------------------------------------------------===//
// i386 (x86 32 bit) linux target specifics.
//===----------------------------------------------------------------------===//

namespace {
struct TargetI386 : public GenericTarget<TargetI386> {
  using GenericTarget::GenericTarget;

  static constexpr int defaultWidth = 32;

  CodeGenSpecifics::Marshalling
  complexArgumentType(mlir::Location, mlir::Type eleTy) const override {
    assert(fir::isa_real(eleTy));
    CodeGenSpecifics::Marshalling marshal;
    // Use a type that will be translated into LLVM as:
    // { t, t }   struct of 2 eleTy, byval, align 4
    auto structTy =
        mlir::TupleType::get(eleTy.getContext(), mlir::TypeRange{eleTy, eleTy});
    marshal.emplace_back(fir::ReferenceType::get(structTy),
                         AT{/*alignment=*/4, /*byval=*/true});
    return marshal;
  }

  CodeGenSpecifics::Marshalling
  complexReturnType(mlir::Location loc, mlir::Type eleTy) const override {
    assert(fir::isa_real(eleTy));
    CodeGenSpecifics::Marshalling marshal;
    const auto *sem = &floatToSemantics(kindMap, eleTy);
    if (sem == &llvm::APFloat::IEEEsingle()) {
      // i64   pack both floats in a 64-bit GPR
      marshal.emplace_back(mlir::IntegerType::get(eleTy.getContext(), 64),
                           AT{});
    } else if (sem == &llvm::APFloat::IEEEdouble()) {
      // Use a type that will be translated into LLVM as:
      // { t, t }   struct of 2 eleTy, sret, align 4
      auto structTy = mlir::TupleType::get(eleTy.getContext(),
                                           mlir::TypeRange{eleTy, eleTy});
      marshal.emplace_back(fir::ReferenceType::get(structTy),
                           AT{/*alignment=*/4, /*byval=*/false, /*sret=*/true});
    } else {
      TODO(loc, "complex for this precision");
    }
    return marshal;
  }
};
} // namespace

//===----------------------------------------------------------------------===//
// i386 (x86 32 bit) Windows target specifics.
//===----------------------------------------------------------------------===//

namespace {
struct TargetI386Win : public GenericTarget<TargetI386Win> {
  using GenericTarget::GenericTarget;

  static constexpr int defaultWidth = 32;

  CodeGenSpecifics::Marshalling
  complexArgumentType(mlir::Location loc, mlir::Type eleTy) const override {
    CodeGenSpecifics::Marshalling marshal;
    // Use a type that will be translated into LLVM as:
    // { t, t }   struct of 2 eleTy, byval, align 4
    auto structTy =
        mlir::TupleType::get(eleTy.getContext(), mlir::TypeRange{eleTy, eleTy});
    marshal.emplace_back(fir::ReferenceType::get(structTy),
                         AT{/*align=*/4, /*byval=*/true});
    return marshal;
  }

  CodeGenSpecifics::Marshalling
  complexReturnType(mlir::Location loc, mlir::Type eleTy) const override {
    CodeGenSpecifics::Marshalling marshal;
    const auto *sem = &floatToSemantics(kindMap, eleTy);
    if (sem == &llvm::APFloat::IEEEsingle()) {
      // i64   pack both floats in a 64-bit GPR
      marshal.emplace_back(mlir::IntegerType::get(eleTy.getContext(), 64),
                           AT{});
    } else if (sem == &llvm::APFloat::IEEEdouble()) {
      // Use a type that will be translated into LLVM as:
      // { double, double }   struct of 2 double, sret, align 8
      marshal.emplace_back(
          fir::ReferenceType::get(mlir::TupleType::get(
              eleTy.getContext(), mlir::TypeRange{eleTy, eleTy})),
          AT{/*align=*/8, /*byval=*/false, /*sret=*/true});
    } else if (sem == &llvm::APFloat::IEEEquad()) {
      // Use a type that will be translated into LLVM as:
      // { fp128, fp128 }   struct of 2 fp128, sret, align 16
      marshal.emplace_back(
          fir::ReferenceType::get(mlir::TupleType::get(
              eleTy.getContext(), mlir::TypeRange{eleTy, eleTy})),
          AT{/*align=*/16, /*byval=*/false, /*sret=*/true});
    } else if (sem == &llvm::APFloat::x87DoubleExtended()) {
      // Use a type that will be translated into LLVM as:
      // { x86_fp80, x86_fp80 }   struct of 2 x86_fp80, sret, align 4
      marshal.emplace_back(
          fir::ReferenceType::get(mlir::TupleType::get(
              eleTy.getContext(), mlir::TypeRange{eleTy, eleTy})),
          AT{/*align=*/4, /*byval=*/false, /*sret=*/true});
    } else {
      TODO(loc, "complex for this precision");
    }
    return marshal;
  }
};
} // namespace

//===----------------------------------------------------------------------===//
// x86_64 (x86 64 bit) linux target specifics.
//===----------------------------------------------------------------------===//

namespace {
struct TargetX86_64 : public GenericTarget<TargetX86_64> {
  using GenericTarget::GenericTarget;

  static constexpr int defaultWidth = 64;

  CodeGenSpecifics::Marshalling
  complexArgumentType(mlir::Location loc, mlir::Type eleTy) const override {
    CodeGenSpecifics::Marshalling marshal;
    const auto *sem = &floatToSemantics(kindMap, eleTy);
    if (sem == &llvm::APFloat::IEEEsingle()) {
      // <2 x t>   vector of 2 eleTy
      marshal.emplace_back(fir::VectorType::get(2, eleTy), AT{});
    } else if (sem == &llvm::APFloat::IEEEdouble()) {
      // two distinct double arguments
      marshal.emplace_back(eleTy, AT{});
      marshal.emplace_back(eleTy, AT{});
    } else if (sem == &llvm::APFloat::IEEEquad()) {
      // Use a type that will be translated into LLVM as:
      // { fp128, fp128 }   struct of 2 fp128, byval, align 16
      marshal.emplace_back(
          fir::ReferenceType::get(mlir::TupleType::get(
              eleTy.getContext(), mlir::TypeRange{eleTy, eleTy})),
          AT{/*align=*/16, /*byval=*/true});
    } else {
      TODO(loc, "complex for this precision");
    }
    return marshal;
  }

  CodeGenSpecifics::Marshalling
  complexReturnType(mlir::Location loc, mlir::Type eleTy) const override {
    CodeGenSpecifics::Marshalling marshal;
    const auto *sem = &floatToSemantics(kindMap, eleTy);
    if (sem == &llvm::APFloat::IEEEsingle()) {
      // <2 x t>   vector of 2 eleTy
      marshal.emplace_back(fir::VectorType::get(2, eleTy), AT{});
    } else if (sem == &llvm::APFloat::IEEEdouble()) {
      // Use a type that will be translated into LLVM as:
      // { double, double }   struct of 2 double
      marshal.emplace_back(mlir::TupleType::get(eleTy.getContext(),
                                                mlir::TypeRange{eleTy, eleTy}),
                           AT{});
    } else if (sem == &llvm::APFloat::IEEEquad()) {
      // Use a type that will be translated into LLVM as:
      // { fp128, fp128 }   struct of 2 fp128, sret, align 16
      marshal.emplace_back(
          fir::ReferenceType::get(mlir::TupleType::get(
              eleTy.getContext(), mlir::TypeRange{eleTy, eleTy})),
          AT{/*align=*/16, /*byval=*/false, /*sret=*/true});
    } else {
      TODO(loc, "complex for this precision");
    }
    return marshal;
  }
};
} // namespace

//===----------------------------------------------------------------------===//
// x86_64 (x86 64 bit) Windows target specifics.
//===----------------------------------------------------------------------===//

namespace {
struct TargetX86_64Win : public GenericTarget<TargetX86_64Win> {
  using GenericTarget::GenericTarget;

  static constexpr int defaultWidth = 64;

  CodeGenSpecifics::Marshalling
  complexArgumentType(mlir::Location loc, mlir::Type eleTy) const override {
    CodeGenSpecifics::Marshalling marshal;
    const auto *sem = &floatToSemantics(kindMap, eleTy);
    if (sem == &llvm::APFloat::IEEEsingle()) {
      // i64   pack both floats in a 64-bit GPR
      marshal.emplace_back(mlir::IntegerType::get(eleTy.getContext(), 64),
                           AT{});
    } else if (sem == &llvm::APFloat::IEEEdouble()) {
      // Use a type that will be translated into LLVM as:
      // { double, double }   struct of 2 double, byval, align 8
      marshal.emplace_back(
          fir::ReferenceType::get(mlir::TupleType::get(
              eleTy.getContext(), mlir::TypeRange{eleTy, eleTy})),
          AT{/*align=*/8, /*byval=*/true});
    } else if (sem == &llvm::APFloat::IEEEquad() ||
               sem == &llvm::APFloat::x87DoubleExtended()) {
      // Use a type that will be translated into LLVM as:
      // { t, t }   struct of 2 eleTy, byval, align 16
      marshal.emplace_back(
          fir::ReferenceType::get(mlir::TupleType::get(
              eleTy.getContext(), mlir::TypeRange{eleTy, eleTy})),
          AT{/*align=*/16, /*byval=*/true});
    } else {
      TODO(loc, "complex for this precision");
    }
    return marshal;
  }

  CodeGenSpecifics::Marshalling
  complexReturnType(mlir::Location loc, mlir::Type eleTy) const override {
    CodeGenSpecifics::Marshalling marshal;
    const auto *sem = &floatToSemantics(kindMap, eleTy);
    if (sem == &llvm::APFloat::IEEEsingle()) {
      // i64   pack both floats in a 64-bit GPR
      marshal.emplace_back(mlir::IntegerType::get(eleTy.getContext(), 64),
                           AT{});
    } else if (sem == &llvm::APFloat::IEEEdouble()) {
      // Use a type that will be translated into LLVM as:
      // { double, double }   struct of 2 double, sret, align 8
      marshal.emplace_back(
          fir::ReferenceType::get(mlir::TupleType::get(
              eleTy.getContext(), mlir::TypeRange{eleTy, eleTy})),
          AT{/*align=*/8, /*byval=*/false, /*sret=*/true});
    } else if (sem == &llvm::APFloat::IEEEquad() ||
               sem == &llvm::APFloat::x87DoubleExtended()) {
      // Use a type that will be translated into LLVM as:
      // { t, t }   struct of 2 eleTy, sret, align 16
      marshal.emplace_back(
          fir::ReferenceType::get(mlir::TupleType::get(
              eleTy.getContext(), mlir::TypeRange{eleTy, eleTy})),
          AT{/*align=*/16, /*byval=*/false, /*sret=*/true});
    } else {
      TODO(loc, "complex for this precision");
    }
    return marshal;
  }
};
} // namespace

//===----------------------------------------------------------------------===//
// AArch64 linux target specifics.
//===----------------------------------------------------------------------===//

namespace {
struct TargetAArch64 : public GenericTarget<TargetAArch64> {
  using GenericTarget::GenericTarget;

  static constexpr int defaultWidth = 64;

  CodeGenSpecifics::Marshalling
  complexArgumentType(mlir::Location loc, mlir::Type eleTy) const override {
    CodeGenSpecifics::Marshalling marshal;
    const auto *sem = &floatToSemantics(kindMap, eleTy);
    if (sem == &llvm::APFloat::IEEEsingle() ||
        sem == &llvm::APFloat::IEEEdouble()) {
      // [2 x t]   array of 2 eleTy
      marshal.emplace_back(fir::SequenceType::get({2}, eleTy), AT{});
    } else {
      TODO(loc, "complex for this precision");
    }
    return marshal;
  }

  CodeGenSpecifics::Marshalling
  complexReturnType(mlir::Location loc, mlir::Type eleTy) const override {
    CodeGenSpecifics::Marshalling marshal;
    const auto *sem = &floatToSemantics(kindMap, eleTy);
    if (sem == &llvm::APFloat::IEEEsingle() ||
        sem == &llvm::APFloat::IEEEdouble()) {
      // Use a type that will be translated into LLVM as:
      // { t, t }   struct of 2 eleTy
      marshal.emplace_back(mlir::TupleType::get(eleTy.getContext(),
                                                mlir::TypeRange{eleTy, eleTy}),
                           AT{});
    } else {
      TODO(loc, "complex for this precision");
    }
    return marshal;
  }
};
} // namespace

//===----------------------------------------------------------------------===//
// PPC64 (AIX 64 bit) target specifics.
//===----------------------------------------------------------------------===//

namespace {
struct TargetPPC64 : public GenericTarget<TargetPPC64> {
  using GenericTarget::GenericTarget;

  static constexpr int defaultWidth = 64;

  CodeGenSpecifics::Marshalling
  complexArgumentType(mlir::Location, mlir::Type eleTy) const override {
    CodeGenSpecifics::Marshalling marshal;
    // two distinct element type arguments (re, im)
    marshal.emplace_back(eleTy, AT{});
    marshal.emplace_back(eleTy, AT{});
    return marshal;
  }

  CodeGenSpecifics::Marshalling
  complexReturnType(mlir::Location, mlir::Type eleTy) const override {
    CodeGenSpecifics::Marshalling marshal;
    // Use a type that will be translated into LLVM as:
    // { t, t }   struct of 2 element type
    marshal.emplace_back(
        mlir::TupleType::get(eleTy.getContext(), mlir::TypeRange{eleTy, eleTy}),
        AT{});
    return marshal;
  }
};
} // namespace

//===----------------------------------------------------------------------===//
// PPC64le linux target specifics.
//===----------------------------------------------------------------------===//

namespace {
struct TargetPPC64le : public GenericTarget<TargetPPC64le> {
  using GenericTarget::GenericTarget;

  static constexpr int defaultWidth = 64;

  CodeGenSpecifics::Marshalling
  complexArgumentType(mlir::Location, mlir::Type eleTy) const override {
    CodeGenSpecifics::Marshalling marshal;
    // two distinct element type arguments (re, im)
    marshal.emplace_back(eleTy, AT{});
    marshal.emplace_back(eleTy, AT{});
    return marshal;
  }

  CodeGenSpecifics::Marshalling
  complexReturnType(mlir::Location, mlir::Type eleTy) const override {
    CodeGenSpecifics::Marshalling marshal;
    // Use a type that will be translated into LLVM as:
    // { t, t }   struct of 2 element type
    marshal.emplace_back(
        mlir::TupleType::get(eleTy.getContext(), mlir::TypeRange{eleTy, eleTy}),
        AT{});
    return marshal;
  }
};
} // namespace

//===----------------------------------------------------------------------===//
// sparc (sparc 32 bit) target specifics.
//===----------------------------------------------------------------------===//

namespace {
struct TargetSparc : public GenericTarget<TargetSparc> {
  using GenericTarget::GenericTarget;

  static constexpr int defaultWidth = 32;

  CodeGenSpecifics::Marshalling
  complexArgumentType(mlir::Location, mlir::Type eleTy) const override {
    assert(fir::isa_real(eleTy));
    CodeGenSpecifics::Marshalling marshal;
    // Use a type that will be translated into LLVM as:
    // { t, t }   struct of 2 eleTy
    auto structTy =
        mlir::TupleType::get(eleTy.getContext(), mlir::TypeRange{eleTy, eleTy});
    marshal.emplace_back(fir::ReferenceType::get(structTy), AT{});
    return marshal;
  }

  CodeGenSpecifics::Marshalling
  complexReturnType(mlir::Location loc, mlir::Type eleTy) const override {
    assert(fir::isa_real(eleTy));
    CodeGenSpecifics::Marshalling marshal;
    // Use a type that will be translated into LLVM as:
    // { t, t }   struct of 2 eleTy, byval
    auto structTy =
        mlir::TupleType::get(eleTy.getContext(), mlir::TypeRange{eleTy, eleTy});
    marshal.emplace_back(fir::ReferenceType::get(structTy),
                         AT{/*alignment=*/0, /*byval=*/true});
    return marshal;
  }
};
} // namespace

//===----------------------------------------------------------------------===//
// sparcv9 (sparc 64 bit) target specifics.
//===----------------------------------------------------------------------===//

namespace {
struct TargetSparcV9 : public GenericTarget<TargetSparcV9> {
  using GenericTarget::GenericTarget;

  static constexpr int defaultWidth = 64;

  CodeGenSpecifics::Marshalling
  complexArgumentType(mlir::Location loc, mlir::Type eleTy) const override {
    CodeGenSpecifics::Marshalling marshal;
    const auto *sem = &floatToSemantics(kindMap, eleTy);
    if (sem == &llvm::APFloat::IEEEsingle() ||
        sem == &llvm::APFloat::IEEEdouble()) {
      // two distinct float, double arguments
      marshal.emplace_back(eleTy, AT{});
      marshal.emplace_back(eleTy, AT{});
    } else if (sem == &llvm::APFloat::IEEEquad()) {
      // Use a type that will be translated into LLVM as:
      // { fp128, fp128 }   struct of 2 fp128, byval, align 16
      marshal.emplace_back(
          fir::ReferenceType::get(mlir::TupleType::get(
              eleTy.getContext(), mlir::TypeRange{eleTy, eleTy})),
          AT{/*align=*/16, /*byval=*/true});
    } else {
      TODO(loc, "complex for this precision");
    }
    return marshal;
  }

  CodeGenSpecifics::Marshalling
  complexReturnType(mlir::Location loc, mlir::Type eleTy) const override {
    CodeGenSpecifics::Marshalling marshal;
    // Use a type that will be translated into LLVM as:
    // { eleTy, eleTy }   struct of 2 eleTy
    marshal.emplace_back(
        mlir::TupleType::get(eleTy.getContext(), mlir::TypeRange{eleTy, eleTy}),
        AT{});
    return marshal;
  }
};
} // namespace

//===----------------------------------------------------------------------===//
// RISCV64 linux target specifics.
//===----------------------------------------------------------------------===//

namespace {
struct TargetRISCV64 : public GenericTarget<TargetRISCV64> {
  using GenericTarget::GenericTarget;

  static constexpr int defaultWidth = 64;

  CodeGenSpecifics::Marshalling
  complexArgumentType(mlir::Location loc, mlir::Type eleTy) const override {
    CodeGenSpecifics::Marshalling marshal;
    const auto *sem = &floatToSemantics(kindMap, eleTy);
    if (sem == &llvm::APFloat::IEEEsingle() ||
        sem == &llvm::APFloat::IEEEdouble()) {
      // Two distinct element type arguments (re, im)
      marshal.emplace_back(eleTy, AT{});
      marshal.emplace_back(eleTy, AT{});
    } else {
      TODO(loc, "complex for this precision");
    }
    return marshal;
  }

  CodeGenSpecifics::Marshalling
  complexReturnType(mlir::Location loc, mlir::Type eleTy) const override {
    CodeGenSpecifics::Marshalling marshal;
    const auto *sem = &floatToSemantics(kindMap, eleTy);
    if (sem == &llvm::APFloat::IEEEsingle() ||
        sem == &llvm::APFloat::IEEEdouble()) {
      // Use a type that will be translated into LLVM as:
      // { t, t }   struct of 2 eleTy, byVal
      marshal.emplace_back(mlir::TupleType::get(eleTy.getContext(),
                                                mlir::TypeRange{eleTy, eleTy}),
                           AT{/*alignment=*/0, /*byval=*/true});
    } else {
      TODO(loc, "complex for this precision");
    }
    return marshal;
  }
};
} // namespace

//===----------------------------------------------------------------------===//
// AMDGPU linux target specifics.
//===----------------------------------------------------------------------===//

namespace {
struct TargetAMDGPU : public GenericTarget<TargetAMDGPU> {
  using GenericTarget::GenericTarget;

  // Default size (in bits) of the index type for strings.
  static constexpr int defaultWidth = 64;

  CodeGenSpecifics::Marshalling
  complexArgumentType(mlir::Location loc, mlir::Type eleTy) const override {
    CodeGenSpecifics::Marshalling marshal;
    TODO(loc, "handle complex argument types");
    return marshal;
  }

  CodeGenSpecifics::Marshalling
  complexReturnType(mlir::Location loc, mlir::Type eleTy) const override {
    CodeGenSpecifics::Marshalling marshal;
    TODO(loc, "handle complex return types");
    return marshal;
  }
};
} // namespace

//===----------------------------------------------------------------------===//
// NVPTX linux target specifics.
//===----------------------------------------------------------------------===//

namespace {
struct TargetNVPTX : public GenericTarget<TargetNVPTX> {
  using GenericTarget::GenericTarget;

  // Default size (in bits) of the index type for strings.
  static constexpr int defaultWidth = 64;

  CodeGenSpecifics::Marshalling
  complexArgumentType(mlir::Location loc, mlir::Type eleTy) const override {
    CodeGenSpecifics::Marshalling marshal;
    TODO(loc, "handle complex argument types");
    return marshal;
  }

  CodeGenSpecifics::Marshalling
  complexReturnType(mlir::Location loc, mlir::Type eleTy) const override {
    CodeGenSpecifics::Marshalling marshal;
    TODO(loc, "handle complex return types");
    return marshal;
  }
};
} // namespace

//===----------------------------------------------------------------------===//
// LoongArch64 linux target specifics.
//===----------------------------------------------------------------------===//

namespace {
struct TargetLoongArch64 : public GenericTarget<TargetLoongArch64> {
  using GenericTarget::GenericTarget;

  static constexpr int defaultWidth = 64;

  CodeGenSpecifics::Marshalling
  complexArgumentType(mlir::Location loc, mlir::Type eleTy) const override {
    CodeGenSpecifics::Marshalling marshal;
    const auto *sem = &floatToSemantics(kindMap, eleTy);
    if (sem == &llvm::APFloat::IEEEsingle() ||
        sem == &llvm::APFloat::IEEEdouble()) {
      // Two distinct element type arguments (re, im)
      marshal.emplace_back(eleTy, AT{});
      marshal.emplace_back(eleTy, AT{});
    } else {
      TODO(loc, "complex for this precision");
    }
    return marshal;
  }

  CodeGenSpecifics::Marshalling
  complexReturnType(mlir::Location loc, mlir::Type eleTy) const override {
    CodeGenSpecifics::Marshalling marshal;
    const auto *sem = &floatToSemantics(kindMap, eleTy);
    if (sem == &llvm::APFloat::IEEEsingle() ||
        sem == &llvm::APFloat::IEEEdouble()) {
      // Use a type that will be translated into LLVM as:
      // { t, t }   struct of 2 eleTy, byVal
      marshal.emplace_back(mlir::TupleType::get(eleTy.getContext(),
                                                mlir::TypeRange{eleTy, eleTy}),
                           AT{/*alignment=*/0, /*byval=*/true});
    } else {
      TODO(loc, "complex for this precision");
    }
    return marshal;
  }
};
} // namespace

// Instantiate the overloaded target instance based on the triple value.
// TODO: Add other targets to this file as needed.
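// For example, a triple like "x86_64-unknown-linux-gnu" selects
// TargetX86_64, "x86_64-pc-windows-msvc" selects TargetX86_64Win, and
// "aarch64-unknown-linux-gnu" selects TargetAArch64; architectures not
// listed below fall through to the "target not implemented" TODO.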
std::unique_ptr<fir::CodeGenSpecifics>
fir::CodeGenSpecifics::get(mlir::MLIRContext *ctx, llvm::Triple &&trp,
                           KindMapping &&kindMap) {
  switch (trp.getArch()) {
  default:
    break;
  case llvm::Triple::ArchType::x86:
    if (trp.isOSWindows())
      return std::make_unique<TargetI386Win>(ctx, std::move(trp),
                                             std::move(kindMap));
    else
      return std::make_unique<TargetI386>(ctx, std::move(trp),
                                          std::move(kindMap));
  case llvm::Triple::ArchType::x86_64:
    if (trp.isOSWindows())
      return std::make_unique<TargetX86_64Win>(ctx, std::move(trp),
                                               std::move(kindMap));
    else
      return std::make_unique<TargetX86_64>(ctx, std::move(trp),
                                            std::move(kindMap));
  case llvm::Triple::ArchType::aarch64:
    return std::make_unique<TargetAArch64>(ctx, std::move(trp),
                                           std::move(kindMap));
  case llvm::Triple::ArchType::ppc64:
    return std::make_unique<TargetPPC64>(ctx, std::move(trp),
                                         std::move(kindMap));
  case llvm::Triple::ArchType::ppc64le:
    return std::make_unique<TargetPPC64le>(ctx, std::move(trp),
                                           std::move(kindMap));
  case llvm::Triple::ArchType::sparc:
    return std::make_unique<TargetSparc>(ctx, std::move(trp),
                                         std::move(kindMap));
  case llvm::Triple::ArchType::sparcv9:
    return std::make_unique<TargetSparcV9>(ctx, std::move(trp),
                                           std::move(kindMap));
  case llvm::Triple::ArchType::riscv64:
    return std::make_unique<TargetRISCV64>(ctx, std::move(trp),
                                           std::move(kindMap));
  case llvm::Triple::ArchType::amdgcn:
    return std::make_unique<TargetAMDGPU>(ctx, std::move(trp),
                                          std::move(kindMap));
  case llvm::Triple::ArchType::nvptx64:
    return std::make_unique<TargetNVPTX>(ctx, std::move(trp),
                                         std::move(kindMap));
  case llvm::Triple::ArchType::loongarch64:
    return std::make_unique<TargetLoongArch64>(ctx, std::move(trp),
                                               std::move(kindMap));
  }
  TODO(mlir::UnknownLoc::get(ctx), "target not implemented");
}