//===-- Utils.cpp -----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Coding style: https://mlir.llvm.org/getting_started/DeveloperGuide/
//
//===----------------------------------------------------------------------===//

#include "Utils.h"

#include "Clauses.h"

#include <flang/Lower/AbstractConverter.h>
#include <flang/Lower/ConvertType.h>
#include <flang/Lower/DirectivesCommon.h>
#include <flang/Lower/PFTBuilder.h>
#include <flang/Optimizer/Builder/FIRBuilder.h>
#include <flang/Optimizer/Builder/Todo.h>
#include <flang/Parser/parse-tree.h>
#include <flang/Parser/tools.h>
#include <flang/Semantics/tools.h>
#include <llvm/Support/CommandLine.h>

#include <iterator>

llvm::cl::opt<bool> treatIndexAsSection(
    "openmp-treat-index-as-section",
    llvm::cl::desc("In the OpenMP data clauses treat `a(N)` as `a(N:N)`."),
    llvm::cl::init(true));

llvm::cl::opt<bool> enableDelayedPrivatization(
    "openmp-enable-delayed-privatization",
    llvm::cl::desc(
        "Emit `[first]private` variables as clauses on the MLIR ops."),
    llvm::cl::init(true));

llvm::cl::opt<bool> enableDelayedPrivatizationStaging(
    "openmp-enable-delayed-privatization-staging",
    llvm::cl::desc("For partially supported constructs, emit `[first]private` "
                   "variables as clauses on the MLIR ops."),
    llvm::cl::init(false));

namespace Fortran {
namespace lower {
namespace omp {

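// Returns the constant value of the `collapse` clause if one is present in
// the given clause list, or 1 when the clause is absent. For example (an
// illustrative Fortran snippet, not taken from a test):
//
//   !$omp do collapse(2)   ! getCollapseValue returns 2
//   !$omp do               ! no collapse clause, returns 1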
int64_t getCollapseValue(const List<Clause> &clauses) {
  auto iter = llvm::find_if(clauses, [](const Clause &clause) {
    return clause.id == llvm::omp::Clause::OMPC_collapse;
  });
  if (iter != clauses.end()) {
    const auto &collapse = std::get<clause::Collapse>(iter->u);
    return evaluate::ToInt64(collapse.v).value();
  }
  return 1;
}

void genObjectList(const ObjectList &objects,
                   lower::AbstractConverter &converter,
                   llvm::SmallVectorImpl<mlir::Value> &operands) {
  for (const Object &object : objects) {
    const semantics::Symbol *sym = object.sym();
    assert(sym && "Expected Symbol");
    if (mlir::Value variable = converter.getSymbolAddress(*sym)) {
      operands.push_back(variable);
    } else if (const auto *details =
                   sym->detailsIf<semantics::HostAssocDetails>()) {
      operands.push_back(converter.getSymbolAddress(details->symbol()));
      converter.copySymbolBinding(details->symbol(), *sym);
    }
  }
}

mlir::Type getLoopVarType(lower::AbstractConverter &converter,
                          std::size_t loopVarTypeSize) {
  // OpenMP runtime requires 32-bit or 64-bit loop variables.
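  // For example (illustrative only, assuming the usual Fortran kind sizes):
  //   integer(kind=2)  -> i32 loop variable
  //   integer(kind=8)  -> i64 loop variable
  //   integer(kind=16) -> i64 loop variable, with the warning emitted below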
  loopVarTypeSize = loopVarTypeSize * 8;
  if (loopVarTypeSize < 32) {
    loopVarTypeSize = 32;
  } else if (loopVarTypeSize > 64) {
    loopVarTypeSize = 64;
    mlir::emitWarning(converter.getCurrentLocation(),
                      "OpenMP loop iteration variable cannot have more than 64 "
                      "bits in size and will be narrowed to 64 bits.");
  }
  assert((loopVarTypeSize == 32 || loopVarTypeSize == 64) &&
         "OpenMP loop iteration variable size must be transformed into 32-bit "
         "or 64-bit");
  return converter.getFirOpBuilder().getIntegerType(loopVarTypeSize);
}

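// Returns the symbol of the iteration variable of the given evaluation when
// it is a DO construct with explicit loop control, and nullptr otherwise.
// For example (illustrative only), for the loop:
//
//   do i = 1, n
//     ...
//   end do
//
// the returned symbol is the one associated with `i`.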
semantics::Symbol *
getIterationVariableSymbol(const lower::pft::Evaluation &eval) {
  return eval.visit(common::visitors{
      [&](const parser::DoConstruct &doLoop) {
        if (const auto &maybeCtrl = doLoop.GetLoopControl()) {
          using LoopControl = parser::LoopControl;
          if (auto *bounds = std::get_if<LoopControl::Bounds>(&maybeCtrl->u)) {
            static_assert(std::is_same_v<decltype(bounds->name),
                                         parser::Scalar<parser::Name>>);
            return bounds->name.thing.symbol;
          }
        }
        return static_cast<semantics::Symbol *>(nullptr);
      },
      [](auto &&) { return static_cast<semantics::Symbol *>(nullptr); },
  });
}

void gatherFuncAndVarSyms(
    const ObjectList &objects, mlir::omp::DeclareTargetCaptureClause clause,
    llvm::SmallVectorImpl<DeclareTargetCapturePair> &symbolAndClause) {
  for (const Object &object : objects)
    symbolAndClause.emplace_back(clause, *object.sym());
}

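// Creates an omp.map.info operation describing how `baseAddr` should be
// mapped to the target device. A rough sketch of the kind of MLIR produced
// (illustrative only, with operands and types elided) is:
//
//   %map = omp.map.info var_ptr(%addr : ...) map_clauses(tofrom)
//            capture(ByRef) bounds(...) -> ... {name = "var"}
//
// Box (descriptor) base addresses are first unwrapped with fir.box_addr, and
// dynamic array extents are dropped from the stored type, since the bounds
// operands carry the dimension information.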
mlir::omp::MapInfoOp
createMapInfoOp(fir::FirOpBuilder &builder, mlir::Location loc,
                mlir::Value baseAddr, mlir::Value varPtrPtr,
                llvm::StringRef name, llvm::ArrayRef<mlir::Value> bounds,
                llvm::ArrayRef<mlir::Value> members,
                mlir::ArrayAttr membersIndex, uint64_t mapType,
                mlir::omp::VariableCaptureKind mapCaptureType, mlir::Type retTy,
                bool partialMap) {
  if (auto boxTy = llvm::dyn_cast<fir::BaseBoxType>(baseAddr.getType())) {
    baseAddr = builder.create<fir::BoxAddrOp>(loc, baseAddr);
    retTy = baseAddr.getType();
  }

  mlir::TypeAttr varType = mlir::TypeAttr::get(
      llvm::cast<mlir::omp::PointerLikeType>(retTy).getElementType());

  // For types with unknown extents such as <2x?xi32> we discard the incomplete
  // type info and only retain the base type. The correct dimensions are later
  // recovered through the bounds info.
  if (auto seqType = llvm::dyn_cast<fir::SequenceType>(varType.getValue()))
    if (seqType.hasDynamicExtents())
      varType = mlir::TypeAttr::get(seqType.getEleTy());

  mlir::omp::MapInfoOp op = builder.create<mlir::omp::MapInfoOp>(
      loc, retTy, baseAddr, varType, varPtrPtr, members, membersIndex, bounds,
      builder.getIntegerAttr(builder.getIntegerType(64, false), mapType),
      builder.getAttr<mlir::omp::VariableCaptureKindAttr>(mapCaptureType),
      builder.getStringAttr(name), builder.getBoolAttr(partialMap));
  return op;
}

// This function gathers the individual omp::Objects that make up a
// larger omp::Object symbol.
//
// For example, provided the larger symbol "parent%child%member", this
// function breaks it up into its constituent components ("parent",
// "child", "member") so that we can access each individual component and
// introspect its details. Note that the function breaks the symbol up from
// RHS to LHS ("member" to "parent") and then reverses the result, so the
// returned omp::ObjectList reads LHS to RHS, with the "parent" at the
// beginning.
omp::ObjectList gatherObjectsOf(omp::Object derivedTypeMember,
                                semantics::SemanticsContext &semaCtx) {
  omp::ObjectList objList;
  std::optional<omp::Object> baseObj = derivedTypeMember;
  while (baseObj.has_value()) {
    objList.push_back(baseObj.value());
    baseObj = getBaseObject(baseObj.value(), semaCtx);
  }
  return omp::ObjectList{llvm::reverse(objList)};
}

// This function generates a series of indices from a provided omp::Object
// that resolves to an ArrayRef symbol. For example, for "array(2,3,4)" it
// would generate the indices "[1][2][3]", offsetting each subscript by -1
// to account for Fortran's one-based indexing.
//
// These indices can then be provided to a coordinate operation or other
// GEP-like operation to access the relevant positional element of the
// array.
//
// Note that the function currently only supports integer subscripts and
// not triplets, i.e. array(1:2:3).
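//
// As a rough sketch (illustrative FIR only, values and types elided), a
// single variable subscript such as `array(N)` lowers to something like:
//
//   %n   = fir.load %N : ...
//   %idx = fir.convert %n : (i32) -> index
//   %c1  = arith.constant 1 : index
//   %sub = arith.subi %idx, %c1 : index
//
// where %sub is the zero-based index appended to `indices`.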
static void generateArrayIndices(lower::AbstractConverter &converter,
                                 fir::FirOpBuilder &firOpBuilder,
                                 lower::StatementContext &stmtCtx,
                                 mlir::Location clauseLocation,
                                 llvm::SmallVectorImpl<mlir::Value> &indices,
                                 omp::Object object) {
  auto maybeRef = evaluate::ExtractDataRef(*object.ref());
  if (!maybeRef)
    return;

  auto *arr = std::get_if<evaluate::ArrayRef>(&maybeRef->u);
  if (!arr)
    return;

  for (auto v : arr->subscript()) {
    if (std::holds_alternative<Triplet>(v.u))
      TODO(clauseLocation, "Triplet indexing in map clause is unsupported");

    auto expr = std::get<Fortran::evaluate::IndirectSubscriptIntegerExpr>(v.u);
    mlir::Value subscript =
        fir::getBase(converter.genExprValue(toEvExpr(expr.value()), stmtCtx));
    mlir::Value one = firOpBuilder.createIntegerConstant(
        clauseLocation, firOpBuilder.getIndexType(), 1);
    subscript = firOpBuilder.createConvert(
        clauseLocation, firOpBuilder.getIndexType(), subscript);
    indices.push_back(firOpBuilder.create<mlir::arith::SubIOp>(clauseLocation,
                                                               subscript, one));
  }
}

/// When mapping members of derived types, there is a chance that one of the
/// members along the way to a mapped member is a descriptor, in which case
/// we have to make sure we generate a map for those along the way; otherwise
/// we will be missing a chunk of the data required to actually map the member
/// type to the device. This function effectively generates these maps and the
/// appropriate data accesses required to generate these maps. It will avoid
/// creating duplicate maps, as duplicates are just as bad as unmapped
/// descriptor data in a lot of cases for the runtime (and unnecessary
/// data movement should be avoided where possible).
///
/// As an example, for the following mapping:
///
/// type :: vertexes
///     integer(4), allocatable :: vertexx(:)
///     integer(4), allocatable :: vertexy(:)
/// end type vertexes
///
/// type :: dtype
///     real(4) :: i
///     type(vertexes), allocatable :: vertexes(:)
/// end type dtype
///
/// type(dtype), allocatable :: alloca_dtype
///
/// !$omp target map(tofrom: alloca_dtype%vertexes(N1)%vertexx)
///
/// The below HLFIR/FIR is generated (trimmed for conciseness).
///
/// On the first iteration we index into the record type alloca_dtype
/// to access "vertexes"; we then generate a map for this descriptor
/// alongside bounds to indicate we only need the one member, rather than
/// the whole array block in this case (in theory we could map its
/// entirety at the cost of data transfer bandwidth):
///
/// %13:2 = hlfir.declare ... "alloca_dtype" ...
/// %39 = fir.load %13#0 : ...
/// %40 = fir.coordinate_of %39, %c1 : ...
/// %51 = omp.map.info var_ptr(%40 : ...) map_clauses(to) capture(ByRef) ...
/// %52 = fir.load %40 : ...
///
/// On the second iteration we generate the access to "vertexes(N1)",
/// utilising the N1 index:
///
/// %53 = load N1 ...
/// %54 = fir.convert %53 : (i32) -> i64
/// %55 = fir.convert %54 : (i64) -> index
/// %56 = arith.subi %55, %c1 : index
/// %57 = fir.coordinate_of %52, %56 : ...
///
/// Still in the second iteration we access the allocatable member "vertexx";
/// we return %58 from the function and provide it to the final and "main"
/// map of processMap (generated by the record type segment of the below
/// function). If this were not the final symbol in the list, i.e. we accessed
/// a member below vertexx, we would have generated the map below as we did in
/// the first iteration and then continued to generate further coordinates to
/// access further components as required:
///
/// %58 = fir.coordinate_of %57, %c0 : ...
/// %61 = omp.map.info var_ptr(%58 : ...) map_clauses(to) capture(ByRef) ...
///
/// Parent mapping containing the previously generated mapped members,
/// generated at a later step but shown here to showcase the "end" result:
///
/// omp.map.info var_ptr(%13#1 : ...) map_clauses(to) capture(ByRef)
///   members(%50, %61 : [0, 1, 0], [0, 1, 0] : ...
///
/// \param objectList - The list of omp::Object symbol data for each parent
///   to the mapped member (also includes the mapped member), generated via
///   gatherObjectsOf.
/// \param indices - List of index data associated with the mapped member
///   symbol, which identifies the placement of the member in its parent and
///   helps generate the appropriate member accesses. These indices can be
///   generated via generateMemberPlacementIndices.
/// \param asFortran - A string generated from the mapped variable to be
///   associated with the main map, generally (but not restricted to)
///   generated via gatherDataOperandAddrAndBounds or other
///   DirectivesCommon.h utilities.
/// \param mapTypeBits - The map flags that will be associated with the
///   generated maps, minus alterations of the TO and FROM bits for the
///   intermediate components to prevent accidental overwriting on device
///   write back.
mlir::Value createParentSymAndGenIntermediateMaps(
    mlir::Location clauseLocation, lower::AbstractConverter &converter,
    semantics::SemanticsContext &semaCtx, lower::StatementContext &stmtCtx,
    omp::ObjectList &objectList, llvm::SmallVectorImpl<int64_t> &indices,
    OmpMapParentAndMemberData &parentMemberIndices, llvm::StringRef asFortran,
    llvm::omp::OpenMPOffloadMappingFlags mapTypeBits) {
  fir::FirOpBuilder &firOpBuilder = converter.getFirOpBuilder();

  /// Checks if an omp::Object is an array expression with a subscript, e.g.
  /// array(1,2).
  auto isArrayExprWithSubscript = [](omp::Object obj) {
    if (auto maybeRef = evaluate::ExtractDataRef(*obj.ref())) {
      evaluate::DataRef ref = *maybeRef;
      if (auto *arr = std::get_if<evaluate::ArrayRef>(&ref.u))
        return !arr->subscript().empty();
    }
    return false;
  };

  // Generate the access to the original parent base address.
  fir::factory::AddrAndBoundsInfo parentBaseAddr =
      lower::getDataOperandBaseAddr(converter, firOpBuilder,
                                    *objectList[0].sym(), clauseLocation);
  mlir::Value curValue = parentBaseAddr.addr;

  // Iterate over all objects in the objectList; this should consist of all
  // record types between the parent and the member being mapped (including
  // the parent). The object list may also contain array objects, which can
  // occur when specifying bounds or a specific element access within a
  // member map; we skip these.
  size_t currentIndicesIdx = 0;
  for (size_t i = 0; i < objectList.size(); ++i) {
    // If we encounter a sequence type, i.e. an array, we must generate the
    // correct coordinate operation to index into the array to proceed
    // further; currently this is only relevant where we encounter subscripts.
    //
    // For example, in the following case:
    //
    //   map(tofrom: array_dtype(4)%internal_dtypes(3)%float_elements(4))
    //
    // we must generate coordinate operation accesses for each subscript
    // we encounter.
    if (fir::SequenceType arrType = mlir::dyn_cast<fir::SequenceType>(
            fir::unwrapPassByRefType(curValue.getType()))) {
      if (isArrayExprWithSubscript(objectList[i])) {
        llvm::SmallVector<mlir::Value> subscriptIndices;
        generateArrayIndices(converter, firOpBuilder, stmtCtx, clauseLocation,
                             subscriptIndices, objectList[i]);
        assert(!subscriptIndices.empty() &&
               "missing expected indices for map clause");
        curValue = firOpBuilder.create<fir::CoordinateOp>(
            clauseLocation, firOpBuilder.getRefType(arrType.getEleTy()),
            curValue, subscriptIndices);
      }
    }

    // If we encounter a record type, we must access the subsequent member
    // by indexing into it with a coordinate operation; we utilise the index
    // information generated previously and passed in to work out the correct
    // member to access and the corresponding member type.
    if (fir::RecordType recordType = mlir::dyn_cast<fir::RecordType>(
            fir::unwrapPassByRefType(curValue.getType()))) {
      mlir::Value idxConst = firOpBuilder.createIntegerConstant(
          clauseLocation, firOpBuilder.getIndexType(),
          indices[currentIndicesIdx]);
      mlir::Type memberTy =
          recordType.getTypeList().at(indices[currentIndicesIdx]).second;
      curValue = firOpBuilder.create<fir::CoordinateOp>(
          clauseLocation, firOpBuilder.getRefType(memberTy), curValue,
          idxConst);

      // Skip mapping and the subsequent load if we're the final member or not
      // a type with a descriptor such as a pointer/allocatable. If we're the
      // final member, the map will be generated by the processMap call that
      // invoked this function, and if we're not a type with a descriptor then
      // there is no need to generate an intermediate map for it: we only need
      // to generate a map if a member is a descriptor type (and thus obscures
      // the members it contains behind a pointer whose data also needs to be
      // mapped).
      if ((currentIndicesIdx == indices.size() - 1) ||
          !fir::isTypeWithDescriptor(memberTy)) {
        currentIndicesIdx++;
        continue;
      }

      llvm::SmallVector<int64_t> interimIndices(
          indices.begin(), std::next(indices.begin(), currentIndicesIdx + 1));
      // Verify we haven't already created a map for this particular member by
      // checking the list of members already mapped for the current parent,
      // stored in the parentMemberIndices structure.
      if (!parentMemberIndices.isDuplicateMemberMapInfo(interimIndices)) {
        // Generate bounds operations using the standard lowering utility.
        // Unfortunately, this currently does a bit more than just generate
        // bounds, and we discard the other results. It may be useful to extend
        // the utility to provide just the bounds in the future.
        llvm::SmallVector<mlir::Value> interimBounds;
        if (i + 1 < objectList.size() &&
            objectList[i + 1].sym()->IsObjectArray()) {
          std::stringstream interimFortran;
          Fortran::lower::gatherDataOperandAddrAndBounds<
              mlir::omp::MapBoundsOp, mlir::omp::MapBoundsType>(
              converter, converter.getFirOpBuilder(), semaCtx,
              converter.getFctCtx(), *objectList[i + 1].sym(),
              objectList[i + 1].ref(), clauseLocation, interimFortran,
              interimBounds, treatIndexAsSection);
        }

        // Remove all map TO, FROM and TOFROM bits from the intermediate
        // allocatable maps; we simply wish to alloc or release them. It may be
        // safer to just pass OMP_MAP_NONE as the map type, but we may still
        // need some of the other map types the mapped member utilises, so for
        // now it's good to keep an eye on this.
        llvm::omp::OpenMPOffloadMappingFlags interimMapType = mapTypeBits;
        interimMapType &= ~llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_TO;
        interimMapType &= ~llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_FROM;

        // Create a map for the intermediate member and insert it and its
        // indices into the parentMemberIndices list to track it.
        mlir::omp::MapInfoOp mapOp = createMapInfoOp(
            firOpBuilder, clauseLocation, curValue,
            /*varPtrPtr=*/mlir::Value{}, asFortran,
            /*bounds=*/interimBounds,
            /*members=*/{},
            /*membersIndex=*/mlir::ArrayAttr{},
            static_cast<
                std::underlying_type_t<llvm::omp::OpenMPOffloadMappingFlags>>(
                interimMapType),
            mlir::omp::VariableCaptureKind::ByRef, curValue.getType());

        parentMemberIndices.memberPlacementIndices.push_back(interimIndices);
        parentMemberIndices.memberMap.push_back(mapOp);
      }

      // Load the currently accessed member, so we can continue to access
      // further segments.
      curValue = firOpBuilder.create<fir::LoadOp>(clauseLocation, curValue);
      currentIndicesIdx++;
    }
  }

  return curValue;
}

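// Returns the zero-based position of a component within its parent derived
// type, or -1 if it cannot be found. For example (reusing the illustrative
// types from the comment above), for `dtype` with components `i` and
// `vertexes`, the component symbol `vertexes` yields a placement of 1.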
static int64_t
getComponentPlacementInParent(const semantics::Symbol *componentSym) {
  const auto *derived = componentSym->owner()
                            .derivedTypeSpec()
                            ->typeSymbol()
                            .detailsIf<semantics::DerivedTypeDetails>();
  assert(derived &&
         "expected derived type details when processing component symbol");
  for (auto [placement, name] : llvm::enumerate(derived->componentNames()))
    if (name == componentSym->name())
      return placement;
  return -1;
}

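// Walks from the given object towards its base until it finds the closest
// object whose data reference is a derived-type component, and returns it
// (std::nullopt if there is none). For example (illustrative only), for the
// object corresponding to `a%b(1)` the array reference is skipped and the
// component object `a%b` is returned.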
static std::optional<Object>
getComponentObject(std::optional<Object> object,
                   semantics::SemanticsContext &semaCtx) {
  if (!object)
    return std::nullopt;

  auto ref = evaluate::ExtractDataRef(*object.value().ref());
  if (!ref)
    return std::nullopt;

  if (std::holds_alternative<evaluate::Component>(ref->u))
    return object;

  auto baseObj = getBaseObject(object.value(), semaCtx);
  if (!baseObj)
    return std::nullopt;

  return getComponentObject(baseObj.value(), semaCtx);
}

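// Fills `indices` with the zero-based placement of each component on the
// path from the top-level parent down to the mapped member. For example
// (reusing the illustrative types from the earlier comment), mapping
// `alloca_dtype%vertexes(N1)%vertexx` would produce the indices [1, 0]:
// `vertexes` is component 1 of `dtype` and `vertexx` is component 0 of
// `vertexes`.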
void generateMemberPlacementIndices(const Object &object,
                                    llvm::SmallVectorImpl<int64_t> &indices,
                                    semantics::SemanticsContext &semaCtx) {
  assert(indices.empty() && "indices vector passed to "
                            "generateMemberPlacementIndices should be empty");
  auto compObj = getComponentObject(object, semaCtx);

  while (compObj) {
    int64_t index = getComponentPlacementInParent(compObj->sym());
    assert(
        index >= 0 &&
        "unexpected index value returned from getComponentPlacementInParent");
    indices.push_back(index);
    compObj =
        getComponentObject(getBaseObject(compObj.value(), semaCtx), semaCtx);
  }

  indices = llvm::SmallVector<int64_t>{llvm::reverse(indices)};
}

void OmpMapParentAndMemberData::addChildIndexAndMapToParent(
    const omp::Object &object, mlir::omp::MapInfoOp &mapOp,
    semantics::SemanticsContext &semaCtx) {
  llvm::SmallVector<int64_t> indices;
  generateMemberPlacementIndices(object, indices, semaCtx);
  memberPlacementIndices.push_back(indices);
  memberMap.push_back(mapOp);
}

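// Returns true if the mapped object itself, or any parent on the path to it,
// is an allocatable or an object pointer. For example (illustrative only),
// given `type(dtype), allocatable :: alloca_dtype`, mapping `alloca_dtype%i`
// returns true because the parent is allocatable, whereas a member of a
// non-allocatable, pointer-free derived type returns false.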
bool isMemberOrParentAllocatableOrPointer(
    const Object &object, semantics::SemanticsContext &semaCtx) {
  if (semantics::IsAllocatableOrObjectPointer(object.sym()))
    return true;

  auto compObj = getBaseObject(object, semaCtx);
  while (compObj) {
    if (semantics::IsAllocatableOrObjectPointer(compObj.value().sym()))
      return true;
    compObj = getBaseObject(compObj.value(), semaCtx);
  }

  return false;
}

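// For each parent in parentMemberIndices, attaches the previously generated
// member maps to the parent's omp.map.info operation, creating a new parent
// map first when the user did not map the parent explicitly. The end result
// is a parent map that references its members, roughly of the form
// (illustrative only, operands and types elided):
//
//   omp.map.info var_ptr(%parent : ...) map_clauses(tofrom) capture(ByRef)
//     members(%member_map : [1, 0] : ...) -> ...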
void insertChildMapInfoIntoParent(
    lower::AbstractConverter &converter, semantics::SemanticsContext &semaCtx,
    lower::StatementContext &stmtCtx,
    std::map<Object, OmpMapParentAndMemberData> &parentMemberIndices,
    llvm::SmallVectorImpl<mlir::Value> &mapOperands,
    llvm::SmallVectorImpl<const semantics::Symbol *> &mapSyms) {
  fir::FirOpBuilder &firOpBuilder = converter.getFirOpBuilder();
  for (auto indices : parentMemberIndices) {
    auto *parentIter =
        llvm::find_if(mapSyms, [&indices](const semantics::Symbol *v) {
          return v == indices.first.sym();
        });
    if (parentIter != mapSyms.end()) {
      auto mapOp = llvm::cast<mlir::omp::MapInfoOp>(
          mapOperands[std::distance(mapSyms.begin(), parentIter)]
              .getDefiningOp());

      // NOTE: To maintain appropriate SSA ordering, we move the parent map,
      // which will now have references to its children, after the last of its
      // members to be generated. This is necessary when a user has defined a
      // series of parent and child maps where the parent precedes the
      // children. An alternative may be to delay generation of map info
      // operations from the clauses and organize them first before
      // generation, or to use the topologicalSort utility, which will enforce
      // a stronger SSA dominance ordering at the cost of efficiency/time.
      mapOp->moveAfter(indices.second.memberMap.back());

      for (mlir::omp::MapInfoOp memberMap : indices.second.memberMap)
        mapOp.getMembersMutable().append(memberMap.getResult());

      mapOp.setMembersIndexAttr(firOpBuilder.create2DI64ArrayAttr(
          indices.second.memberPlacementIndices));
    } else {
      // NOTE: We take the map type of the first child; this may not be the
      // correct thing to do, however, we shall see. For the moment it allows
      // this to work with enter and exit without causing MLIR verification
      // issues. The more appropriate thing may be to take the "main" map type
      // clause from the directive being used.
      uint64_t mapType = indices.second.memberMap[0].getMapType().value_or(0);

      llvm::SmallVector<mlir::Value> members;
      members.reserve(indices.second.memberMap.size());
      for (mlir::omp::MapInfoOp memberMap : indices.second.memberMap)
        members.push_back(memberMap.getResult());

      // Create parent to emplace and bind members
      llvm::SmallVector<mlir::Value> bounds;
      std::stringstream asFortran;
      fir::factory::AddrAndBoundsInfo info =
          lower::gatherDataOperandAddrAndBounds<mlir::omp::MapBoundsOp,
                                                mlir::omp::MapBoundsType>(
              converter, firOpBuilder, semaCtx, converter.getFctCtx(),
              *indices.first.sym(), indices.first.ref(),
              converter.getCurrentLocation(), asFortran, bounds,
              treatIndexAsSection);

      mlir::omp::MapInfoOp mapOp = createMapInfoOp(
          firOpBuilder, info.rawInput.getLoc(), info.rawInput,
          /*varPtrPtr=*/mlir::Value(), asFortran.str(), bounds, members,
          firOpBuilder.create2DI64ArrayAttr(
              indices.second.memberPlacementIndices),
          mapType, mlir::omp::VariableCaptureKind::ByRef,
          info.rawInput.getType(),
          /*partialMap=*/true);

      mapOperands.push_back(mapOp);
      mapSyms.push_back(indices.first.sym());
    }
  }
}

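// Emits a TODO if the lastprivate clause carries the CONDITIONAL modifier,
// which is not yet supported. For example (illustrative only), lowering
//
//   !$omp do lastprivate(conditional: x)
//
// reaches the TODO, whereas a plain `lastprivate(x)` does not.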
void lastprivateModifierNotSupported(const omp::clause::Lastprivate &lastp,
                                     mlir::Location loc) {
  using Lastprivate = omp::clause::Lastprivate;
  auto &maybeMod =
      std::get<std::optional<Lastprivate::LastprivateModifier>>(lastp.t);
  if (maybeMod) {
    assert(*maybeMod == Lastprivate::LastprivateModifier::Conditional &&
           "Unexpected lastprivate modifier");
    TODO(loc, "lastprivate clause with CONDITIONAL modifier");
  }
}

} // namespace omp
} // namespace lower
} // namespace Fortran