//===- MapInfoFinalization.cpp -----------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
/// \file
/// An OpenMP dialect-related pass for FIR/HLFIR which performs some
/// pre-processing of MapInfoOps after the module has been lowered, in order
/// to finalize them.
///
/// For example, it expands MapInfoOps containing descriptor-related types
/// (fir::BoxType) into multiple MapInfoOps for the parent descriptor and its
/// pointer member components, so that each can be mapped individually. The
/// descriptor type is effectively treated as a record type for later lowering
/// in the OpenMP dialect.
///
/// The pass also adds MapInfoOps that are members of a parent object, but not
/// directly used in the body of a target region, to the region's BlockArgument
/// list, to maintain consistency across all MapInfoOps tied to a region either
/// directly or indirectly via a parent object.
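///
/// For illustration only (a rough sketch; types, attributes, and the exact
/// omp.map.info assembly are abbreviated and may differ from the real
/// output), a descriptor map such as:
///
///   %map = omp.map.info var_ptr(%box : !fir.ref<!fir.box<...>>, ...) ...
///
/// is expanded into roughly:
///
///   %addr     = fir.box_offset %box base_addr : ...
///   %map_addr = omp.map.info var_ptr(%box : ...) var_ptr_ptr(%addr : ...)
///                 bounds(...) ...
///   %map      = omp.map.info var_ptr(%box : ...)
///                 members(%map_addr : [0] : ...) ...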
//===----------------------------------------------------------------------===//

#include "flang/Optimizer/Builder/DirectivesCommon.h"
#include "flang/Optimizer/Builder/FIRBuilder.h"
#include "flang/Optimizer/Builder/HLFIRTools.h"
#include "flang/Optimizer/Dialect/FIRType.h"
#include "flang/Optimizer/Dialect/Support/KindMapping.h"
#include "flang/Optimizer/HLFIR/HLFIROps.h"
#include "flang/Optimizer/OpenMP/Passes.h"
#include "mlir/Analysis/SliceAnalysis.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/OpenMP/OpenMPDialect.h"
#include "mlir/IR/BuiltinDialect.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/SymbolTable.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Support/LLVM.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include <algorithm>
#include <cstddef>
#include <iterator>
#include <numeric>

namespace flangomp {
#define GEN_PASS_DEF_MAPINFOFINALIZATIONPASS
#include "flang/Optimizer/OpenMP/Passes.h.inc"
} // namespace flangomp

namespace {
class MapInfoFinalizationPass
    : public flangomp::impl::MapInfoFinalizationPassBase<
          MapInfoFinalizationPass> {
  /// Helper struct tracking a member's parent and the member's
  /// placement in the parent's member list.
  struct ParentAndPlacement {
    mlir::omp::MapInfoOp parent;
    size_t index;
  };

  /// Tracks any intermediate function/subroutine local allocations we
  /// generate for the descriptors of box type dummy arguments, so that
  /// we can retrieve them for subsequent reuse within the function's
  /// scope.
  ///
  ///      descriptor defining op
  ///      |                  corresponding local alloca
  ///      |                  |
  std::map<mlir::Operation *, mlir::Value> localBoxAllocas;

  /// getMemberUserList gathers all users of a particular MapInfoOp that are
  /// themselves MapInfoOps and places them into the mapMemberUsers list. Each
  /// entry records a map that the argument MapInfoOp "op" is a member of,
  /// together with the position of "op" in that user's member list. In other
  /// words, the generated list identifies every MapInfoOp that can be
  /// considered a parent of the passed-in "op", alongside the placement of
  /// "op" in that parent's member list.
  void
  getMemberUserList(mlir::omp::MapInfoOp op,
                    llvm::SmallVectorImpl<ParentAndPlacement> &mapMemberUsers) {
    for (auto *user : op->getUsers())
      if (auto map = mlir::dyn_cast_if_present<mlir::omp::MapInfoOp>(user))
        for (auto [i, mapMember] : llvm::enumerate(map.getMembers()))
          if (mapMember.getDefiningOp() == op)
            mapMemberUsers.push_back({map, i});
  }

  void getAsIntegers(llvm::ArrayRef<mlir::Attribute> values,
                     llvm::SmallVectorImpl<int64_t> &ints) {
    ints.reserve(values.size());
    llvm::transform(values, std::back_inserter(ints),
                    [](mlir::Attribute value) {
                      return mlir::cast<mlir::IntegerAttr>(value).getInt();
                    });
  }

  /// Expands a MapInfoOp's member indices back into a vector of vectors so
  /// that they can be modified easily; the attribute type used to store them
  /// does not currently have modifiable fields and is generally awkward to
  /// work with.
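  ///
  /// For example (illustrative), a members_index attribute of [[1], [1, 0]]
  /// is expanded into the vector of vectors {{1}, {1, 0}}.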
  void getMemberIndicesAsVectors(
      mlir::omp::MapInfoOp mapInfo,
      llvm::SmallVectorImpl<llvm::SmallVector<int64_t>> &indices) {
    indices.reserve(mapInfo.getMembersIndexAttr().getValue().size());
    llvm::transform(mapInfo.getMembersIndexAttr().getValue(),
                    std::back_inserter(indices), [this](mlir::Attribute value) {
                      auto memberIndex = mlir::cast<mlir::ArrayAttr>(value);
                      llvm::SmallVector<int64_t> indexes;
                      getAsIntegers(memberIndex.getValue(), indexes);
                      return indexes;
                    });
  }

  /// Given a MapInfoOp containing a descriptor type that must be expanded
  /// into multiple maps, this function extracts the descriptor value and
  /// returns it. In certain cases a new allocation must be generated and the
  /// descriptor stored into it, so that the fir::BoxOffsetOp used to access
  /// the base address of the descriptor's data has a reference to operate on.
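  ///
  /// Roughly (illustrative only, with types abbreviated), for a box value
  /// that is not already in memory this emits:
  ///
  ///   %alloca = fir.alloca !fir.box<...>
  ///   fir.store %box to %alloca : !fir.ref<!fir.box<...>>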
  mlir::Value getDescriptorFromBoxMap(mlir::omp::MapInfoOp boxMap,
                                      fir::FirOpBuilder &builder) {
    mlir::Value descriptor = boxMap.getVarPtr();
    if (!fir::isTypeWithDescriptor(boxMap.getVarType()))
      if (auto addrOp = mlir::dyn_cast_if_present<fir::BoxAddrOp>(
              boxMap.getVarPtr().getDefiningOp()))
        descriptor = addrOp.getVal();

    if (!mlir::isa<fir::BaseBoxType>(descriptor.getType()))
      return descriptor;

    mlir::Value &slot = localBoxAllocas[descriptor.getDefiningOp()];
    if (slot) {
      return slot;
    }

    // The fir::BoxOffsetOp only works with !fir.ref<!fir.box<...>> types, as
    // allowing it to access non-reference box operations can produce
    // problematic SSA IR. However, in the case of assumed shapes the type is
    // not a !fir.ref; in these cases, to retrieve the appropriate
    // !fir.ref<!fir.box<...>> for accessing the data we need to map, we must
    // perform an alloca, store the box into it, and then retrieve the data
    // from the new alloca.
    mlir::OpBuilder::InsertPoint insPt = builder.saveInsertionPoint();
    mlir::Block *allocaBlock = builder.getAllocaBlock();
    mlir::Location loc = boxMap->getLoc();
    assert(allocaBlock && "No alloca block found for this top level op");
    builder.setInsertionPointToStart(allocaBlock);
    auto alloca = builder.create<fir::AllocaOp>(loc, descriptor.getType());
    builder.restoreInsertionPoint(insPt);
    builder.create<fir::StoreOp>(loc, descriptor, alloca);
    return slot = alloca;
  }

  /// Generates a FIR operation accessing the descriptor's base address
  /// (BoxOffsetOp) and a MapInfoOp for it. Note that we normally move the
  /// bounds from the descriptor map onto the base address map.
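  ///
  /// Roughly (illustrative only; the exact assembly and types may differ):
  ///
  ///   %addr = fir.box_offset %desc base_addr : ...
  ///   %map  = omp.map.info var_ptr(%desc : ...) var_ptr_ptr(%addr : ...)
  ///             bounds(...) ...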
  mlir::omp::MapInfoOp genBaseAddrMap(mlir::Value descriptor,
                                      mlir::OperandRange bounds,
                                      int64_t mapType,
                                      fir::FirOpBuilder &builder) {
    mlir::Location loc = descriptor.getLoc();
    mlir::Value baseAddrAddr = builder.create<fir::BoxOffsetOp>(
        loc, descriptor, fir::BoxFieldAttr::base_addr);

    mlir::Type underlyingVarType =
        llvm::cast<mlir::omp::PointerLikeType>(
            fir::unwrapRefType(baseAddrAddr.getType()))
            .getElementType();
    if (auto seqType = llvm::dyn_cast<fir::SequenceType>(underlyingVarType))
      if (seqType.hasDynamicExtents())
        underlyingVarType = seqType.getEleTy();

    // Member of the descriptor pointing at the allocated data
    return builder.create<mlir::omp::MapInfoOp>(
        loc, baseAddrAddr.getType(), descriptor,
        mlir::TypeAttr::get(underlyingVarType), baseAddrAddr,
        /*members=*/mlir::SmallVector<mlir::Value>{},
        /*membersIndex=*/mlir::ArrayAttr{}, bounds,
        builder.getIntegerAttr(builder.getIntegerType(64, false), mapType),
        builder.getAttr<mlir::omp::VariableCaptureKindAttr>(
            mlir::omp::VariableCaptureKind::ByRef),
        /*name=*/builder.getStringAttr(""),
        /*partial_map=*/builder.getBoolAttr(false));
  }

  /// This function adjusts the member indices vector to include a new
  /// base address member. We take the position of the descriptor in
  /// the member indices list as the starting point for the base
  /// address's index, since the base address is a member of the
  /// descriptor. We must also alter the indices of other members of
  /// this descriptor to account for the insertion of the base address
  /// index.
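  ///
  /// A worked example (illustrative): if the descriptor sits at index {1}
  /// and has one existing member at {1, 2}, the indices become
  ///
  ///   {1}        the descriptor (unchanged)
  ///   {1, 0}     the newly inserted base address
  ///   {1, 0, 2}  the previous member, now nested under the base address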
  void adjustMemberIndices(
      llvm::SmallVectorImpl<llvm::SmallVector<int64_t>> &memberIndices,
      size_t memberIndex) {
    llvm::SmallVector<int64_t> baseAddrIndex = memberIndices[memberIndex];

    // If we find another member that is "derived from/a member of" the
    // descriptor and is not the descriptor itself, we must insert a 0 for the
    // newly added base address into that member's index list at the
    // appropriate position, to keep its positional/index data correct.
    for (llvm::SmallVector<int64_t> &member : memberIndices)
      if (member.size() > baseAddrIndex.size() &&
          std::equal(baseAddrIndex.begin(), baseAddrIndex.end(),
                     member.begin()))
        member.insert(std::next(member.begin(), baseAddrIndex.size()), 0);

    // Add the base address index to the main base address member data.
    baseAddrIndex.push_back(0);

    // Insert our newly created baseAddrIndex into the larger list of indices
    // at the correct location.
    memberIndices.insert(std::next(memberIndices.begin(), memberIndex + 1),
                         baseAddrIndex);
  }

  /// Adjusts the descriptor's map type. The main alteration currently done
  /// is transforming the map type to `OMP_MAP_TO` where possible. This is
  /// because we will always need to map the descriptor to the device (or at
  /// least that appears to be the case with the currently lowered kernel IR):
  /// without the appropriate descriptor information on the device, the kernel
  /// IR risks requesting data that has not been copied over when performing
  /// things like indexing, which can cause segfaults and memory access
  /// errors. However, we do not need this data mapped back to the host from
  /// the device, as per the OpenMP specification we cannot alter the data via
  /// resizing or deletion on the device. Discarding any descriptor
  /// alterations by not mapping back is reasonable (and required for certain
  /// segments of descriptor data, like the type descriptor, that are global
  /// constants). This alteration is only inapplicable to `target exit` and
  /// `target update` currently: `target exit` does not allow `to` mappings,
  /// and `target update` does not allow both `to` and `from` simultaneously.
  /// We currently try to maintain the `implicit` flag where necessary,
  /// although it does not seem strictly required.
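  ///
  /// For example (illustrative): an incoming descriptor map of
  /// `implicit | tofrom` becomes `implicit | to`, and an explicit `from`
  /// map becomes plain `to`; maps used by `target exit data` and
  /// `target update` are left untouched.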
  unsigned long getDescriptorMapType(unsigned long mapTypeFlag,
                                     mlir::Operation *target) {
    if (llvm::isa_and_nonnull<mlir::omp::TargetExitDataOp,
                              mlir::omp::TargetUpdateOp>(target))
      return mapTypeFlag;

    bool hasImplicitMap =
        (llvm::omp::OpenMPOffloadMappingFlags(mapTypeFlag) &
         llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_IMPLICIT) ==
        llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_IMPLICIT;

    return llvm::to_underlying(
        hasImplicitMap
            ? llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_TO |
                  llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_IMPLICIT
            : llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_TO);
  }

  mlir::omp::MapInfoOp genDescriptorMemberMaps(mlir::omp::MapInfoOp op,
                                               fir::FirOpBuilder &builder,
                                               mlir::Operation *target) {
    llvm::SmallVector<ParentAndPlacement> mapMemberUsers;
    getMemberUserList(op, mapMemberUsers);

    // TODO: map the addendum segment of the descriptor, similarly to the
    // base address/data pointer member.
    mlir::Value descriptor = getDescriptorFromBoxMap(op, builder);
    auto baseAddr = genBaseAddrMap(descriptor, op.getBounds(),
                                   op.getMapType().value_or(0), builder);
    mlir::ArrayAttr newMembersAttr;
    mlir::SmallVector<mlir::Value> newMembers;
    llvm::SmallVector<llvm::SmallVector<int64_t>> memberIndices;

    if (!mapMemberUsers.empty() || !op.getMembers().empty())
      getMemberIndicesAsVectors(
          !mapMemberUsers.empty() ? mapMemberUsers[0].parent : op,
          memberIndices);

    // If the operation that we are expanding with a descriptor has a user
    // (parent), then we have to expand the parent's member indices to reflect
    // the adjusted member indices for the base address insertion. However, if
    // it does not, then either we are expanding a MapInfoOp without any
    // pre-existing member information so that it gains one new member for the
    // base address, or we are expanding a parent that is a descriptor and we
    // have to adjust all of its members to reflect the insertion of the base
    // address.
    if (!mapMemberUsers.empty()) {
      // Currently, there should only be one user per map when this pass
      // is executed. Either a parent map, holding the current map in its
      // member list, or a target operation that holds a map clause. This
      // may change in the future if we aim to refactor the MLIR for map
      // clauses to allow sharing of duplicate maps across target
      // operations.
      assert(mapMemberUsers.size() == 1 &&
             "OMPMapInfoFinalization currently only supports single users of a "
             "MapInfoOp");
      ParentAndPlacement mapUser = mapMemberUsers[0];
      adjustMemberIndices(memberIndices, mapUser.index);
      llvm::SmallVector<mlir::Value> newMemberOps;
      for (auto v : mapUser.parent.getMembers()) {
        newMemberOps.push_back(v);
        if (v == op)
          newMemberOps.push_back(baseAddr);
      }
      mapUser.parent.getMembersMutable().assign(newMemberOps);
      mapUser.parent.setMembersIndexAttr(
          builder.create2DI64ArrayAttr(memberIndices));
    } else {
      newMembers.push_back(baseAddr);
      if (!op.getMembers().empty()) {
        for (auto &indices : memberIndices)
          indices.insert(indices.begin(), 0);
        memberIndices.insert(memberIndices.begin(), {0});
        newMembersAttr = builder.create2DI64ArrayAttr(memberIndices);
        newMembers.append(op.getMembers().begin(), op.getMembers().end());
      } else {
        llvm::SmallVector<llvm::SmallVector<int64_t>> memberIdx = {{0}};
        newMembersAttr = builder.create2DI64ArrayAttr(memberIdx);
      }
    }

    mlir::omp::MapInfoOp newDescParentMapOp =
        builder.create<mlir::omp::MapInfoOp>(
            op->getLoc(), op.getResult().getType(), descriptor,
            mlir::TypeAttr::get(fir::unwrapRefType(descriptor.getType())),
            /*varPtrPtr=*/mlir::Value{}, newMembers, newMembersAttr,
            /*bounds=*/mlir::SmallVector<mlir::Value>{},
            builder.getIntegerAttr(
                builder.getIntegerType(64, false),
                getDescriptorMapType(op.getMapType().value_or(0), target)),
            op.getMapCaptureTypeAttr(), op.getNameAttr(),
            /*partial_map=*/builder.getBoolAttr(false));
    op.replaceAllUsesWith(newDescParentMapOp.getResult());
    op->erase();
    return newDescParentMapOp;
  }

  // We add all mapped record members not directly used in the target region
  // to the block arguments in front of their parent, and we place them into
  // the map operands list for consistency.
  //
  // These indirect uses (via accesses to their parent) will still be
  // mapped individually in most cases, and a parent mapping doesn't
  // guarantee the parent will be mapped in its totality; partial
  // mapping is common.
  //
  // For example:
  //    map(tofrom: x%y)
  //
  // will generate a map entry for "x" (the parent) and "y" (the member).
  // The parent "x" will not actually have its data mapped, but the member
  // "y" will. However, we must have the parent as a BlockArg and MapOperand
  // in these cases, to maintain the correct uses within the region and
  // to help track that the member is part of a larger object.
  //
  // In the case of:
  //    map(tofrom: x%y, x%z)
  //
  // the parent member becomes more critical, as we perform a partial
  // structure mapping where we link the mapping of the members y
  // and z together via the parent x. We do this at a kernel argument
  // level in LLVM IR and not just MLIR, which is important to maintain
  // similarity to Clang and for the runtime to do the correct thing.
  // However, we still do not map the structure in its totality; rather,
  // we generate an un-sized "binding" map entry for it.
  //
  // In the case of:
  //    map(tofrom: x, x%y, x%z)
  //
  // we do actually map the entirety of "x", so the explicit mapping of
  // x%y and x%z becomes unnecessary. It is redundant to write this from a
  // Fortran OpenMP perspective (although it is legal), as even if the
  // members were allocatables or pointers, we are mandated by the
  // specification to map these (and any recursive components) in their
  // entirety, which is different from the C++ equivalent, which requires
  // explicit mapping of these segments.
  void addImplicitMembersToTarget(mlir::omp::MapInfoOp op,
                                  fir::FirOpBuilder &builder,
                                  mlir::Operation *target) {
    auto mapClauseOwner =
        llvm::dyn_cast_if_present<mlir::omp::MapClauseOwningOpInterface>(
            target);
    // TargetDataOp is technically a MapClauseOwningOpInterface, so we do not
    // need to explicitly check for the extra use_device_addr/use_device_ptr
    // cases here.
    if (!mapClauseOwner)
      return;

    auto addOperands = [&](mlir::MutableOperandRange &mutableOpRange,
                           mlir::Operation *directiveOp,
                           unsigned blockArgInsertIndex = 0) {
      if (!llvm::is_contained(mutableOpRange.getAsOperandRange(),
                              op.getResult()))
        return;

      // There doesn't appear to be a simple way to convert a
      // MutableOperandRange to a vector currently, so we instead use a
      // for_each to populate our vector.
      llvm::SmallVector<mlir::Value> newMapOps;
      newMapOps.reserve(mutableOpRange.size());
      llvm::for_each(
          mutableOpRange.getAsOperandRange(),
          [&newMapOps](mlir::Value oper) { newMapOps.push_back(oper); });

      for (auto mapMember : op.getMembers()) {
        if (llvm::is_contained(mutableOpRange.getAsOperandRange(), mapMember))
          continue;
        newMapOps.push_back(mapMember);
        if (directiveOp) {
          directiveOp->getRegion(0).insertArgument(
              blockArgInsertIndex, mapMember.getType(), mapMember.getLoc());
          blockArgInsertIndex++;
        }
      }

      mutableOpRange.assign(newMapOps);
    };

    auto argIface =
        llvm::dyn_cast<mlir::omp::BlockArgOpenMPOpInterface>(target);

    if (auto mapClauseOwner =
            llvm::dyn_cast<mlir::omp::MapClauseOwningOpInterface>(target)) {
      mlir::MutableOperandRange mapMutableOpRange =
          mapClauseOwner.getMapVarsMutable();
      unsigned blockArgInsertIndex =
          argIface
              ? argIface.getMapBlockArgsStart() + argIface.numMapBlockArgs()
              : 0;
      addOperands(mapMutableOpRange,
                  llvm::dyn_cast_if_present<mlir::omp::TargetOp>(
                      argIface.getOperation()),
                  blockArgInsertIndex);
    }

    if (auto targetDataOp = llvm::dyn_cast<mlir::omp::TargetDataOp>(target)) {
      mlir::MutableOperandRange useDevAddrMutableOpRange =
          targetDataOp.getUseDeviceAddrVarsMutable();
      addOperands(useDevAddrMutableOpRange, target,
                  argIface.getUseDeviceAddrBlockArgsStart() +
                      argIface.numUseDeviceAddrBlockArgs());

      mlir::MutableOperandRange useDevPtrMutableOpRange =
          targetDataOp.getUseDevicePtrVarsMutable();
      addOperands(useDevPtrMutableOpRange, target,
                  argIface.getUseDevicePtrBlockArgsStart() +
                      argIface.numUseDevicePtrBlockArgs());
    }
  }

  // We retrieve the first user that is a Target operation, of which
  // there should only be one currently. Every MapInfoOp can be tied to
  // at most one Target operation, and possibly to none at all.
  // This may change in the future with IR cleanups/modifications,
  // in which case this pass will need updating to support cases
  // where a map can have more than one user and more than one of
  // those users can be a Target operation. For now, we simply
  // return the first target operation encountered, which may
  // be on the parent MapInfoOp in the case of a member mapping.
  // In that case, we traverse the MapInfoOp chain until we
  // find the first TargetOp user.
  mlir::Operation *getFirstTargetUser(mlir::omp::MapInfoOp mapOp) {
    for (auto *user : mapOp->getUsers()) {
      if (llvm::isa<mlir::omp::TargetOp, mlir::omp::TargetDataOp,
                    mlir::omp::TargetUpdateOp, mlir::omp::TargetExitDataOp,
                    mlir::omp::TargetEnterDataOp>(user))
        return user;

      if (auto mapUser = llvm::dyn_cast<mlir::omp::MapInfoOp>(user))
        return getFirstTargetUser(mapUser);
    }

    return nullptr;
  }

  // This pass executes on omp::MapInfoOps containing descriptor-based types
  // (allocatables, pointers, assumed shape, etc.), expanding them into
  // multiple omp::MapInfoOps: one for each pointer member contained within
  // the descriptor.
  //
  // From the perspective of the MLIR pass manager this runs on the top level
  // operation (usually function) containing the MapInfoOp because this pass
  // will mutate siblings of MapInfoOp.
  void runOnOperation() override {
    mlir::ModuleOp module = getOperation();
    if (!module)
      module = getOperation()->getParentOfType<mlir::ModuleOp>();
    fir::KindMapping kindMap = fir::getKindMapping(module);
    fir::FirOpBuilder builder{module, std::move(kindMap)};

    // We wish to maintain some function-level scope (currently just the local
    // function-scope variables used to load and store box variables so that
    // we can access their base address; a quirk of box_offset requires us to
    // have an in-memory box, which Fortran in certain cases does not provide),
    // whilst not subjecting ourselves to the possibility of race conditions
    // while this pass undergoes frequent re-iteration for the near future. So
    // we loop over the functions in the module and then over the map.info ops
    // inside of those.
    getOperation()->walk([&](mlir::func::FuncOp func) {
      // Clear all local allocations we made for any boxes in any prior
      // iterations from previous function scopes.
      localBoxAllocas.clear();

      // First, walk `omp.map.info` ops to see if any record members should be
      // implicitly mapped.
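      //
      // For example (illustrative; the type and variable names are
      // hypothetical):
      //
      //   type(t) :: v            ! "t" has an allocatable component "a"
      //   !$omp target map(tofrom: v)
      //     v%a(1) = 1
      //   !$omp end target
      //
      // The access to "v%a" inside the region causes an additional member
      // map for the component "a" to be appended to the map for "v".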
      func->walk([&](mlir::omp::MapInfoOp op) {
        mlir::Type underlyingType =
            fir::unwrapRefType(op.getVarPtr().getType());

        // TODO Test with and support more complicated cases; for example,
        // arrays of records.
        if (!fir::isRecordWithAllocatableMember(underlyingType))
          return mlir::WalkResult::advance();

        // TODO For now, only consider `omp.target` ops. Other ops that support
        // `map` clauses will follow later.
        mlir::omp::TargetOp target =
            mlir::dyn_cast_if_present<mlir::omp::TargetOp>(
                getFirstTargetUser(op));

        if (!target)
          return mlir::WalkResult::advance();

        auto mapClauseOwner =
            llvm::dyn_cast<mlir::omp::MapClauseOwningOpInterface>(*target);

        int64_t mapVarIdx = mapClauseOwner.getOperandIndexForMap(op);
        assert(mapVarIdx >= 0 &&
               mapVarIdx <
                   static_cast<int64_t>(mapClauseOwner.getMapVars().size()));

        auto argIface =
            llvm::dyn_cast<mlir::omp::BlockArgOpenMPOpInterface>(*target);
        // TODO How should `map` block arguments that correspond to `private`,
        // `use_device_addr`, and `use_device_ptr` be handled?
        mlir::BlockArgument opBlockArg = argIface.getMapBlockArgs()[mapVarIdx];
        llvm::SetVector<mlir::Operation *> mapVarForwardSlice;
        mlir::getForwardSlice(opBlockArg, &mapVarForwardSlice);

        mapVarForwardSlice.remove_if([&](mlir::Operation *sliceOp) {
          // TODO Support coordinate_of ops.
          //
          // TODO Support call ops by recursively examining the forward slice of
          // the corresponding parameter to the field in the called function.
          return !mlir::isa<hlfir::DesignateOp>(sliceOp);
        });

        auto recordType = mlir::cast<fir::RecordType>(underlyingType);
        llvm::SmallVector<mlir::Value> newMapOpsForFields;
        llvm::SmallVector<int64_t> fieldIndicies;

        for (auto fieldMemTyPair : recordType.getTypeList()) {
          auto &field = fieldMemTyPair.first;
          auto memTy = fieldMemTyPair.second;

          bool shouldMapField =
              llvm::find_if(mapVarForwardSlice, [&](mlir::Operation *sliceOp) {
                if (!fir::isAllocatableType(memTy))
                  return false;

                auto designateOp = mlir::dyn_cast<hlfir::DesignateOp>(sliceOp);
                if (!designateOp)
                  return false;

                return designateOp.getComponent() &&
                       designateOp.getComponent()->strref() == field;
              }) != mapVarForwardSlice.end();

          // TODO Handle recursive record types. Adapting
          // `createParentSymAndGenIntermediateMaps` to work directly on MLIR
          // entities might be helpful here.

          if (!shouldMapField)
            continue;

          int64_t fieldIdx = recordType.getFieldIndex(field);
          bool alreadyMapped = [&]() {
            if (op.getMembersIndexAttr())
              for (auto indexList : op.getMembersIndexAttr()) {
                auto indexListAttr = mlir::cast<mlir::ArrayAttr>(indexList);
                if (indexListAttr.size() == 1 &&
                    mlir::cast<mlir::IntegerAttr>(indexListAttr[0]).getInt() ==
                        fieldIdx)
                  return true;
              }

            return false;
          }();

          if (alreadyMapped)
            continue;

          builder.setInsertionPoint(op);
          mlir::Value fieldIdxVal = builder.createIntegerConstant(
              op.getLoc(), mlir::IndexType::get(builder.getContext()),
              fieldIdx);
          auto fieldCoord = builder.create<fir::CoordinateOp>(
              op.getLoc(), builder.getRefType(memTy), op.getVarPtr(),
              fieldIdxVal);
          fir::factory::AddrAndBoundsInfo info =
              fir::factory::getDataOperandBaseAddr(
                  builder, fieldCoord, /*isOptional=*/false, op.getLoc());
          llvm::SmallVector<mlir::Value> bounds =
              fir::factory::genImplicitBoundsOps<mlir::omp::MapBoundsOp,
                                                 mlir::omp::MapBoundsType>(
                  builder, info,
                  hlfir::translateToExtendedValue(op.getLoc(), builder,
                                                  hlfir::Entity{fieldCoord})
                      .first,
                  /*dataExvIsAssumedSize=*/false, op.getLoc());

          mlir::omp::MapInfoOp fieldMapOp =
              builder.create<mlir::omp::MapInfoOp>(
                  op.getLoc(), fieldCoord.getResult().getType(),
                  fieldCoord.getResult(),
                  mlir::TypeAttr::get(
                      fir::unwrapRefType(fieldCoord.getResult().getType())),
                  /*varPtrPtr=*/mlir::Value{},
                  /*members=*/mlir::ValueRange{},
                  /*members_index=*/mlir::ArrayAttr{},
                  /*bounds=*/bounds, op.getMapTypeAttr(),
                  builder.getAttr<mlir::omp::VariableCaptureKindAttr>(
                      mlir::omp::VariableCaptureKind::ByRef),
                  builder.getStringAttr(op.getNameAttr().strref() + "." +
                                        field + ".implicit_map"),
                  /*partial_map=*/builder.getBoolAttr(false));
          newMapOpsForFields.emplace_back(fieldMapOp);
          fieldIndicies.emplace_back(fieldIdx);
        }

        if (newMapOpsForFields.empty())
          return mlir::WalkResult::advance();

        op.getMembersMutable().append(newMapOpsForFields);
        llvm::SmallVector<llvm::SmallVector<int64_t>> newMemberIndices;
        mlir::ArrayAttr oldMembersIdxAttr = op.getMembersIndexAttr();

        if (oldMembersIdxAttr)
          for (mlir::Attribute indexList : oldMembersIdxAttr) {
            llvm::SmallVector<int64_t> listVec;

            for (mlir::Attribute index : mlir::cast<mlir::ArrayAttr>(indexList))
              listVec.push_back(mlir::cast<mlir::IntegerAttr>(index).getInt());

            newMemberIndices.emplace_back(std::move(listVec));
          }

        for (int64_t newFieldIdx : fieldIndicies)
          newMemberIndices.emplace_back(
              llvm::SmallVector<int64_t>(1, newFieldIdx));

        op.setMembersIndexAttr(builder.create2DI64ArrayAttr(newMemberIndices));
        op.setPartialMap(true);

        return mlir::WalkResult::advance();
      });

      func->walk([&](mlir::omp::MapInfoOp op) {
        // TODO: Currently only supports a single user for the MapInfoOp. This
        // is fine for the moment, as the Fortran frontend will generate a
        // new MapInfoOp with at most one user currently. In the case of
        // members of other objects, like derived types, the user would be the
        // parent. In cases where it's a regular non-member map, the user would
        // be the target operation it is being mapped by.
        //
        // However, when/if we optimise/cleanup the IR we will have to extend
        // this pass to support multiple users, as we may wish to have a map
        // be re-used by multiple users (e.g. across multiple targets that map
        // the variable and have identical map properties).
        assert(llvm::hasSingleElement(op->getUsers()) &&
               "OMPMapInfoFinalization currently only supports single users "
               "of a MapInfoOp");

        if (fir::isTypeWithDescriptor(op.getVarType()) ||
            mlir::isa_and_present<fir::BoxAddrOp>(
                op.getVarPtr().getDefiningOp())) {
          builder.setInsertionPoint(op);
          mlir::Operation *targetUser = getFirstTargetUser(op);
          assert(targetUser && "expected user of map operation was not found");
          genDescriptorMemberMaps(op, builder, targetUser);
        }
      });

      // Wait until after we have generated all of our maps to add them onto
      // the target's block arguments, simplifying the process as there would
      // be no need to avoid accidental duplicate additions.
      func->walk([&](mlir::omp::MapInfoOp op) {
        mlir::Operation *targetUser = getFirstTargetUser(op);
        assert(targetUser && "expected user of map operation was not found");
        addImplicitMembersToTarget(op, builder, targetUser);
      });
    });
  }
};

} // namespace