Lines matching defs:omp
44 using namespace Fortran::lower::omp;
139 bool apply(mlir::omp::LoopNestOperands &clauseOps,
159 bool apply(mlir::omp::ParallelOperands &clauseOps) {
175 bool apply(mlir::omp::TeamsOperands &clauseOps) {
186 mlir::omp::HostEvaluatedOperands ops;
192 /// Stack of \see HostEvalInfo to represent the current nest of \c omp.target
214 mlir::omp::BlockArgOpenMPOpInterface op,
343 /// by an `omp.map.info` operation.
350 auto mapInfo = map.getDefiningOp<mlir::omp::MapInfoOp>();
351 assert(mapInfo && "expected all map vars to be defined by omp.map.info");
363 llvm::omp::Directive
368 return llvm::omp::OMPD_allocators;
371 return llvm::omp::OMPD_atomic;
379 return llvm::omp::OMPD_critical;
382 return llvm::omp::OMPD_allocate;
385 return llvm::omp::OMPD_dispatch;
388 return llvm::omp::OMPD_allocate;
396 return llvm::omp::OMPD_section;
411 return llvm::omp::OMPD_flush;
414 return llvm::omp::OMPD_cancel;
417 return llvm::omp::OMPD_cancellation_point;
420 return llvm::omp::OMPD_metadirective;
423 return llvm::omp::OMPD_depobj;
430 return llvm::omp::OMPD_error;
433 return llvm::omp::OMPD_nothing;
451 /// host_eval operands of the associated \c omp.target operation, and also to be
453 /// operands of the \c omp.teams, \c omp.parallel or \c omp.loop_nest
507 -> std::optional<llvm::omp::Directive> {
511 llvm::omp::Directive dir;
530 using namespace llvm::omp;
542 List<lower::omp::Clause> nestedClauses;
607 llvm::omp::allTargetSet.test(extractOmpDirective(*ompEval)) &&
612 // ones only applied to omp.target.
613 List<lower::omp::Clause> clauses;
723 if (mlir::isa<mlir::omp::ThreadprivateOp>(op))
724 symValue = mlir::dyn_cast<mlir::omp::ThreadprivateOp>(op).getSymAddr();
725 return firOpBuilder.create<mlir::omp::ThreadprivateOp>(
836 mlir::omp::DeclareTargetOperands &clauseOps,
843 gatherFuncAndVarSyms(objects, mlir::omp::DeclareTargetCaptureClause::to,
854 symbolAndClause.emplace_back(mlir::omp::DeclareTargetCaptureClause::to,
866 llvm::omp::Directive::OMPD_declare_target);
876 mlir::omp::DeclareTargetOperands clauseOps;
896 static std::optional<mlir::omp::DeclareTargetDeviceType>
901 mlir::omp::DeclareTargetOperands clauseOps;
920 /// Set up the entry block of the given `omp.loop_nest` operation, adding a
927 /// operations inside of the entry block of the `omp.loop_nest` operation and
940 std::pair<mlir::omp::BlockArgOpenMPOpInterface, const EntryBlockArgs &>>
988 mlir::omp::DeclareTargetCaptureClause captureClause,
989 mlir::omp::DeclareTargetDeviceType deviceType) {
991 auto declareTargetOp = llvm::dyn_cast<mlir::omp::DeclareTargetInterface>(op);
1003 declareTargetOp.setDeclareTarget(mlir::omp::DeclareTargetDeviceType::any,
1026 lower::pft::Evaluation &eval, llvm::omp::Directive dir)
1061 llvm::omp::Directive dir;
1107 if (lower::omp::isLastItemInQueue(item, queue) &&
1109 lower::createEmptyRegionBlocks<mlir::omp::TerminatorOp, mlir::omp::YieldOp>(
1115 bool isLoop = llvm::omp::getDirectiveAssociation(info.dir) ==
1116 llvm::omp::Association::Loop;
1123 Fortran::lower::omp::isLastItemInQueue(item, queue),
1128 if (info.dir == llvm::omp::Directive::OMPD_parallel) {
1179 // Additionally, some ops (e.g. omp.sections) require only 1 block in
1206 if (auto loopNest = llvm::dyn_cast<mlir::omp::LoopNestOp>(op)) {
1207 llvm::SmallVector<mlir::omp::LoopWrapperInterface> wrappers;
1234 mlir::omp::TargetDataOp &dataOp, const EntryBlockArgs &args,
1253 lower::createEmptyRegionBlocks<mlir::omp::TerminatorOp, mlir::omp::YieldOp>(
1257 firOpBuilder.create<mlir::omp::TerminatorOp>(currentLocation);
1309 mlir::omp::TargetOp &targetOp, const EntryBlockArgs &args,
1313 auto argIface = llvm::cast<mlir::omp::BlockArgOpenMPOpInterface>(*targetOp);
1359 fir::factory::genImplicitBoundsOps<mlir::omp::MapBoundsOp,
1360 mlir::omp::MapBoundsType>(
1370 llvm::omp::OpenMPOffloadMappingFlags mapFlag =
1371 llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_IMPLICIT;
1372 mlir::omp::VariableCaptureKind captureKind =
1373 mlir::omp::VariableCaptureKind::ByRef;
1381 captureKind = mlir::omp::VariableCaptureKind::ByCopy;
1383 mapFlag |= llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_TO;
1392 std::underlying_type_t<llvm::omp::OpenMPOffloadMappingFlags>>(
1428 if (lower::omp::isLastItemInQueue(item, queue) &&
1430 lower::createEmptyRegionBlocks<mlir::omp::TerminatorOp, mlir::omp::YieldOp>(
1434 firOpBuilder.create<mlir::omp::TerminatorOp>(currentLocation);
1475 OpTy::template hasTrait<mlir::omp::LoopWrapperInterface::Trait>(),
1495 mlir::omp::CriticalDeclareOperands &clauseOps, llvm::StringRef name) {
1507 mlir::omp::DistributeOperands &clauseOps) {
1524 clause::SeqCst>(loc, llvm::omp::OMPD_flush);
1531 mlir::Location loc, mlir::omp::LoopNestOperands &clauseOps,
1544 mlir::omp::LoopOperands &clauseOps,
1550 cp.processTODO<clause::Lastprivate>(loc, llvm::omp::Directive::OMPD_loop);
1557 mlir::omp::MaskedOperands &clauseOps) {
1566 mlir::omp::OrderedRegionOperands &clauseOps) {
1568 cp.processTODO<clause::Simd>(loc, llvm::omp::Directive::OMPD_ordered);
1574 mlir::Location loc, mlir::omp::ParallelOperands &clauseOps,
1578 cp.processIf(llvm::omp::Directive::OMPD_parallel, clauseOps);
1590 mlir::omp::SectionsOperands &clauseOps,
1602 mlir::omp::SimdOperands &clauseOps,
1606 cp.processIf(llvm::omp::Directive::OMPD_simd, clauseOps);
1613 cp.processTODO<clause::Linear>(loc, llvm::omp::Directive::OMPD_simd);
1619 mlir::omp::SingleOperands &clauseOps) {
1631 mlir::omp::TargetOperands &clauseOps,
1645 cp.processIf(llvm::omp::Directive::OMPD_target, clauseOps);
1653 loc, llvm::omp::Directive::OMPD_target);
1657 cp.processTODO<clause::Private>(loc, llvm::omp::Directive::OMPD_target);
1663 mlir::Location loc, mlir::omp::TargetDataOperands &clauseOps,
1668 cp.processIf(llvm::omp::Directive::OMPD_target_data, clauseOps);
1689 mlir::Location loc, llvm::omp::Directive directive,
1690 mlir::omp::TargetEnterExitUpdateDataOperands &clauseOps) {
1696 if (directive == llvm::omp::Directive::OMPD_target_update)
1708 mlir::omp::TaskOperands &clauseOps) {
1713 cp.processIf(llvm::omp::Directive::OMPD_task, clauseOps);
1721 loc, llvm::omp::Directive::OMPD_task);
1727 mlir::omp::TaskgroupOperands &clauseOps) {
1731 llvm::omp::Directive::OMPD_taskgroup);
1737 mlir::omp::TaskwaitOperands &clauseOps) {
1740 loc, llvm::omp::Directive::OMPD_taskwait);
1747 mlir::omp::WorkshareOperands &clauseOps) {
1755 mlir::Location loc, mlir::omp::TeamsOperands &clauseOps,
1759 cp.processIf(llvm::omp::Directive::OMPD_teams, clauseOps);
1773 mlir::Location loc, mlir::omp::WsloopOperands &clauseOps,
1783 loc, llvm::omp::Directive::OMPD_do);
1790 static mlir::omp::BarrierOp
1795 return converter.getFirOpBuilder().create<mlir::omp::BarrierOp>(loc);
1798 static mlir::omp::CriticalOp
1810 auto global = mod.lookupSymbol<mlir::omp::CriticalDeclareOp>(nameStr);
1812 mlir::omp::CriticalDeclareOperands clauseOps;
1817 global = modBuilder.create<mlir::omp::CriticalDeclareOp>(loc, clauseOps);
1823 return genOpWithBody<mlir::omp::CriticalOp>(
1825 llvm::omp::Directive::OMPD_critical),
1829 static mlir::omp::FlushOp
1838 return converter.getFirOpBuilder().create<mlir::omp::FlushOp>(
1842 static mlir::omp::LoopNestOp genLoopNestOp(
1846 ConstructQueue::const_iterator item, mlir::omp::LoopNestOperands &clauseOps,
1849 std::pair<mlir::omp::BlockArgOpenMPOpInterface, const EntryBlockArgs &>>
1851 llvm::omp::Directive directive, DataSharingProcessor &dsp) {
1860 return genOpWithBody<mlir::omp::LoopNestOp>(
1875 mlir::omp::LoopOperands loopClauseOps;
1885 mlir::omp::LoopNestOperands loopNestClauseOps;
1897 genWrapperOp<mlir::omp::LoopOp>(converter, loc, loopClauseOps, loopArgs);
1900 llvm::omp::Directive::OMPD_loop, dsp);
1903 static mlir::omp::MaskedOp
1909 mlir::omp::MaskedOperands clauseOps;
1912 return genOpWithBody<mlir::omp::MaskedOp>(
1914 llvm::omp::Directive::OMPD_masked),
1918 static mlir::omp::MasterOp
1923 return genOpWithBody<mlir::omp::MasterOp>(
1925 llvm::omp::Directive::OMPD_master),
1929 static mlir::omp::OrderedOp
1938 static mlir::omp::OrderedRegionOp
1944 mlir::omp::OrderedRegionOperands clauseOps;
1947 return genOpWithBody<mlir::omp::OrderedRegionOp>(
1949 llvm::omp::Directive::OMPD_ordered),
1953 static mlir::omp::ParallelOp
1958 mlir::omp::ParallelOperands &clauseOps,
1964 converter, llvm::cast<mlir::omp::BlockArgOpenMPOpInterface>(op), args);
1972 llvm::omp::Directive::OMPD_parallel)
1979 genOpWithBody<mlir::omp::ParallelOp>(genInfo, queue, item, clauseOps);
1987 static mlir::omp::SectionsOp
1993 mlir::omp::SectionsOperands clauseOps;
2003 lower::omp::isLastItemInQueue(item, queue),
2011 if (clause.id == llvm::omp::Clause::OMPC_lastprivate) {
2017 case llvm::omp::Clause::OMPC_firstprivate:
2018 case llvm::omp::Clause::OMPC_private:
2019 case llvm::omp::Clause::OMPC_shared:
2028 auto sectionsOp = builder.create<mlir::omp::SectionsOp>(loc, clauseOps);
2043 converter, llvm::cast<mlir::omp::BlockArgOpenMPOpInterface>(op), args);
2049 // because we need to run genReductionVars on each omp.section so that the
2063 sectionConstruct->source, llvm::omp::Directive::OMPD_section, {})};
2066 genOpWithBody<mlir::omp::SectionOp>(
2068 llvm::omp::Directive::OMPD_section)
2081 return llvm::isa<mlir::omp::SectionOp>(op);
2104 builder.create<mlir::omp::BarrierOp>(loc);
2118 static mlir::omp::SingleOp
2123 mlir::omp::SingleOperands clauseOps;
2126 return genOpWithBody<mlir::omp::SingleOp>(
2128 llvm::omp::Directive::OMPD_single)
2133 static mlir::omp::TargetOp
2141 llvm::cast<mlir::omp::OffloadModuleInterface>(*converter.getModuleOp())
2148 mlir::omp::TargetOperands clauseOps;
2156 lower::omp::isLastItemInQueue(item, queue),
2202 fir::factory::genImplicitBoundsOps<mlir::omp::MapBoundsOp,
2203 mlir::omp::MapBoundsType>(
2208 llvm::omp::OpenMPOffloadMappingFlags mapFlag =
2209 llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_IMPLICIT;
2210 mlir::omp::VariableCaptureKind captureKind =
2211 mlir::omp::VariableCaptureKind::ByRef;
2223 llvm::dyn_cast_if_present<mlir::omp::DeclareTargetInterface>(op);
2226 mlir::omp::DeclareTargetCaptureClause::link &&
2228 mlir::omp::DeclareTargetDeviceType::nohost) {
2229 mapFlag |= llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_TO;
2230 mapFlag |= llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_FROM;
2233 captureKind = mlir::omp::VariableCaptureKind::ByCopy;
2235 mapFlag |= llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_TO;
2236 mapFlag |= llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_FROM;
2247 std::underlying_type_t<llvm::omp::OpenMPOffloadMappingFlags>>(
2257 auto targetOp = firOpBuilder.create<mlir::omp::TargetOp>(loc, clauseOps);
2279 static mlir::omp::TargetDataOp
2286 mlir::omp::TargetDataOperands clauseOps;
2293 converter.getFirOpBuilder().create<mlir::omp::TargetDataOp>(loc,
2321 [[maybe_unused]] llvm::omp::Directive directive;
2322 if constexpr (std::is_same_v<OpTy, mlir::omp::TargetEnterDataOp>) {
2323 directive = llvm::omp::Directive::OMPD_target_enter_data;
2324 } else if constexpr (std::is_same_v<OpTy, mlir::omp::TargetExitDataOp>) {
2325 directive = llvm::omp::Directive::OMPD_target_exit_data;
2326 } else if constexpr (std::is_same_v<OpTy, mlir::omp::TargetUpdateOp>) {
2327 directive = llvm::omp::Directive::OMPD_target_update;
2332 mlir::omp::TargetEnterExitUpdateDataOperands clauseOps;
2339 static mlir::omp::TaskOp
2345 mlir::omp::TaskOperands clauseOps;
2349 return genOpWithBody<mlir::omp::TaskOp>(
2351 llvm::omp::Directive::OMPD_task)
2356 lower::omp::isLastItemInQueue(item, queue),
2367 llvm::cast<mlir::omp::BlockArgOpenMPOpInterface>(op),
2372 return genOpWithBody<mlir::omp::TaskOp>(
2374 llvm::omp::Directive::OMPD_task)
2381 static mlir::omp::TaskgroupOp
2387 mlir::omp::TaskgroupOperands clauseOps;
2390 return genOpWithBody<mlir::omp::TaskgroupOp>(
2392 llvm::omp::Directive::OMPD_taskgroup)
2397 static mlir::omp::TaskwaitOp
2403 mlir::omp::TaskwaitOperands clauseOps;
2405 return converter.getFirOpBuilder().create<mlir::omp::TaskwaitOp>(loc,
2409 static mlir::omp::TaskyieldOp
2415 return converter.getFirOpBuilder().create<mlir::omp::TaskyieldOp>(loc);
2418 static mlir::omp::WorkshareOp
2425 mlir::omp::WorkshareOperands clauseOps;
2429 return genOpWithBody<mlir::omp::WorkshareOp>(
2431 llvm::omp::Directive::OMPD_workshare)
2436 static mlir::omp::TeamsOp
2443 mlir::omp::TeamsOperands clauseOps;
2456 converter, llvm::cast<mlir::omp::BlockArgOpenMPOpInterface>(op), args);
2460 return genOpWithBody<mlir::omp::TeamsOp>(
2462 llvm::omp::Directive::OMPD_teams)
2482 mlir::omp::DistributeOperands distributeClauseOps;
2491 mlir::omp::LoopNestOperands loopNestClauseOps;
2499 auto distributeOp = genWrapperOp<mlir::omp::DistributeOp>(
2504 llvm::omp::Directive::OMPD_distribute, dsp);
2515 mlir::omp::WsloopOperands wsloopClauseOps;
2525 mlir::omp::LoopNestOperands loopNestClauseOps;
2535 auto wsloopOp = genWrapperOp<mlir::omp::WsloopOp>(
2540 llvm::omp::Directive::OMPD_do, dsp);
2552 mlir::omp::ParallelOperands parallelClauseOps;
2560 lower::omp::isLastItemInQueue(item, queue),
2582 mlir::omp::SimdOperands simdClauseOps;
2592 mlir::omp::LoopNestOperands loopNestClauseOps;
2603 genWrapperOp<mlir::omp::SimdOp>(converter, loc, simdClauseOps, simdArgs);
2607 llvm::omp::Directive::OMPD_simd, dsp);
2636 // Create parent omp.parallel first.
2637 mlir::omp::ParallelOperands parallelClauseOps;
2656 mlir::omp::DistributeOperands distributeClauseOps;
2660 mlir::omp::WsloopOperands wsloopClauseOps;
2665 mlir::omp::LoopNestOperands loopNestClauseOps;
2673 auto distributeOp = genWrapperOp<mlir::omp::DistributeOp>(
2681 auto wsloopOp = genWrapperOp<mlir::omp::WsloopOp>(
2688 llvm::omp::Directive::OMPD_distribute_parallel_do, dsp);
2704 // Create parent omp.parallel first.
2705 mlir::omp::ParallelOperands parallelClauseOps;
2724 mlir::omp::DistributeOperands distributeClauseOps;
2728 mlir::omp::WsloopOperands wsloopClauseOps;
2733 mlir::omp::SimdOperands simdClauseOps;
2738 mlir::omp::LoopNestOperands loopNestClauseOps;
2746 auto distributeOp = genWrapperOp<mlir::omp::DistributeOp>(
2754 auto wsloopOp = genWrapperOp<mlir::omp::WsloopOp>(
2763 genWrapperOp<mlir::omp::SimdOp>(converter, loc, simdClauseOps, simdArgs);
2771 llvm::omp::Directive::OMPD_distribute_parallel_do_simd, dsp);
2788 mlir::omp::DistributeOperands distributeClauseOps;
2792 mlir::omp::SimdOperands simdClauseOps;
2805 mlir::omp::LoopNestOperands loopNestClauseOps;
2813 auto distributeOp = genWrapperOp<mlir::omp::DistributeOp>(
2822 genWrapperOp<mlir::omp::SimdOp>(converter, loc, simdClauseOps, simdArgs);
2828 llvm::omp::Directive::OMPD_distribute_simd, dsp);
2844 mlir::omp::WsloopOperands wsloopClauseOps;
2849 mlir::omp::SimdOperands simdClauseOps;
2862 mlir::omp::LoopNestOperands loopNestClauseOps;
2872 auto wsloopOp = genWrapperOp<mlir::omp::WsloopOp>(
2881 genWrapperOp<mlir::omp::SimdOp>(converter, loc, simdClauseOps, simdArgs);
2887 llvm::omp::Directive::OMPD_do_simd, dsp);
2912 using llvm::omp::Directive;
2913 using lower::omp::matchLeafSequence;
2948 bool loopLeaf = llvm::omp::getDirectiveAssociation(item->id) ==
2949 llvm::omp::Association::Loop;
2959 switch (llvm::omp::Directive dir = item->id) {
2960 case llvm::omp::Directive::OMPD_barrier:
2963 case llvm::omp::Directive::OMPD_distribute:
2967 case llvm::omp::Directive::OMPD_do:
2970 case llvm::omp::Directive::OMPD_loop:
2973 case llvm::omp::Directive::OMPD_masked:
2976 case llvm::omp::Directive::OMPD_master:
2979 case llvm::omp::Directive::OMPD_ordered:
2983 case llvm::omp::Directive::OMPD_parallel:
2986 case llvm::omp::Directive::OMPD_scan:
2987 TODO(loc, "Unhandled directive " + llvm::omp::getOpenMPDirectiveName(dir));
2989 case llvm::omp::Directive::OMPD_section:
2993 case llvm::omp::Directive::OMPD_sections:
2999 case llvm::omp::Directive::OMPD_simd:
3002 case llvm::omp::Directive::OMPD_scope:
3005 case llvm::omp::Directive::OMPD_single:
3008 case llvm::omp::Directive::OMPD_target:
3011 case llvm::omp::Directive::OMPD_target_data:
3014 case llvm::omp::Directive::OMPD_target_enter_data:
3015 genTargetEnterExitUpdateDataOp<mlir::omp::TargetEnterDataOp>(
3018 case llvm::omp::Directive::OMPD_target_exit_data:
3019 genTargetEnterExitUpdateDataOp<mlir::omp::TargetExitDataOp>(
3022 case llvm::omp::Directive::OMPD_target_update:
3023 genTargetEnterExitUpdateDataOp<mlir::omp::TargetUpdateOp>(
3026 case llvm::omp::Directive::OMPD_task:
3029 case llvm::omp::Directive::OMPD_taskgroup:
3032 case llvm::omp::Directive::OMPD_taskloop:
3035 case llvm::omp::Directive::OMPD_taskwait:
3038 case llvm::omp::Directive::OMPD_taskyield:
3041 case llvm::omp::Directive::OMPD_teams:
3044 case llvm::omp::Directive::OMPD_tile:
3045 case llvm::omp::Directive::OMPD_unroll:
3047 llvm::omp::getOpenMPDirectiveName(dir) + ")");
3048 // case llvm::omp::Directive::OMPD_workdistribute:
3049 case llvm::omp::Directive::OMPD_workshare:
3055 assert(!llvm::omp::isLeafConstruct(dir) &&
3104 mlir::omp::DeclareTargetOperands clauseOps;
3122 std::get<mlir::omp::DeclareTargetCaptureClause>(symClause),
3171 if (directive.v == llvm::omp::Directive::OMPD_ordered) {
3202 llvm::omp::Directive::OMPD_flush, clauses)};
3322 assert(llvm::omp::blockConstructSet.test(origDirective) &&
3361 parser::ToUpperCaseLetters(llvm::omp::getOpenMPClauseName(clause.id));
3366 llvm::omp::Directive directive =
3387 llvm::omp::Directive::OMPD_critical, clauses)};
3434 llvm::omp::Directive directive =
3468 llvm::omp::Directive directive =
3478 next->id != llvm::omp::Directive::OMPD_sections) {
3487 assert(next->id == llvm::omp::Directive::OMPD_sections);
3509 if (mlir::isa<mlir::omp::AtomicUpdateOp, mlir::omp::DeclareReductionOp,
3510 mlir::omp::LoopNestOp>(op))
3511 return builder.create<mlir::omp::YieldOp>(loc);
3512 return builder.create<mlir::omp::TerminatorOp>(loc);
3519 const parser::OpenMPConstruct &omp) {
3521 genOMP(converter, symTable, semaCtx, eval, omp);
3527 const parser::OpenMPDeclarativeConstruct &omp) {
3528 genOMP(converter, symTable, semaCtx, eval, omp);
3566 if (mlir::isa<mlir::omp::ThreadprivateOp>(commonValue.getDefiningOp())) {
3573 firOpBuilder.create<mlir::omp::ThreadprivateOp>(
3596 symThreadprivateValue = firOpBuilder.create<mlir::omp::ThreadprivateOp>(
3609 if (mlir::isa<mlir::omp::ThreadprivateOp>(op))
3612 symThreadprivateValue = firOpBuilder.create<mlir::omp::ThreadprivateOp>(
3639 const parser::OpenMPConstruct &omp) {
3640 llvm::omp::Directive dir = llvm::omp::Directive::OMPD_unknown;
3641 if (const auto *block = std::get_if<parser::OpenMPBlockConstruct>(&omp.u)) {
3645 std::get_if<parser::OpenMPLoopConstruct>(&omp.u)) {
3649 return llvm::omp::allTargetSet.test(dir);
3676 mlir::omp::DeclareTargetDeviceType targetType =
3678 .value_or(mlir::omp::DeclareTargetDeviceType::host);
3679 return targetType != mlir::omp::DeclareTargetDeviceType::host;
3712 if (!deviceCodeFound && devType != mlir::omp::DeclareTargetDeviceType::host)
3724 using MlirRequires = mlir::omp::ClauseRequires;
3728 llvm::dyn_cast<mlir::omp::OffloadModuleInterface>(mod)) {
3742 // Use pre-populated omp.requires module attribute if it was set, so that