//===-- lib/Evaluate/fold-integer.cpp -------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "fold-implementation.h"
#include "fold-matmul.h"
#include "fold-reduction.h"
#include "flang/Evaluate/check-expression.h"

namespace Fortran::evaluate {

// Given a collection of ConstantSubscripts values, package them as a Constant.
// Return a scalar value if asScalar == true and a rank-element vector
// otherwise.
template <typename T>
Expr<T> PackageConstantBounds(
    const ConstantSubscripts &&bounds, bool asScalar = false) {
  if (asScalar) {
    return Expr<T>{Constant<T>{bounds.at(0)}};
  } else {
    // As rank-dim array
    const int rank{GetRank(bounds)};
    std::vector<Scalar<T>> packed(rank);
    std::transform(bounds.begin(), bounds.end(), packed.begin(),
        [](ConstantSubscript x) { return Scalar<T>(x); });
    return Expr<T>{Constant<T>{std::move(packed), ConstantSubscripts{rank}}};
  }
}

// If a DIM= argument to LBOUND(), UBOUND(), or SIZE() exists and has a valid
// constant value, return in "dimVal" that value, less 1 (to make it suitable
// for use as a C++ vector<> index). Also check for erroneous constant values
// and return false on error.
static bool CheckDimArg(const std::optional<ActualArgument> &dimArg,
    const Expr<SomeType> &array, parser::ContextualMessages &messages,
    bool isLBound, std::optional<int> &dimVal) {
  dimVal.reset();
  if (int rank{array.Rank()}; rank > 0 || IsAssumedRank(array)) {
    auto named{ExtractNamedEntity(array)};
    if (auto dim64{ToInt64(dimArg)}) {
      if (*dim64 < 1) {
        messages.Say("DIM=%jd dimension must be positive"_err_en_US, *dim64);
        return false;
      } else if (!IsAssumedRank(array) && *dim64 > rank) {
        messages.Say(
            "DIM=%jd dimension is out of range for rank-%d array"_err_en_US,
            *dim64, rank);
        return false;
      } else if (!isLBound && named &&
          semantics::IsAssumedSizeArray(named->GetLastSymbol()) &&
          *dim64 == rank) {
        messages.Say(
            "DIM=%jd dimension is out of range for rank-%d assumed-size array"_err_en_US,
            *dim64, rank);
        return false;
      } else if (IsAssumedRank(array)) {
        if (*dim64 > common::maxRank) {
          messages.Say(
              "DIM=%jd dimension is too large for any array (maximum rank %d)"_err_en_US,
              *dim64, common::maxRank);
          return false;
        }
      } else {
        dimVal = static_cast<int>(*dim64 - 1); // 1-based to 0-based
      }
    }
  }
  return true;
}

static bool CheckCoDimArg(const std::optional<ActualArgument> &dimArg,
    const Symbol &symbol, parser::ContextualMessages &messages,
    std::optional<int> &dimVal) {
  dimVal.reset();
  if (int corank{symbol.Corank()}; corank > 0) {
    if (auto dim64{ToInt64(dimArg)}) {
      if (*dim64 < 1) {
        messages.Say("DIM=%jd dimension must be positive"_err_en_US, *dim64);
        return false;
      } else if (*dim64 > corank) {
        messages.Say(
            "DIM=%jd dimension is out of range for corank-%d coarray"_err_en_US,
            *dim64, corank);
        return false;
      } else {
        dimVal = static_cast<int>(*dim64 - 1); // 1-based to 0-based
      }
    }
  }
  return true;
}

// Class to retrieve the constant bound of an expression which is an
// array that devolves to a type of Constant<T>
class GetConstantArrayBoundHelper {
public:
  template <typename T>
  static Expr<T> GetLbound(
      const Expr<SomeType> &array, std::optional<int> dim) {
    return PackageConstantBounds<T>(
        GetConstantArrayBoundHelper(dim, /*getLbound=*/true).Get(array),
        dim.has_value());
  }

  template <typename T>
  static Expr<T> GetUbound(
      const Expr<SomeType> &array, std::optional<int> dim) {
    return PackageConstantBounds<T>(
        GetConstantArrayBoundHelper(dim, /*getLbound=*/false).Get(array),
        dim.has_value());
  }

private:
  GetConstantArrayBoundHelper(
      std::optional<ConstantSubscript> dim, bool getLbound)
      : dim_{dim}, getLbound_{getLbound} {}

  template <typename T> ConstantSubscripts Get(const T &) {
    // The method is needed for template expansion, but we should never get
    // here in practice.
    CHECK(false);
    return {0};
  }

  template <typename T> ConstantSubscripts Get(const Constant<T> &x) {
    if (getLbound_) {
      // Return the lower bound
      if (dim_) {
        return {x.lbounds().at(*dim_)};
      } else {
        return x.lbounds();
      }
    } else {
      // Return the upper bound
      if (arrayFromParenthesesExpr) {
        // Underlying array comes from (x) expression - return shapes
        if (dim_) {
          return {x.shape().at(*dim_)};
        } else {
          return x.shape();
        }
      } else {
        return x.ComputeUbounds(dim_);
      }
    }
  }

  template <typename T> ConstantSubscripts Get(const Parentheses<T> &x) {
    // Case of temp variable inside parentheses - return [1, ... 1] for lower
    // bounds and shape for upper bounds
    if (getLbound_) {
      return ConstantSubscripts(x.Rank(), ConstantSubscript{1});
    } else {
      // Indicate that underlying array comes from parentheses expression.
      // Continue to unwrap expression until we hit a constant
      arrayFromParenthesesExpr = true;
      return Get(x.left());
    }
  }

  template <typename T> ConstantSubscripts Get(const Expr<T> &x) {
    // Recurse through Expr<T>'s until we hit a constant
    return common::visit([&](const auto &inner) { return Get(inner); },
        // [&](const auto &) { return 0; },
        x.u);
  }

  const std::optional<ConstantSubscript> dim_;
  const bool getLbound_;
  bool arrayFromParenthesesExpr{false};
};

template <int KIND>
Expr<Type<TypeCategory::Integer, KIND>> LBOUND(FoldingContext &context,
    FunctionRef<Type<TypeCategory::Integer, KIND>> &&funcRef) {
  using T = Type<TypeCategory::Integer, KIND>;
  ActualArguments &args{funcRef.arguments()};
  if (const auto *array{UnwrapExpr<Expr<SomeType>>(args[0])}) {
    std::optional<int> dim;
    if (funcRef.Rank() == 0) {
      // Optional DIM= argument is present: result is scalar.
      if (!CheckDimArg(args[1], *array, context.messages(), true, dim)) {
        return MakeInvalidIntrinsic<T>(std::move(funcRef));
      } else if (!dim) {
        // DIM= is present but not constant, or error
        return Expr<T>{std::move(funcRef)};
      }
    }
    if (IsAssumedRank(*array)) {
      // Would like to return 1 if DIM=.. is present, but that would be
      // hiding a runtime error if the DIM= were too large (including
      // the case of an assumed-rank argument that's scalar).
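      // For example (illustrative): given "real :: a(2:5, 3)", LBOUND(a)
      // folds to [2, 1] and LBOUND(a, DIM=1) folds to 2 via the code below.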
    } else if (int rank{array->Rank()}; rank > 0) {
      bool lowerBoundsAreOne{true};
      if (auto named{ExtractNamedEntity(*array)}) {
        const Symbol &symbol{named->GetLastSymbol()};
        if (symbol.Rank() == rank) {
          lowerBoundsAreOne = false;
          if (dim) {
            if (auto lb{GetLBOUND(context, *named, *dim)}) {
              return Fold(context, ConvertToType<T>(std::move(*lb)));
            }
          } else if (auto extents{
                         AsExtentArrayExpr(GetLBOUNDs(context, *named))}) {
            return Fold(context,
                ConvertToType<T>(Expr<ExtentType>{std::move(*extents)}));
          }
        } else {
          lowerBoundsAreOne = symbol.Rank() == 0; // LBOUND(array%component)
        }
      }
      if (IsActuallyConstant(*array)) {
        return GetConstantArrayBoundHelper::GetLbound<T>(*array, dim);
      }
      if (lowerBoundsAreOne) {
        ConstantSubscripts ones(rank, ConstantSubscript{1});
        return PackageConstantBounds<T>(std::move(ones), dim.has_value());
      }
    }
  }
  return Expr<T>{std::move(funcRef)};
}

template <int KIND>
Expr<Type<TypeCategory::Integer, KIND>> UBOUND(FoldingContext &context,
    FunctionRef<Type<TypeCategory::Integer, KIND>> &&funcRef) {
  using T = Type<TypeCategory::Integer, KIND>;
  ActualArguments &args{funcRef.arguments()};
  if (auto *array{UnwrapExpr<Expr<SomeType>>(args[0])}) {
    std::optional<int> dim;
    if (funcRef.Rank() == 0) {
      // Optional DIM= argument is present: result is scalar.
      if (!CheckDimArg(args[1], *array, context.messages(), false, dim)) {
        return MakeInvalidIntrinsic<T>(std::move(funcRef));
      } else if (!dim) {
        // DIM= is present but not constant, or error
        return Expr<T>{std::move(funcRef)};
      }
    }
    if (IsAssumedRank(*array)) {
    } else if (int rank{array->Rank()}; rank > 0) {
      bool takeBoundsFromShape{true};
      if (auto named{ExtractNamedEntity(*array)}) {
        const Symbol &symbol{named->GetLastSymbol()};
        if (symbol.Rank() == rank) {
          takeBoundsFromShape = false;
          if (dim) {
            if (auto ub{GetUBOUND(context, *named, *dim)}) {
              return Fold(context, ConvertToType<T>(std::move(*ub)));
            }
          } else {
            Shape ubounds{GetUBOUNDs(context, *named)};
            if (semantics::IsAssumedSizeArray(symbol)) {
              CHECK(!ubounds.back());
              ubounds.back() = ExtentExpr{-1};
            }
            if (auto extents{AsExtentArrayExpr(ubounds)}) {
              return Fold(context,
                  ConvertToType<T>(Expr<ExtentType>{std::move(*extents)}));
            }
          }
        } else {
          takeBoundsFromShape = symbol.Rank() == 0; // UBOUND(array%component)
        }
      }
      if (IsActuallyConstant(*array)) {
        return GetConstantArrayBoundHelper::GetUbound<T>(*array, dim);
      }
      if (takeBoundsFromShape) {
        if (auto shape{GetContextFreeShape(context, *array)}) {
          if (dim) {
            if (auto &dimSize{shape->at(*dim)}) {
              return Fold(context,
                  ConvertToType<T>(Expr<ExtentType>{std::move(*dimSize)}));
            }
          } else if (auto shapeExpr{AsExtentArrayExpr(*shape)}) {
            return Fold(context, ConvertToType<T>(std::move(*shapeExpr)));
          }
        }
      }
    }
  }
  return Expr<T>{std::move(funcRef)};
}

// LCOBOUND() & UCOBOUND()
template <int KIND>
Expr<Type<TypeCategory::Integer, KIND>> COBOUND(FoldingContext &context,
    FunctionRef<Type<TypeCategory::Integer, KIND>> &&funcRef, bool isUCOBOUND) {
  using T = Type<TypeCategory::Integer, KIND>;
  ActualArguments &args{funcRef.arguments()};
  if (const Symbol * coarray{UnwrapWholeSymbolOrComponentDataRef(args[0])}) {
    std::optional<int> dim;
    if (funcRef.Rank() == 0) {
      // Optional DIM= argument is present: result is scalar.
      if (!CheckCoDimArg(args[1], *coarray, context.messages(), dim)) {
        return MakeInvalidIntrinsic<T>(std::move(funcRef));
      } else if (!dim) {
        // DIM= is present but not constant, or error
        return Expr<T>{std::move(funcRef)};
      }
    }
    if (dim) {
      if (auto cb{isUCOBOUND ? GetUCOBOUND(*coarray, *dim)
                             : GetLCOBOUND(*coarray, *dim)}) {
        return Fold(context, ConvertToType<T>(std::move(*cb)));
      }
    } else if (auto cbs{
                   AsExtentArrayExpr(isUCOBOUND ? GetUCOBOUNDs(*coarray)
                                                : GetLCOBOUNDs(*coarray))}) {
      return Fold(context, ConvertToType<T>(Expr<ExtentType>{std::move(*cbs)}));
    }
  }
  return Expr<T>{std::move(funcRef)};
}

// COUNT()
template <typename T, int MASK_KIND> class CountAccumulator {
  using MaskT = Type<TypeCategory::Logical, MASK_KIND>;

public:
  CountAccumulator(const Constant<MaskT> &mask) : mask_{mask} {}
  void operator()(
      Scalar<T> &element, const ConstantSubscripts &at, bool /*first*/) {
    if (mask_.At(at).IsTrue()) {
      auto incremented{element.AddSigned(Scalar<T>{1})};
      overflow_ |= incremented.overflow;
      element = incremented.value;
    }
  }
  bool overflow() const { return overflow_; }
  void Done(Scalar<T> &) const {}

private:
  const Constant<MaskT> &mask_;
  bool overflow_{false};
};

template <typename T, int maskKind>
static Expr<T> FoldCount(FoldingContext &context, FunctionRef<T> &&ref) {
  using KindLogical = Type<TypeCategory::Logical, maskKind>;
  static_assert(T::category == TypeCategory::Integer);
  std::optional<int> dim;
  if (std::optional<ArrayAndMask<KindLogical>> arrayAndMask{
          ProcessReductionArgs<KindLogical>(
              context, ref.arguments(), dim, /*ARRAY=*/0, /*DIM=*/1)}) {
    CountAccumulator<T, maskKind> accumulator{arrayAndMask->array};
    Constant<T> result{DoReduction<T>(arrayAndMask->array, arrayAndMask->mask,
        dim, Scalar<T>{}, accumulator)};
    if (accumulator.overflow() &&
        context.languageFeatures().ShouldWarn(
            common::UsageWarning::FoldingException)) {
      context.messages().Say(common::UsageWarning::FoldingException,
          "Result of intrinsic function COUNT overflows its result type"_warn_en_US);
    }
    return Expr<T>{std::move(result)};
  }
  return Expr<T>{std::move(ref)};
}

// FINDLOC(), MAXLOC(), & MINLOC()
enum class WhichLocation { Findloc, Maxloc, Minloc };
template <WhichLocation WHICH> class LocationHelper {
public:
  LocationHelper(
      DynamicType &&type, ActualArguments &arg, FoldingContext &context)
      : type_{type}, arg_{arg}, context_{context} {}
  using Result = std::optional<Constant<SubscriptInteger>>;
  using Types = std::conditional_t<WHICH == WhichLocation::Findloc,
      AllIntrinsicTypes, RelationalTypes>;

  template <typename T> Result Test() const {
    if (T::category != type_.category() || T::kind != type_.kind()) {
      return std::nullopt;
    }
    CHECK(arg_.size() == (WHICH == WhichLocation::Findloc ? 6 : 5));
    Folder<T> folder{context_};
    Constant<T> *array{folder.Folding(arg_[0])};
    if (!array) {
      return std::nullopt;
    }
    std::optional<Constant<T>> value;
    if constexpr (WHICH == WhichLocation::Findloc) {
      if (const Constant<T> *p{folder.Folding(arg_[1])}) {
        value.emplace(*p);
      } else {
        return std::nullopt;
      }
    }
    std::optional<int> dim;
    Constant<LogicalResult> *mask{
        GetReductionMASK(arg_[maskArg], array->shape(), context_)};
    if ((!mask && arg_[maskArg]) ||
        !CheckReductionDIM(dim, context_, arg_, dimArg, array->Rank())) {
      return std::nullopt;
    }
    bool back{false};
    if (arg_[backArg]) {
      const auto *backConst{
          Folder<LogicalResult>{context_, /*forOptionalArgument=*/true}.Folding(
              arg_[backArg])};
      if (backConst) {
        back = backConst->GetScalarValue().value().IsTrue();
      } else {
        return std::nullopt;
      }
    }
    const RelationalOperator relation{WHICH == WhichLocation::Findloc
            ? RelationalOperator::EQ
            : WHICH == WhichLocation::Maxloc
            ? (back ? RelationalOperator::GE : RelationalOperator::GT)
            : back ? RelationalOperator::LE
                   : RelationalOperator::LT};
    // Use lower bounds of 1 exclusively.
    array->SetLowerBoundsToOne();
    ConstantSubscripts at{array->lbounds()}, maskAt, resultIndices, resultShape;
    if (mask) {
      if (auto scalarMask{mask->GetScalarValue()}) {
        // Convert into array in case of scalar MASK= (for
        // MAXLOC/MINLOC/FINDLOC mask should be conformable)
        ConstantSubscript n{GetSize(array->shape())};
        std::vector<Scalar<LogicalResult>> mask_elements(
            n, Scalar<LogicalResult>{scalarMask.value()});
        *mask = Constant<LogicalResult>{
            std::move(mask_elements), ConstantSubscripts{array->shape()}};
      }
      mask->SetLowerBoundsToOne();
      maskAt = mask->lbounds();
    }
    if (dim) { // DIM=
      if (*dim < 1 || *dim > array->Rank()) {
        context_.messages().Say("DIM=%d is out of range"_err_en_US, *dim);
        return std::nullopt;
      }
      int zbDim{*dim - 1};
      resultShape = array->shape();
      resultShape.erase(
          resultShape.begin() + zbDim); // scalar if array is vector
      ConstantSubscript dimLength{array->shape()[zbDim]};
      ConstantSubscript n{GetSize(resultShape)};
      for (ConstantSubscript j{0}; j < n; ++j) {
        ConstantSubscript hit{0};
        if constexpr (WHICH == WhichLocation::Maxloc ||
            WHICH == WhichLocation::Minloc) {
          value.reset();
        }
        for (ConstantSubscript k{0}; k < dimLength;
             ++k, ++at[zbDim], mask && ++maskAt[zbDim]) {
          if ((!mask || mask->At(maskAt).IsTrue()) &&
              IsHit(array->At(at), value, relation, back)) {
            hit = at[zbDim];
            if constexpr (WHICH == WhichLocation::Findloc) {
              if (!back) {
                break;
              }
            }
          }
        }
        resultIndices.emplace_back(hit);
        at[zbDim] = std::max<ConstantSubscript>(dimLength, 1);
        array->IncrementSubscripts(at);
        at[zbDim] = 1;
        if (mask) {
          maskAt[zbDim] = mask->lbounds()[zbDim] +
              std::max<ConstantSubscript>(dimLength, 1) - 1;
          mask->IncrementSubscripts(maskAt);
          maskAt[zbDim] = mask->lbounds()[zbDim];
        }
      }
    } else { // no DIM=
      resultShape = ConstantSubscripts{array->Rank()}; // always a vector
      ConstantSubscript n{GetSize(array->shape())};
      resultIndices = ConstantSubscripts(array->Rank(), 0);
      for (ConstantSubscript j{0}; j < n; ++j, array->IncrementSubscripts(at),
           mask && mask->IncrementSubscripts(maskAt)) {
        if ((!mask || mask->At(maskAt).IsTrue()) &&
            IsHit(array->At(at), value, relation, back)) {
          resultIndices = at;
          if constexpr (WHICH == WhichLocation::Findloc) {
            if (!back) {
              break;
            }
          }
        }
      }
    }
    std::vector<Scalar<SubscriptInteger>> resultElements;
    for (ConstantSubscript j : resultIndices) {
      resultElements.emplace_back(j);
    }
    return Constant<SubscriptInteger>{
        std::move(resultElements), std::move(resultShape)};
  }

private:
  template <typename T>
  bool IsHit(typename Constant<T>::Element element,
      std::optional<Constant<T>> &value,
      [[maybe_unused]] RelationalOperator relation,
      [[maybe_unused]] bool back) const {
    std::optional<Expr<LogicalResult>> cmp;
    bool result{true};
    if (value) {
      if constexpr (T::category == TypeCategory::Logical) {
        // array(at) .EQV. value?
        static_assert(WHICH == WhichLocation::Findloc);
        cmp.emplace(ConvertToType<LogicalResult>(
            Expr<T>{LogicalOperation<T::kind>{LogicalOperator::Eqv,
                Expr<T>{Constant<T>{element}}, Expr<T>{Constant<T>{*value}}}}));
      } else { // compare array(at) to value
        if constexpr (T::category == TypeCategory::Real &&
            (WHICH == WhichLocation::Maxloc ||
                WHICH == WhichLocation::Minloc)) {
          if (value && value->GetScalarValue().value().IsNotANumber() &&
              (back || !element.IsNotANumber())) {
            // Replace NaN
            cmp.emplace(Constant<LogicalResult>{Scalar<LogicalResult>{true}});
          }
        }
        if (!cmp) {
          cmp.emplace(PackageRelation(relation, Expr<T>{Constant<T>{element}},
              Expr<T>{Constant<T>{*value}}));
        }
      }
      Expr<LogicalResult> folded{Fold(context_, std::move(*cmp))};
      result = GetScalarConstantValue<LogicalResult>(folded).value().IsTrue();
    } else {
      // first unmasked element for MAXLOC/MINLOC - always take it
    }
    if constexpr (WHICH == WhichLocation::Maxloc ||
        WHICH == WhichLocation::Minloc) {
      if (result) {
        value.emplace(std::move(element));
      }
    }
    return result;
  }

  static constexpr int dimArg{WHICH == WhichLocation::Findloc ? 2 : 1};
  static constexpr int maskArg{dimArg + 1};
  static constexpr int backArg{maskArg + 2};

  DynamicType type_;
  ActualArguments &arg_;
  FoldingContext &context_;
};

template <WhichLocation which>
static std::optional<Constant<SubscriptInteger>> FoldLocationCall(
    ActualArguments &arg, FoldingContext &context) {
  if (arg[0]) {
    if (auto type{arg[0]->GetType()}) {
      if constexpr (which == WhichLocation::Findloc) {
        // Both ARRAY and VALUE are susceptible to conversion to a common
        // comparison type.
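        // For example (illustrative): FINDLOC of an INTEGER(4) ARRAY= with an
        // INTEGER(8) VALUE= would perform its comparisons in INTEGER(8).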
        if (arg[1]) {
          if (auto valType{arg[1]->GetType()}) {
            if (auto compareType{ComparisonType(*type, *valType)}) {
              type = compareType;
            }
          }
        }
      }
      return common::SearchTypes(
          LocationHelper<which>{std::move(*type), arg, context});
    }
  }
  return std::nullopt;
}

template <WhichLocation which, typename T>
static Expr<T> FoldLocation(FoldingContext &context, FunctionRef<T> &&ref) {
  static_assert(T::category == TypeCategory::Integer);
  if (std::optional<Constant<SubscriptInteger>> found{
          FoldLocationCall<which>(ref.arguments(), context)}) {
    return Expr<T>{Fold(
        context, ConvertToType<T>(Expr<SubscriptInteger>{std::move(*found)}))};
  } else {
    return Expr<T>{std::move(ref)};
  }
}

// for IALL, IANY, & IPARITY
template <typename T>
static Expr<T> FoldBitReduction(FoldingContext &context, FunctionRef<T> &&ref,
    Scalar<T> (Scalar<T>::*operation)(const Scalar<T> &) const,
    Scalar<T> identity) {
  static_assert(T::category == TypeCategory::Integer ||
      T::category == TypeCategory::Unsigned);
  std::optional<int> dim;
  if (std::optional<ArrayAndMask<T>> arrayAndMask{
          ProcessReductionArgs<T>(context, ref.arguments(), dim,
              /*ARRAY=*/0, /*DIM=*/1, /*MASK=*/2)}) {
    OperationAccumulator<T> accumulator{arrayAndMask->array, operation};
    return Expr<T>{DoReduction<T>(
        arrayAndMask->array, arrayAndMask->mask, dim, identity, accumulator)};
  }
  return Expr<T>{std::move(ref)};
}

// Common cases for INTEGER and UNSIGNED
template <typename T>
std::optional<Expr<T>> FoldIntrinsicFunctionCommon(
    FoldingContext &context, FunctionRef<T> &funcRef) {
  ActualArguments &args{funcRef.arguments()};
  auto *intrinsic{std::get_if<SpecificIntrinsic>(&funcRef.proc().u)};
  CHECK(intrinsic);
  std::string name{intrinsic->name};
  using Int4 = Type<TypeCategory::Integer, 4>;
  if (name == "bit_size") {
    return Expr<T>{Scalar<T>::bits};
  } else if (name == "digits") {
    if (const auto *cx{UnwrapExpr<Expr<SomeInteger>>(args[0])}) {
      return Expr<T>{common::visit(
          [](const auto &kx) {
            return Scalar<ResultType<decltype(kx)>>::DIGITS;
          },
          cx->u)};
    } else if (const auto *cx{UnwrapExpr<Expr<SomeUnsigned>>(args[0])}) {
      return Expr<T>{common::visit(
          [](const auto &kx) {
            return Scalar<ResultType<decltype(kx)>>::DIGITS + 1;
          },
          cx->u)};
    } else if (const auto *cx{UnwrapExpr<Expr<SomeReal>>(args[0])}) {
      return Expr<T>{common::visit(
          [](const auto &kx) {
            return Scalar<ResultType<decltype(kx)>>::DIGITS;
          },
          cx->u)};
    } else if (const auto *cx{UnwrapExpr<Expr<SomeComplex>>(args[0])}) {
      return Expr<T>{common::visit(
          [](const auto &kx) {
            return Scalar<typename ResultType<decltype(kx)>::Part>::DIGITS;
          },
          cx->u)};
    }
  } else if (name == "dot_product") {
    return FoldDotProduct<T>(context, std::move(funcRef));
  } else if (name == "dshiftl" || name == "dshiftr") {
    const auto fptr{
        name == "dshiftl" ? &Scalar<T>::DSHIFTL : &Scalar<T>::DSHIFTR};
    // The third argument can be of any kind; however, it must be less than
    // or equal to BIT_SIZE. It can be converted to Int4 to simplify.
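    // For example (illustrative): with default 32-bit integers,
    // DSHIFTL(1, 2, 3) folds to 8, the high 32 bits of the 64-bit
    // concatenation of the operands shifted left by 3.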
    if (const auto *argCon{Folder<T>(context).Folding(args[0])};
        argCon && argCon->empty()) {
    } else if (const auto *shiftCon{Folder<Int4>(context).Folding(args[2])}) {
      for (const auto &scalar : shiftCon->values()) {
        std::int64_t shiftVal{scalar.ToInt64()};
        if (shiftVal < 0) {
          context.messages().Say("SHIFT=%jd count for %s is negative"_err_en_US,
              std::intmax_t{shiftVal}, name);
          break;
        } else if (shiftVal > T::Scalar::bits) {
          context.messages().Say(
              "SHIFT=%jd count for %s is greater than %d"_err_en_US,
              std::intmax_t{shiftVal}, name, T::Scalar::bits);
          break;
        }
      }
    }
    return FoldElementalIntrinsic<T, T, T, Int4>(context, std::move(funcRef),
        ScalarFunc<T, T, T, Int4>(
            [&fptr](const Scalar<T> &i, const Scalar<T> &j,
                const Scalar<Int4> &shift) -> Scalar<T> {
              return std::invoke(fptr, i, j, static_cast<int>(shift.ToInt64()));
            }));
  } else if (name == "iand" || name == "ior" || name == "ieor") {
    auto fptr{&Scalar<T>::IAND};
    if (name == "iand") { // done in fptr declaration
    } else if (name == "ior") {
      fptr = &Scalar<T>::IOR;
    } else if (name == "ieor") {
      fptr = &Scalar<T>::IEOR;
    } else {
      common::die("missing case to fold intrinsic function %s", name.c_str());
    }
    return FoldElementalIntrinsic<T, T, T>(
        context, std::move(funcRef), ScalarFunc<T, T, T>(fptr));
  } else if (name == "iall") {
    return FoldBitReduction(
        context, std::move(funcRef), &Scalar<T>::IAND, Scalar<T>{}.NOT());
  } else if (name == "iany") {
    return FoldBitReduction(
        context, std::move(funcRef), &Scalar<T>::IOR, Scalar<T>{});
  } else if (name == "ibclr" || name == "ibset") {
    // Second argument can be of any kind. However, it must be smaller
    // than BIT_SIZE. It can be converted to Int4 to simplify.
    auto fptr{&Scalar<T>::IBCLR};
    if (name == "ibclr") { // done in fptr definition
    } else if (name == "ibset") {
      fptr = &Scalar<T>::IBSET;
    } else {
      common::die("missing case to fold intrinsic function %s", name.c_str());
    }
    if (const auto *argCon{Folder<T>(context).Folding(args[0])};
        argCon && argCon->empty()) {
    } else if (const auto *posCon{Folder<Int4>(context).Folding(args[1])}) {
      for (const auto &scalar : posCon->values()) {
        std::int64_t posVal{scalar.ToInt64()};
        if (posVal < 0) {
          context.messages().Say(
              "bit position for %s (%jd) is negative"_err_en_US, name,
              std::intmax_t{posVal});
          break;
        } else if (posVal >= T::Scalar::bits) {
          context.messages().Say(
              "bit position for %s (%jd) is not less than %d"_err_en_US, name,
              std::intmax_t{posVal}, T::Scalar::bits);
          break;
        }
      }
    }
    return FoldElementalIntrinsic<T, T, Int4>(context, std::move(funcRef),
        ScalarFunc<T, T, Int4>(
            [&](const Scalar<T> &i, const Scalar<Int4> &pos) -> Scalar<T> {
              return std::invoke(fptr, i, static_cast<int>(pos.ToInt64()));
            }));
  } else if (name == "ibits") {
    const auto *posCon{Folder<Int4>(context).Folding(args[1])};
    const auto *lenCon{Folder<Int4>(context).Folding(args[2])};
    if (const auto *argCon{Folder<T>(context).Folding(args[0])};
        argCon && argCon->empty()) {
    } else {
      std::size_t posCt{posCon ? posCon->size() : 0};
      std::size_t lenCt{lenCon ? lenCon->size() : 0};
      std::size_t n{std::max(posCt, lenCt)};
      for (std::size_t j{0}; j < n; ++j) {
        int posVal{j < posCt || posCt == 1
                ? static_cast<int>(posCon->values()[j % posCt].ToInt64())
                : 0};
        int lenVal{j < lenCt || lenCt == 1
                ? static_cast<int>(lenCon->values()[j % lenCt].ToInt64())
                : 0};
        if (posVal < 0) {
          context.messages().Say(
              "bit position for IBITS(POS=%jd) is negative"_err_en_US,
              std::intmax_t{posVal});
          break;
        } else if (lenVal < 0) {
          context.messages().Say(
              "bit length for IBITS(LEN=%jd) is negative"_err_en_US,
              std::intmax_t{lenVal});
          break;
        } else if (posVal + lenVal > T::Scalar::bits) {
          context.messages().Say(
              "IBITS() must have POS+LEN (>=%jd) no greater than %d"_err_en_US,
              std::intmax_t{posVal + lenVal}, T::Scalar::bits);
          break;
        }
      }
    }
    return FoldElementalIntrinsic<T, T, Int4, Int4>(context, std::move(funcRef),
        ScalarFunc<T, T, Int4, Int4>(
            [&](const Scalar<T> &i, const Scalar<Int4> &pos,
                const Scalar<Int4> &len) -> Scalar<T> {
              return i.IBITS(static_cast<int>(pos.ToInt64()),
                  static_cast<int>(len.ToInt64()));
            }));
  } else if (name == "int" || name == "int2" || name == "int8" ||
      name == "uint") {
    if (auto *expr{UnwrapExpr<Expr<SomeType>>(args[0])}) {
      return common::visit(
          [&](auto &&x) -> Expr<T> {
            using From = std::decay_t<decltype(x)>;
            if constexpr (std::is_same_v<From, BOZLiteralConstant> ||
                IsNumericCategoryExpr<From>()) {
              return Fold(context, ConvertToType<T>(std::move(x)));
            }
            DIE("int() argument type not valid");
          },
          std::move(expr->u));
    }
  } else if (name == "iparity") {
    return FoldBitReduction(
        context, std::move(funcRef), &Scalar<T>::IEOR, Scalar<T>{});
  } else if (name == "ishft" || name == "ishftc") {
    const auto *argCon{Folder<T>(context).Folding(args[0])};
    const auto *shiftCon{Folder<Int4>(context).Folding(args[1])};
    const auto *shiftVals{shiftCon ? &shiftCon->values() : nullptr};
    const auto *sizeCon{args.size() == 3
            ? Folder<Int4>{context, /*forOptionalArgument=*/true}.Folding(
                  args[2])
            : nullptr};
    const auto *sizeVals{sizeCon ? &sizeCon->values() : nullptr};
    if ((argCon && argCon->empty()) || !shiftVals || shiftVals->empty() ||
        (sizeVals && sizeVals->empty())) {
      // size= and shift= values don't need to be checked
    } else {
      for (const auto &scalar : *shiftVals) {
        std::int64_t shiftVal{scalar.ToInt64()};
        if (shiftVal < -T::Scalar::bits) {
          context.messages().Say(
              "SHIFT=%jd count for %s is less than %d"_err_en_US,
              std::intmax_t{shiftVal}, name, -T::Scalar::bits);
          break;
        } else if (shiftVal > T::Scalar::bits) {
          context.messages().Say(
              "SHIFT=%jd count for %s is greater than %d"_err_en_US,
              std::intmax_t{shiftVal}, name, T::Scalar::bits);
          break;
        }
      }
      if (sizeVals) {
        for (const auto &scalar : *sizeVals) {
          std::int64_t sizeVal{scalar.ToInt64()};
          if (sizeVal <= 0) {
            context.messages().Say(
                "SIZE=%jd count for ishftc is not positive"_err_en_US,
                std::intmax_t{sizeVal}, name);
            break;
          } else if (sizeVal > T::Scalar::bits) {
            context.messages().Say(
                "SIZE=%jd count for ishftc is greater than %d"_err_en_US,
                std::intmax_t{sizeVal}, T::Scalar::bits);
            break;
          }
        }
        if (shiftVals->size() == 1 || sizeVals->size() == 1 ||
            shiftVals->size() == sizeVals->size()) {
          auto iters{std::max(shiftVals->size(), sizeVals->size())};
          for (std::size_t j{0}; j < iters; ++j) {
            auto shiftVal{static_cast<int>(
                (*shiftVals)[j % shiftVals->size()].ToInt64())};
            auto sizeVal{
                static_cast<int>((*sizeVals)[j % sizeVals->size()].ToInt64())};
            if (sizeVal > 0 && std::abs(shiftVal) > sizeVal) {
              context.messages().Say(
                  "SHIFT=%jd count for ishftc is greater in magnitude than SIZE=%jd"_err_en_US,
                  std::intmax_t{shiftVal}, std::intmax_t{sizeVal});
              break;
            }
          }
        }
      }
    }
    if (name == "ishft") {
      return FoldElementalIntrinsic<T, T, Int4>(context, std::move(funcRef),
          ScalarFunc<T, T, Int4>(
              [&](const Scalar<T> &i, const Scalar<Int4> &shift) -> Scalar<T> {
                return i.ISHFT(static_cast<int>(shift.ToInt64()));
              }));
    } else if (!args.at(2)) { // ISHFTC(no SIZE=)
      return FoldElementalIntrinsic<T, T, Int4>(context, std::move(funcRef),
          ScalarFunc<T, T, Int4>(
              [&](const Scalar<T> &i, const Scalar<Int4> &shift) -> Scalar<T> {
                return i.ISHFTC(static_cast<int>(shift.ToInt64()));
              }));
    } else { // ISHFTC(with SIZE=)
      return FoldElementalIntrinsic<T, T, Int4, Int4>(context,
          std::move(funcRef),
          ScalarFunc<T, T, Int4, Int4>(
              [&](const Scalar<T> &i, const Scalar<Int4> &shift,
                  const Scalar<Int4> &size) -> Scalar<T> {
                auto shiftVal{static_cast<int>(shift.ToInt64())};
                auto sizeVal{static_cast<int>(size.ToInt64())};
                return i.ISHFTC(shiftVal, sizeVal);
              }),
          /*hasOptionalArgument=*/true);
    }
  } else if (name == "izext" || name == "jzext") {
    if (args.size() == 1) {
      if (auto *expr{UnwrapExpr<Expr<SomeKind<T::category>>>(args[0])}) {
        // Rewrite to IAND(INT(n,k),255_k) for k=KIND(T)
        intrinsic->name = "iand";
        auto converted{ConvertToType<T>(std::move(*expr))};
        *expr =
            Fold(context, Expr<SomeKind<T::category>>{std::move(converted)});
        args.emplace_back(AsGenericExpr(Expr<T>{Scalar<T>{255}}));
        return FoldIntrinsicFunction(context, std::move(funcRef));
      }
    }
  } else if (name == "maskl" || name == "maskr" || name == "umaskl" ||
      name == "umaskr") {
    // Argument can be of any kind but value has to be smaller than BIT_SIZE.
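    // (For example, with a 32-bit result kind, MASKL(3) folds to Z'E0000000'
    // and MASKR(3) folds to 7.)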
    // It can be safely converted to Int4 to simplify.
    const auto fptr{name == "maskl" || name == "umaskl" ? &Scalar<T>::MASKL
                                                        : &Scalar<T>::MASKR};
    return FoldElementalIntrinsic<T, Int4>(context, std::move(funcRef),
        ScalarFunc<T, Int4>([&fptr](const Scalar<Int4> &places) -> Scalar<T> {
          return fptr(static_cast<int>(places.ToInt64()));
        }));
  } else if (name == "matmul") {
    return FoldMatmul(context, std::move(funcRef));
  } else if (name == "max") {
    return FoldMINorMAX(context, std::move(funcRef), Ordering::Greater);
  } else if (name == "maxval") {
    return FoldMaxvalMinval<T>(context, std::move(funcRef),
        RelationalOperator::GT,
        T::category == TypeCategory::Unsigned ? typename T::Scalar{}
                                              : T::Scalar::Least());
  } else if (name == "merge_bits") {
    return FoldElementalIntrinsic<T, T, T, T>(
        context, std::move(funcRef), &Scalar<T>::MERGE_BITS);
  } else if (name == "min") {
    return FoldMINorMAX(context, std::move(funcRef), Ordering::Less);
  } else if (name == "minval") {
    return FoldMaxvalMinval<T>(context, std::move(funcRef),
        RelationalOperator::LT,
        T::category == TypeCategory::Unsigned ? typename T::Scalar{}.NOT()
                                              : T::Scalar::HUGE());
  } else if (name == "not") {
    return FoldElementalIntrinsic<T, T>(
        context, std::move(funcRef), &Scalar<T>::NOT);
  } else if (name == "product") {
    return FoldProduct<T>(context, std::move(funcRef), Scalar<T>{1});
  } else if (name == "radix") {
    return Expr<T>{2};
  } else if (name == "shifta" || name == "shiftr" || name == "shiftl") {
    // The second argument can be of any kind; however, it must be less than
    // or equal to BIT_SIZE. It can be converted to Int4 to simplify.
    auto fptr{&Scalar<T>::SHIFTA};
    if (name == "shifta") { // done in fptr definition
    } else if (name == "shiftr") {
      fptr = &Scalar<T>::SHIFTR;
    } else if (name == "shiftl") {
      fptr = &Scalar<T>::SHIFTL;
    } else {
      common::die("missing case to fold intrinsic function %s", name.c_str());
    }
    if (const auto *argCon{Folder<T>(context).Folding(args[0])};
        argCon && argCon->empty()) {
    } else if (const auto *shiftCon{Folder<Int4>(context).Folding(args[1])}) {
      for (const auto &scalar : shiftCon->values()) {
        std::int64_t shiftVal{scalar.ToInt64()};
        if (shiftVal < 0) {
          context.messages().Say("SHIFT=%jd count for %s is negative"_err_en_US,
              std::intmax_t{shiftVal}, name, -T::Scalar::bits);
          break;
        } else if (shiftVal > T::Scalar::bits) {
          context.messages().Say(
              "SHIFT=%jd count for %s is greater than %d"_err_en_US,
              std::intmax_t{shiftVal}, name, T::Scalar::bits);
          break;
        }
      }
    }
    return FoldElementalIntrinsic<T, T, Int4>(context, std::move(funcRef),
        ScalarFunc<T, T, Int4>(
            [&](const Scalar<T> &i, const Scalar<Int4> &shift) -> Scalar<T> {
              return std::invoke(fptr, i, static_cast<int>(shift.ToInt64()));
            }));
  } else if (name == "sum") {
    return FoldSum<T>(context, std::move(funcRef));
  }
  return std::nullopt;
}

template <int KIND>
Expr<Type<TypeCategory::Integer, KIND>> FoldIntrinsicFunction(
    FoldingContext &context,
    FunctionRef<Type<TypeCategory::Integer, KIND>> &&funcRef) {
  if (auto foldedCommon{FoldIntrinsicFunctionCommon(context, funcRef)}) {
    return std::move(*foldedCommon);
  }

  using T = Type<TypeCategory::Integer, KIND>;
  ActualArguments &args{funcRef.arguments()};
  auto *intrinsic{std::get_if<SpecificIntrinsic>(&funcRef.proc().u)};
  CHECK(intrinsic);
  std::string name{intrinsic->name};

  auto FromInt64{[&name, &context](std::int64_t n) {
    Scalar<T> result{n};
    if (result.ToInt64() != n &&
        context.languageFeatures().ShouldWarn(
            common::UsageWarning::FoldingException)) {
      context.messages().Say(common::UsageWarning::FoldingException,
          "Result of intrinsic function '%s' (%jd) overflows its result type"_warn_en_US,
          name, std::intmax_t{n});
    }
    return result;
  }};

  if (name == "abs") { // incl. babs, iiabs, jiaabs, & kiabs
    return FoldElementalIntrinsic<T, T>(context, std::move(funcRef),
        ScalarFunc<T, T>([&context](const Scalar<T> &i) -> Scalar<T> {
          typename Scalar<T>::ValueWithOverflow j{i.ABS()};
          if (j.overflow &&
              context.languageFeatures().ShouldWarn(
                  common::UsageWarning::FoldingException)) {
            context.messages().Say(common::UsageWarning::FoldingException,
                "abs(integer(kind=%d)) folding overflowed"_warn_en_US, KIND);
          }
          return j.value;
        }));
  } else if (name == "ceiling" || name == "floor" || name == "nint") {
    if (const auto *cx{UnwrapExpr<Expr<SomeReal>>(args[0])}) {
      // NINT rounds ties away from zero, not to even
      common::RoundingMode mode{name == "ceiling" ? common::RoundingMode::Up
              : name == "floor"                   ? common::RoundingMode::Down
              : common::RoundingMode::TiesAwayFromZero};
      return common::visit(
          [&](const auto &kx) {
            using TR = ResultType<decltype(kx)>;
            return FoldElementalIntrinsic<T, TR>(context, std::move(funcRef),
                ScalarFunc<T, TR>([&](const Scalar<TR> &x) {
                  auto y{x.template ToInteger<Scalar<T>>(mode)};
                  if (y.flags.test(RealFlag::Overflow) &&
                      context.languageFeatures().ShouldWarn(
                          common::UsageWarning::FoldingException)) {
                    context.messages().Say(
                        common::UsageWarning::FoldingException,
                        "%s intrinsic folding overflow"_warn_en_US, name);
                  }
                  return y.value;
                }));
          },
          cx->u);
    }
  } else if (name == "count") {
    int maskKind = args[0]->GetType()->kind();
    switch (maskKind) {
      SWITCH_COVERS_ALL_CASES
    case 1:
      return FoldCount<T, 1>(context, std::move(funcRef));
    case 2:
      return FoldCount<T, 2>(context, std::move(funcRef));
    case 4:
      return FoldCount<T, 4>(context, std::move(funcRef));
    case 8:
      return FoldCount<T, 8>(context, std::move(funcRef));
    }
  } else if (name == "dim") {
    return FoldElementalIntrinsic<T, T, T>(context, std::move(funcRef),
        ScalarFunc<T, T, T>(
            [&context](const Scalar<T> &x, const Scalar<T> &y) -> Scalar<T> {
              auto result{x.DIM(y)};
              if (result.overflow &&
                  context.languageFeatures().ShouldWarn(
                      common::UsageWarning::FoldingException)) {
                context.messages().Say(common::UsageWarning::FoldingException,
                    "DIM intrinsic folding overflow"_warn_en_US);
              }
              return result.value;
            }));
  } else if (name == "exponent") {
    if (auto *sx{UnwrapExpr<Expr<SomeReal>>(args[0])}) {
      return common::visit(
          [&funcRef, &context](const auto &x) -> Expr<T> {
            using TR = typename std::decay_t<decltype(x)>::Result;
            return FoldElementalIntrinsic<T, TR>(context, std::move(funcRef),
                &Scalar<TR>::template EXPONENT<Scalar<T>>);
          },
          sx->u);
    } else {
      DIE("exponent argument must be real");
    }
  } else if (name == "findloc") {
    return FoldLocation<WhichLocation::Findloc, T>(context, std::move(funcRef));
  } else if (name == "huge") {
    return Expr<T>{Scalar<T>::HUGE()};
  } else if (name == "iachar" || name == "ichar") {
    auto *someChar{UnwrapExpr<Expr<SomeCharacter>>(args[0])};
    CHECK(someChar);
    if (auto len{ToInt64(someChar->LEN())}) {
      if (len.value() < 1) {
        context.messages().Say(
            "Character in intrinsic function %s must have length one"_err_en_US,
            name);
      } else if (len.value() > 1 &&
          context.languageFeatures().ShouldWarn(
              common::UsageWarning::Portability)) {
        // Do not die, this was not checked before
        context.messages().Say(common::UsageWarning::Portability,
            "Character in intrinsic function %s should have length one"_port_en_US,
            name);
      } else {
        return common::visit(
            [&funcRef, &context, &FromInt64](const auto &str) -> Expr<T> {
              using Char = typename std::decay_t<decltype(str)>::Result;
              (void)FromInt64;
              return FoldElementalIntrinsic<T, Char>(context,
                  std::move(funcRef),
                  ScalarFunc<T, Char>(
#ifndef _MSC_VER
                      [&FromInt64](const Scalar<Char> &c) {
                        return FromInt64(CharacterUtils<Char::kind>::ICHAR(
                            CharacterUtils<Char::kind>::Resize(c, 1)));
                      }));
#else // _MSC_VER
                      // MSVC 14 gets confused by the original code above and
                      // ends up emitting an error about passing a std::string
                      // to the std::u16string instantiation of
                      // CharacterUtils<2>::ICHAR(). Can't find a work-around,
                      // so remove the FromInt64 error checking lambda that
                      // seems to have caused the problem.
                      [](const Scalar<Char> &c) {
                        return CharacterUtils<Char::kind>::ICHAR(
                            CharacterUtils<Char::kind>::Resize(c, 1));
                      }));
#endif // _MSC_VER
            },
            someChar->u);
      }
    }
  } else if (name == "index" || name == "scan" || name == "verify") {
    if (auto *charExpr{UnwrapExpr<Expr<SomeCharacter>>(args[0])}) {
      return common::visit(
          [&](const auto &kch) -> Expr<T> {
            using TC = typename std::decay_t<decltype(kch)>::Result;
            if (UnwrapExpr<Expr<SomeLogical>>(args[2])) { // BACK=
              return FoldElementalIntrinsic<T, TC, TC, LogicalResult>(context,
                  std::move(funcRef),
                  ScalarFunc<T, TC, TC, LogicalResult>{
                      [&name, &FromInt64](const Scalar<TC> &str,
                          const Scalar<TC> &other,
                          const Scalar<LogicalResult> &back) {
                        return FromInt64(name == "index"
                                ? CharacterUtils<TC::kind>::INDEX(
                                      str, other, back.IsTrue())
                                : name == "scan"
                                ? CharacterUtils<TC::kind>::SCAN(
                                      str, other, back.IsTrue())
                                : CharacterUtils<TC::kind>::VERIFY(
                                      str, other, back.IsTrue()));
                      }});
            } else {
              return FoldElementalIntrinsic<T, TC, TC>(context,
                  std::move(funcRef),
                  ScalarFunc<T, TC, TC>{
                      [&name, &FromInt64](
                          const Scalar<TC> &str, const Scalar<TC> &other) {
                        return FromInt64(name == "index"
                                ? CharacterUtils<TC::kind>::INDEX(str, other)
                                : name == "scan"
                                ? CharacterUtils<TC::kind>::SCAN(str, other)
                                : CharacterUtils<TC::kind>::VERIFY(str, other));
                      }});
            }
          },
          charExpr->u);
    } else {
      DIE("first argument must be CHARACTER");
    }
  } else if (name == "int_ptr_kind") {
    return Expr<T>{8};
  } else if (name == "kind") {
    // FoldOperation(FunctionRef &&) in fold-implementation.h will not
    // have folded the argument; in the case of TypeParamInquiry,
    // try to get the type of the parameter itself.
    if (const auto *expr{args[0] ? args[0]->UnwrapExpr() : nullptr}) {
      if (const auto *inquiry{UnwrapExpr<TypeParamInquiry>(*expr)}) {
        if (const auto *typeSpec{inquiry->parameter().GetType()}) {
          if (const auto *intrinType{typeSpec->AsIntrinsic()}) {
            if (auto k{ToInt64(Fold(
                    context, Expr<SubscriptInteger>{intrinType->kind()}))}) {
              return Expr<T>{*k};
            }
          }
        }
      } else if (auto dyType{expr->GetType()}) {
        return Expr<T>{dyType->kind()};
      }
    }
  } else if (name == "lbound") {
    return LBOUND(context, std::move(funcRef));
  } else if (name == "lcobound") {
    return COBOUND(context, std::move(funcRef), /*isUCOBOUND=*/false);
  } else if (name == "leadz" || name == "trailz" || name == "poppar" ||
      name == "popcnt") {
    if (auto *sn{UnwrapExpr<Expr<SomeKind<T::category>>>(args[0])}) {
      return common::visit(
          [&funcRef, &context, &name](const auto &n) -> Expr<T> {
            using TI = typename std::decay_t<decltype(n)>::Result;
            if (name == "poppar") {
              return FoldElementalIntrinsic<T, TI>(context, std::move(funcRef),
                  ScalarFunc<T, TI>([](const Scalar<TI> &i) -> Scalar<T> {
                    return Scalar<T>{i.POPPAR() ? 1 : 0};
                  }));
            }
            auto fptr{&Scalar<TI>::LEADZ};
            if (name == "leadz") { // done in fptr definition
            } else if (name == "trailz") {
              fptr = &Scalar<TI>::TRAILZ;
            } else if (name == "popcnt") {
              fptr = &Scalar<TI>::POPCNT;
            } else {
              common::die(
                  "missing case to fold intrinsic function %s", name.c_str());
            }
            return FoldElementalIntrinsic<T, TI>(context, std::move(funcRef),
                // `i` should be declared as `const Scalar<TI>&`.
                // We declare it as `auto` to workaround an msvc bug:
                // https://developercommunity.visualstudio.com/t/Regression:-nested-closure-assumes-wrong/10130223
                ScalarFunc<T, TI>([&fptr](const auto &i) -> Scalar<T> {
                  return Scalar<T>{std::invoke(fptr, i)};
                }));
          },
          sn->u);
    } else {
      DIE("leadz argument must be integer");
    }
  } else if (name == "len") {
    if (auto *charExpr{UnwrapExpr<Expr<SomeCharacter>>(args[0])}) {
      return common::visit(
          [&](auto &kx) {
            if (auto len{kx.LEN()}) {
              if (IsScopeInvariantExpr(*len)) {
                return Fold(context, ConvertToType<T>(*std::move(len)));
              } else {
                return Expr<T>{std::move(funcRef)};
              }
            } else {
              return Expr<T>{std::move(funcRef)};
            }
          },
          charExpr->u);
    } else {
      DIE("len() argument must be of character type");
    }
  } else if (name == "len_trim") {
    if (auto *charExpr{UnwrapExpr<Expr<SomeCharacter>>(args[0])}) {
      return common::visit(
          [&](const auto &kch) -> Expr<T> {
            using TC = typename std::decay_t<decltype(kch)>::Result;
            return FoldElementalIntrinsic<T, TC>(context, std::move(funcRef),
                ScalarFunc<T, TC>{[&FromInt64](const Scalar<TC> &str) {
                  return FromInt64(CharacterUtils<TC::kind>::LEN_TRIM(str));
                }});
          },
          charExpr->u);
    } else {
      DIE("len_trim() argument must be of character type");
    }
  } else if (name == "max0" || name == "max1") {
    return RewriteSpecificMINorMAX(context, std::move(funcRef));
  } else if (name == "maxexponent") {
    if (auto *sx{UnwrapExpr<Expr<SomeReal>>(args[0])}) {
      return common::visit(
          [](const auto &x) {
            using TR = typename std::decay_t<decltype(x)>::Result;
            return Expr<T>{Scalar<TR>::MAXEXPONENT};
          },
          sx->u);
    }
  } else if (name == "maxloc") {
    return FoldLocation<WhichLocation::Maxloc, T>(context, std::move(funcRef));
  } else if (name == "min0" || name == "min1") {
    return RewriteSpecificMINorMAX(context, std::move(funcRef));
  } else if (name == "minexponent") {
    if (auto *sx{UnwrapExpr<Expr<SomeReal>>(args[0])}) {
      return common::visit(
          [](const auto &x) {
            using TR = typename std::decay_t<decltype(x)>::Result;
            return Expr<T>{Scalar<TR>::MINEXPONENT};
          },
          sx->u);
    }
  } else if (name == "minloc") {
    return FoldLocation<WhichLocation::Minloc, T>(context, std::move(funcRef));
  } else if (name == "mod") {
    bool badPConst{false};
    if (auto *pExpr{UnwrapExpr<Expr<T>>(args[1])}) {
      *pExpr = Fold(context, std::move(*pExpr));
      if (auto pConst{GetScalarConstantValue<T>(*pExpr)}; pConst &&
          pConst->IsZero() &&
          context.languageFeatures().ShouldWarn(
              common::UsageWarning::FoldingAvoidsRuntimeCrash)) {
        context.messages().Say(common::UsageWarning::FoldingAvoidsRuntimeCrash,
            "MOD: P argument is zero"_warn_en_US);
        badPConst = true;
      }
    }
    return FoldElementalIntrinsic<T, T, T>(context, std::move(funcRef),
        ScalarFuncWithContext<T, T, T>(
            [badPConst](FoldingContext &context, const Scalar<T> &x,
                const Scalar<T> &y) -> Scalar<T> {
              auto quotRem{x.DivideSigned(y)};
              if (context.languageFeatures().ShouldWarn(
                      common::UsageWarning::FoldingAvoidsRuntimeCrash)) {
                if (!badPConst && quotRem.divisionByZero) {
                  context.messages().Say(
                      common::UsageWarning::FoldingAvoidsRuntimeCrash,
                      "mod() by zero"_warn_en_US);
                } else if (quotRem.overflow) {
                  context.messages().Say(
                      common::UsageWarning::FoldingAvoidsRuntimeCrash,
                      "mod() folding overflowed"_warn_en_US);
                }
              }
              return quotRem.remainder;
            }));
  } else if (name == "modulo") {
    bool badPConst{false};
    if (auto *pExpr{UnwrapExpr<Expr<T>>(args[1])}) {
      *pExpr = Fold(context, std::move(*pExpr));
      if (auto pConst{GetScalarConstantValue<T>(*pExpr)}; pConst &&
          pConst->IsZero() &&
          context.languageFeatures().ShouldWarn(
              common::UsageWarning::FoldingAvoidsRuntimeCrash)) {
        context.messages().Say(common::UsageWarning::FoldingAvoidsRuntimeCrash,
            "MODULO: P argument is zero"_warn_en_US);
        badPConst = true;
      }
    }
    return FoldElementalIntrinsic<T, T, T>(context, std::move(funcRef),
        ScalarFuncWithContext<T, T, T>([badPConst](FoldingContext &context,
                                           const Scalar<T> &x,
                                           const Scalar<T> &y) -> Scalar<T> {
          auto result{x.MODULO(y)};
          if (!badPConst && result.overflow &&
              context.languageFeatures().ShouldWarn(
                  common::UsageWarning::FoldingException)) {
            context.messages().Say(common::UsageWarning::FoldingException,
                "modulo() folding overflowed"_warn_en_US);
          }
          return result.value;
        }));
  } else if (name == "precision") {
    if (const auto *cx{UnwrapExpr<Expr<SomeReal>>(args[0])}) {
      return Expr<T>{common::visit(
          [](const auto &kx) {
            return Scalar<ResultType<decltype(kx)>>::PRECISION;
          },
          cx->u)};
    } else if (const auto *cx{UnwrapExpr<Expr<SomeComplex>>(args[0])}) {
      return Expr<T>{common::visit(
          [](const auto &kx) {
            return Scalar<typename ResultType<decltype(kx)>::Part>::PRECISION;
          },
          cx->u)};
    }
  } else if (name == "range") {
    if (const auto *cx{UnwrapExpr<Expr<SomeInteger>>(args[0])}) {
      return Expr<T>{common::visit(
          [](const auto &kx) {
            return Scalar<ResultType<decltype(kx)>>::RANGE;
          },
          cx->u)};
    } else if (const auto *cx{UnwrapExpr<Expr<SomeUnsigned>>(args[0])}) {
      return Expr<T>{common::visit(
          [](const auto &kx) {
            return Scalar<ResultType<decltype(kx)>>::UnsignedRANGE;
          },
          cx->u)};
    } else if (const auto *cx{UnwrapExpr<Expr<SomeReal>>(args[0])}) {
      return Expr<T>{common::visit(
          [](const auto &kx) {
            return Scalar<ResultType<decltype(kx)>>::RANGE;
          },
          cx->u)};
    } else if (const auto *cx{UnwrapExpr<Expr<SomeComplex>>(args[0])}) {
      return Expr<T>{common::visit(
          [](const auto &kx) {
            return Scalar<typename ResultType<decltype(kx)>::Part>::RANGE;
          },
          cx->u)};
    }
  } else if (name == "rank") {
    if (args[0]) {
      const Symbol *symbol{nullptr};
      if (auto dataRef{ExtractDataRef(args[0])}) {
        symbol = &dataRef->GetLastSymbol();
      } else {
        symbol = args[0]->GetAssumedTypeDummy();
      }
      if (symbol && IsAssumedRank(*symbol)) {
        // DescriptorInquiry can only be placed in expression of kind
        // DescriptorInquiry::Result::kind.
        return ConvertToType<T>(
            Expr<Type<TypeCategory::Integer, DescriptorInquiry::Result::kind>>{
                DescriptorInquiry{
                    NamedEntity{*symbol}, DescriptorInquiry::Field::Rank}});
      }
      return Expr<T>{args[0]->Rank()};
    }
  } else if (name == "selected_char_kind") {
    if (const auto *chCon{UnwrapExpr<Constant<TypeOf<std::string>>>(args[0])}) {
      if (std::optional<std::string> value{chCon->GetScalarValue()}) {
        int defaultKind{
            context.defaults().GetDefaultKind(TypeCategory::Character)};
        return Expr<T>{SelectedCharKind(*value, defaultKind)};
      }
    }
  } else if (name == "selected_int_kind" || name == "selected_unsigned_kind") {
    if (auto p{ToInt64(args[0])}) {
      return Expr<T>{context.targetCharacteristics().SelectedIntKind(*p)};
    }
  } else if (name == "selected_logical_kind") {
    if (auto p{ToInt64(args[0])}) {
      return Expr<T>{context.targetCharacteristics().SelectedLogicalKind(*p)};
    }
  } else if (name == "selected_real_kind" ||
      name == "__builtin_ieee_selected_real_kind") {
    if (auto p{GetInt64ArgOr(args[0], 0)}) {
      if (auto r{GetInt64ArgOr(args[1], 0)}) {
        if (auto radix{GetInt64ArgOr(args[2], 2)}) {
          return Expr<T>{
              context.targetCharacteristics().SelectedRealKind(*p, *r, *radix)};
        }
      }
    }
  } else if (name == "shape") {
    if (auto shape{GetContextFreeShape(context, args[0])}) {
      if (auto shapeExpr{AsExtentArrayExpr(*shape)}) {
        return Fold(context, ConvertToType<T>(std::move(*shapeExpr)));
      }
    }
  } else if (name == "sign") {
    return FoldElementalIntrinsic<T, T, T>(context, std::move(funcRef),
        ScalarFunc<T, T, T>([&context](const Scalar<T> &j,
                                const Scalar<T> &k) -> Scalar<T> {
          typename Scalar<T>::ValueWithOverflow result{j.SIGN(k)};
          if (result.overflow &&
              context.languageFeatures().ShouldWarn(
                  common::UsageWarning::FoldingException)) {
            context.messages().Say(common::UsageWarning::FoldingException,
                "sign(integer(kind=%d)) folding overflowed"_warn_en_US, KIND);
          }
          return result.value;
        }));
  } else if (name == "size") {
    if (auto shape{GetContextFreeShape(context, args[0])}) {
      if (args[1]) { // DIM= is present, get one extent
        std::optional<int> dim;
        if (const auto *array{args[0].value().UnwrapExpr()}; array &&
            !CheckDimArg(args[1], *array, context.messages(), false, dim)) {
          return MakeInvalidIntrinsic<T>(std::move(funcRef));
        } else if (dim) {
          if (auto &extent{shape->at(*dim)}) {
            return Fold(context, ConvertToType<T>(std::move(*extent)));
          }
        }
      } else if (auto extents{common::AllElementsPresent(std::move(*shape))}) {
        // DIM= is absent; compute PRODUCT(SHAPE())
        ExtentExpr product{1};
        for (auto &&extent : std::move(*extents)) {
          product = std::move(product) * std::move(extent);
        }
        return Expr<T>{ConvertToType<T>(Fold(context, std::move(product)))};
      }
    }
  } else if (name == "sizeof") { // in bytes; extension
    if (auto info{
            characteristics::TypeAndShape::Characterize(args[0], context)}) {
      if (auto bytes{info->MeasureSizeInBytes(context)}) {
        return Expr<T>{Fold(context, ConvertToType<T>(std::move(*bytes)))};
      }
    }
  } else if (name == "storage_size") { // in bits
    if (auto info{
            characteristics::TypeAndShape::Characterize(args[0], context)}) {
      if (auto bytes{info->MeasureElementSizeInBytes(context, true)}) {
        return Expr<T>{
            Fold(context, Expr<T>{8} * ConvertToType<T>(std::move(*bytes)))};
      }
    }
  } else if (name == "ubound") {
    return UBOUND(context, std::move(funcRef));
  } else if (name == "ucobound") {
    return COBOUND(context, std::move(funcRef), /*isUCOBOUND=*/true);
  } else if (name == "__builtin_numeric_storage_size") {
    if (!context.moduleFileName()) {
      // Don't fold this reference until it appears in the module file
      // for ISO_FORTRAN_ENV -- the value depends on the compiler options
      // that might be in force.
    } else {
      auto intBytes{
          context.targetCharacteristics().GetByteSize(TypeCategory::Integer,
              context.defaults().GetDefaultKind(TypeCategory::Integer))};
      auto realBytes{
          context.targetCharacteristics().GetByteSize(TypeCategory::Real,
              context.defaults().GetDefaultKind(TypeCategory::Real))};
      if (intBytes != realBytes &&
          context.languageFeatures().ShouldWarn(
              common::UsageWarning::FoldingValueChecks)) {
        context.messages().Say(common::UsageWarning::FoldingValueChecks,
            *context.moduleFileName(),
            "NUMERIC_STORAGE_SIZE from ISO_FORTRAN_ENV is not well-defined when default INTEGER and REAL are not consistent due to compiler options"_warn_en_US);
      }
      return Expr<T>{8 * std::min(intBytes, realBytes)};
    }
  }
  return Expr<T>{std::move(funcRef)};
}

template <int KIND>
Expr<Type<TypeCategory::Unsigned, KIND>> FoldIntrinsicFunction(
    FoldingContext &context,
    FunctionRef<Type<TypeCategory::Unsigned, KIND>> &&funcRef) {
  if (auto foldedCommon{FoldIntrinsicFunctionCommon(context, funcRef)}) {
    return std::move(*foldedCommon);
  }
  using T = Type<TypeCategory::Unsigned, KIND>;
  ActualArguments &args{funcRef.arguments()};
  auto *intrinsic{std::get_if<SpecificIntrinsic>(&funcRef.proc().u)};
  CHECK(intrinsic);
  std::string name{intrinsic->name};
  if (name == "huge") {
    return Expr<T>{Scalar<T>{}.NOT()};
  } else if (name == "mod" || name == "modulo") {
    bool badPConst{false};
    if (auto *pExpr{UnwrapExpr<Expr<T>>(args[1])}) {
      *pExpr = Fold(context, std::move(*pExpr));
      if (auto pConst{GetScalarConstantValue<T>(*pExpr)}; pConst &&
          pConst->IsZero() &&
          context.languageFeatures().ShouldWarn(
              common::UsageWarning::FoldingAvoidsRuntimeCrash)) {
        context.messages().Say(common::UsageWarning::FoldingAvoidsRuntimeCrash,
            "%s: P argument is zero"_warn_en_US, name);
        badPConst = true;
      }
    }
    return FoldElementalIntrinsic<T, T, T>(context, std::move(funcRef),
        ScalarFuncWithContext<T, T, T>(
            [badPConst, &name](FoldingContext &context, const Scalar<T> &x,
                const Scalar<T> &y) -> Scalar<T> {
              auto quotRem{x.DivideUnsigned(y)};
              if (context.languageFeatures().ShouldWarn(
                      common::UsageWarning::FoldingAvoidsRuntimeCrash)) {
                if (!badPConst && quotRem.divisionByZero) {
                  context.messages().Say(
                      common::UsageWarning::FoldingAvoidsRuntimeCrash,
                      "%s() by zero"_warn_en_US, name);
                }
              }
              return quotRem.remainder;
            }));
  }
  return Expr<T>{std::move(funcRef)};
}

// Substitutes a bare type parameter reference with its value if it has one now
// in an instantiation. Bare LEN type parameters are substituted only when
// the known value is constant.
Expr<TypeParamInquiry::Result> FoldOperation(
    FoldingContext &context, TypeParamInquiry &&inquiry) {
  std::optional<NamedEntity> base{inquiry.base()};
  parser::CharBlock parameterName{inquiry.parameter().name()};
  if (base) {
    // Handling "designator%typeParam". Get the value of the type parameter
    // from the instantiation of the base
    if (const semantics::DeclTypeSpec *
        declType{base->GetLastSymbol().GetType()}) {
      if (const semantics::ParamValue *
          paramValue{
              declType->derivedTypeSpec().FindParameter(parameterName)}) {
        const semantics::MaybeIntExpr &paramExpr{paramValue->GetExplicit()};
        if (paramExpr && IsConstantExpr(*paramExpr)) {
          Expr<SomeInteger> intExpr{*paramExpr};
          return Fold(context,
              ConvertToType<TypeParamInquiry::Result>(std::move(intExpr)));
        }
      }
    }
  } else {
    // A "bare" type parameter: replace with its value, if that's now known
    // in a current derived type instantiation.
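    // For example (illustrative): given "type t(k)" with "integer, kind :: k",
    // a bare reference to "k" in the body of the type folds to 8 while
    // instantiating type(t(8)).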
    if (const auto *pdt{context.pdtInstance()}) {
      auto restorer{context.WithoutPDTInstance()}; // don't loop
      bool isLen{false};
      if (const semantics::Scope * scope{pdt->scope()}) {
        auto iter{scope->find(parameterName)};
        if (iter != scope->end()) {
          const Symbol &symbol{*iter->second};
          const auto *details{symbol.detailsIf<semantics::TypeParamDetails>()};
          if (details) {
            isLen = details->attr() == common::TypeParamAttr::Len;
            const semantics::MaybeIntExpr &initExpr{details->init()};
            if (initExpr && IsConstantExpr(*initExpr) &&
                (!isLen || ToInt64(*initExpr))) {
              Expr<SomeInteger> expr{*initExpr};
              return Fold(context,
                  ConvertToType<TypeParamInquiry::Result>(std::move(expr)));
            }
          }
        }
      }
      if (const auto *value{pdt->FindParameter(parameterName)}) {
        if (value->isExplicit()) {
          auto folded{Fold(context,
              AsExpr(ConvertToType<TypeParamInquiry::Result>(
                  Expr<SomeInteger>{value->GetExplicit().value()})))};
          if (!isLen || ToInt64(folded)) {
            return folded;
          }
        }
      }
    }
  }
  return AsExpr(std::move(inquiry));
}

std::optional<std::int64_t> ToInt64(const Expr<SomeInteger> &expr) {
  return common::visit(
      [](const auto &kindExpr) { return ToInt64(kindExpr); }, expr.u);
}

std::optional<std::int64_t> ToInt64(const Expr<SomeUnsigned> &expr) {
  return common::visit(
      [](const auto &kindExpr) { return ToInt64(kindExpr); }, expr.u);
}

std::optional<std::int64_t> ToInt64(const Expr<SomeType> &expr) {
  if (const auto *intExpr{UnwrapExpr<Expr<SomeInteger>>(expr)}) {
    return ToInt64(*intExpr);
  } else if (const auto *unsignedExpr{UnwrapExpr<Expr<SomeUnsigned>>(expr)}) {
    return ToInt64(*unsignedExpr);
  } else {
    return std::nullopt;
  }
}

std::optional<std::int64_t> ToInt64(const ActualArgument &arg) {
  return ToInt64(arg.UnwrapExpr());
}

#ifdef _MSC_VER // disable bogus warning about missing definitions
#pragma warning(disable : 4661)
#endif
FOR_EACH_INTEGER_KIND(template class ExpressionBase, )
FOR_EACH_UNSIGNED_KIND(template class ExpressionBase, )
template class ExpressionBase<SomeInteger>;
template class ExpressionBase<SomeUnsigned>;
} // namespace Fortran::evaluate