1 //===-- Execution.cpp - Implement code to simulate the program ------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file contains the actual instruction interpreter. 10 // 11 //===----------------------------------------------------------------------===// 12 13 #include "Interpreter.h" 14 #include "llvm/ADT/APInt.h" 15 #include "llvm/ADT/Statistic.h" 16 #include "llvm/CodeGen/IntrinsicLowering.h" 17 #include "llvm/IR/Constants.h" 18 #include "llvm/IR/DerivedTypes.h" 19 #include "llvm/IR/GetElementPtrTypeIterator.h" 20 #include "llvm/IR/Instructions.h" 21 #include "llvm/Support/CommandLine.h" 22 #include "llvm/Support/Debug.h" 23 #include "llvm/Support/ErrorHandling.h" 24 #include "llvm/Support/MathExtras.h" 25 #include "llvm/Support/raw_ostream.h" 26 #include <algorithm> 27 #include <cmath> 28 using namespace llvm; 29 30 #define DEBUG_TYPE "interpreter" 31 32 STATISTIC(NumDynamicInsts, "Number of dynamic instructions executed"); 33 34 static cl::opt<bool> PrintVolatile("interpreter-print-volatile", cl::Hidden, 35 cl::desc("make the interpreter print every volatile load and store")); 36 37 //===----------------------------------------------------------------------===// 38 // Various Helper Functions 39 //===----------------------------------------------------------------------===// 40 41 static void SetValue(Value *V, GenericValue Val, ExecutionContext &SF) { 42 SF.Values[V] = Val; 43 } 44 45 //===----------------------------------------------------------------------===// 46 // Unary Instruction Implementations 47 //===----------------------------------------------------------------------===// 48 49 static void executeFNegInst(GenericValue &Dest, GenericValue Src, Type *Ty) { 50 switch 
(Ty->getTypeID()) { 51 case Type::FloatTyID: 52 Dest.FloatVal = -Src.FloatVal; 53 case Type::DoubleTyID: 54 Dest.DoubleVal = -Src.DoubleVal; 55 default: 56 llvm_unreachable("Unhandled type for FNeg instruction"); 57 } 58 } 59 60 void Interpreter::visitUnaryOperator(UnaryOperator &I) { 61 ExecutionContext &SF = ECStack.back(); 62 Type *Ty = I.getOperand(0)->getType(); 63 GenericValue Src = getOperandValue(I.getOperand(0), SF); 64 GenericValue R; // Result 65 66 // First process vector operation 67 if (Ty->isVectorTy()) { 68 R.AggregateVal.resize(Src.AggregateVal.size()); 69 70 switch(I.getOpcode()) { 71 default: 72 llvm_unreachable("Don't know how to handle this unary operator"); 73 break; 74 case Instruction::FNeg: 75 if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) { 76 for (unsigned i = 0; i < R.AggregateVal.size(); ++i) 77 R.AggregateVal[i].FloatVal = -Src.AggregateVal[i].FloatVal; 78 } else if (cast<VectorType>(Ty)->getElementType()->isDoubleTy()) { 79 for (unsigned i = 0; i < R.AggregateVal.size(); ++i) 80 R.AggregateVal[i].DoubleVal = -Src.AggregateVal[i].DoubleVal; 81 } else { 82 llvm_unreachable("Unhandled type for FNeg instruction"); 83 } 84 break; 85 } 86 } else { 87 switch (I.getOpcode()) { 88 default: 89 llvm_unreachable("Don't know how to handle this unary operator"); 90 break; 91 case Instruction::FNeg: executeFNegInst(R, Src, Ty); break; 92 } 93 } 94 SetValue(&I, R, SF); 95 } 96 97 //===----------------------------------------------------------------------===// 98 // Binary Instruction Implementations 99 //===----------------------------------------------------------------------===// 100 101 #define IMPLEMENT_BINARY_OPERATOR(OP, TY) \ 102 case Type::TY##TyID: \ 103 Dest.TY##Val = Src1.TY##Val OP Src2.TY##Val; \ 104 break 105 106 static void executeFAddInst(GenericValue &Dest, GenericValue Src1, 107 GenericValue Src2, Type *Ty) { 108 switch (Ty->getTypeID()) { 109 IMPLEMENT_BINARY_OPERATOR(+, Float); 110 IMPLEMENT_BINARY_OPERATOR(+, Double); 
  default:
    dbgs() << "Unhandled type for FAdd instruction: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
}

// Scalar floating-point subtract (float/double only; others abort).
static void executeFSubInst(GenericValue &Dest, GenericValue Src1,
                            GenericValue Src2, Type *Ty) {
  switch (Ty->getTypeID()) {
    IMPLEMENT_BINARY_OPERATOR(-, Float);
    IMPLEMENT_BINARY_OPERATOR(-, Double);
  default:
    dbgs() << "Unhandled type for FSub instruction: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
}

// Scalar floating-point multiply (float/double only; others abort).
static void executeFMulInst(GenericValue &Dest, GenericValue Src1,
                            GenericValue Src2, Type *Ty) {
  switch (Ty->getTypeID()) {
    IMPLEMENT_BINARY_OPERATOR(*, Float);
    IMPLEMENT_BINARY_OPERATOR(*, Double);
  default:
    dbgs() << "Unhandled type for FMul instruction: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
}

// Scalar floating-point divide (float/double only; others abort).
static void executeFDivInst(GenericValue &Dest, GenericValue Src1,
                            GenericValue Src2, Type *Ty) {
  switch (Ty->getTypeID()) {
    IMPLEMENT_BINARY_OPERATOR(/, Float);
    IMPLEMENT_BINARY_OPERATOR(/, Double);
  default:
    dbgs() << "Unhandled type for FDiv instruction: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
}

// Scalar floating-point remainder, computed with C library fmod.
static void executeFRemInst(GenericValue &Dest, GenericValue Src1,
                            GenericValue Src2, Type *Ty) {
  switch (Ty->getTypeID()) {
  case Type::FloatTyID:
    Dest.FloatVal = fmod(Src1.FloatVal, Src2.FloatVal);
    break;
  case Type::DoubleTyID:
    Dest.DoubleVal = fmod(Src1.DoubleVal, Src2.DoubleVal);
    break;
  default:
    dbgs() << "Unhandled type for Rem instruction: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
}

// Expands to a switch case that compares scalar integers with the named
// APInt predicate method (eq, ne, ult, slt, ...) yielding an i1 result.
#define IMPLEMENT_INTEGER_ICMP(OP, TY) \
   case Type::IntegerTyID: \
      Dest.IntVal = APInt(1,Src1.IntVal.OP(Src2.IntVal)); \
      break;

// Same comparison applied lane-by-lane over integer vectors; each lane
// produces its own i1 in Dest.AggregateVal.
#define IMPLEMENT_VECTOR_INTEGER_ICMP(OP, TY) \
  case Type::VectorTyID: { \
    assert(Src1.AggregateVal.size() == Src2.AggregateVal.size()); \
    Dest.AggregateVal.resize( Src1.AggregateVal.size() ); \
    for( uint32_t _i=0;_i<Src1.AggregateVal.size();_i++) \
      Dest.AggregateVal[_i].IntVal = APInt(1, \
      Src1.AggregateVal[_i].IntVal.OP(Src2.AggregateVal[_i].IntVal));\
  } break;

// Handle pointers specially because they must be compared with only as much
// width as the host has.  We _do not_ want to be comparing 64 bit values when
// running on a 32-bit target, otherwise the upper 32 bits might mess up
// comparisons if they contain garbage.
#define IMPLEMENT_POINTER_ICMP(OP) \
   case Type::PointerTyID: \
      Dest.IntVal = APInt(1,(void*)(intptr_t)Src1.PointerVal OP \
                            (void*)(intptr_t)Src2.PointerVal); \
      break;

// icmp eq for integer, integer-vector, or pointer operands.
static GenericValue executeICMP_EQ(GenericValue Src1, GenericValue Src2,
                                   Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(eq,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(eq,Ty);
    IMPLEMENT_POINTER_ICMP(==);
  default:
    dbgs() << "Unhandled type for ICMP_EQ predicate: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

// icmp ne for integer, integer-vector, or pointer operands.
static GenericValue executeICMP_NE(GenericValue Src1, GenericValue Src2,
                                   Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(ne,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(ne,Ty);
    IMPLEMENT_POINTER_ICMP(!=);
  default:
    dbgs() << "Unhandled type for ICMP_NE predicate: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

// icmp ult (unsigned less-than).
static GenericValue executeICMP_ULT(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(ult,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(ult,Ty);
    IMPLEMENT_POINTER_ICMP(<);
  default:
    dbgs() << "Unhandled type for ICMP_ULT predicate: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

// icmp slt (signed less-than).
static GenericValue executeICMP_SLT(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(slt,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(slt,Ty);
    IMPLEMENT_POINTER_ICMP(<);
  default:
    dbgs() << "Unhandled type for ICMP_SLT predicate: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

// icmp ugt (unsigned greater-than).
static GenericValue executeICMP_UGT(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(ugt,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(ugt,Ty);
    IMPLEMENT_POINTER_ICMP(>);
  default:
    dbgs() << "Unhandled type for ICMP_UGT predicate: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

// icmp sgt (signed greater-than).
static GenericValue executeICMP_SGT(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(sgt,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(sgt,Ty);
    IMPLEMENT_POINTER_ICMP(>);
  default:
    dbgs() << "Unhandled type for ICMP_SGT predicate: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

// icmp ule (unsigned less-or-equal).
static GenericValue executeICMP_ULE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(ule,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(ule,Ty);
    IMPLEMENT_POINTER_ICMP(<=);
  default:
    dbgs() << "Unhandled type for ICMP_ULE predicate: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

// icmp sle (signed less-or-equal).
static GenericValue executeICMP_SLE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(sle,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(sle,Ty);
    IMPLEMENT_POINTER_ICMP(<=);
  default:
    dbgs() << "Unhandled type for ICMP_SLE predicate: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

// icmp uge (unsigned greater-or-equal).
static GenericValue executeICMP_UGE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(uge,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(uge,Ty);
    IMPLEMENT_POINTER_ICMP(>=);
  default:
    dbgs() << "Unhandled type for ICMP_UGE predicate: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

// icmp sge (signed greater-or-equal).
static GenericValue executeICMP_SGE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(sge,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(sge,Ty);
    IMPLEMENT_POINTER_ICMP(>=);
  default:
    dbgs() << "Unhandled type for ICMP_SGE predicate: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

// Dispatch an icmp instruction to the executeICMP_* helper matching its
// predicate and record the result in the current stack frame.
void Interpreter::visitICmpInst(ICmpInst &I) {
  ExecutionContext &SF = ECStack.back();
  Type *Ty = I.getOperand(0)->getType();
  GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  GenericValue R;   // Result

  switch (I.getPredicate()) {
  case ICmpInst::ICMP_EQ:  R = executeICMP_EQ(Src1,  Src2, Ty); break;
  case ICmpInst::ICMP_NE:  R = executeICMP_NE(Src1,  Src2, Ty); break;
  case ICmpInst::ICMP_ULT: R = executeICMP_ULT(Src1, Src2, Ty); break;
  case ICmpInst::ICMP_SLT: R = executeICMP_SLT(Src1, Src2, Ty); break;
  case ICmpInst::ICMP_UGT: R = executeICMP_UGT(Src1, Src2, Ty); break;
  case ICmpInst::ICMP_SGT: R = executeICMP_SGT(Src1, Src2, Ty); break;
  case ICmpInst::ICMP_ULE: R = executeICMP_ULE(Src1, Src2, Ty); break;
  case ICmpInst::ICMP_SLE: R = executeICMP_SLE(Src1, Src2, Ty); break;
  case ICmpInst::ICMP_UGE: R = executeICMP_UGE(Src1, Src2, Ty); break;
  case ICmpInst::ICMP_SGE: R = executeICMP_SGE(Src1, Src2, Ty); break;
  default:
    dbgs() << "Don't know how to handle this ICmp predicate!\n-->" << I;
    llvm_unreachable(nullptr);
  }

  SetValue(&I, R, SF);
}

// Expands to a switch case applying the C operator OP to the scalar
// float/double union member, producing an i1 result.
#define IMPLEMENT_FCMP(OP, TY) \
   case Type::TY##TyID: \
     Dest.IntVal = APInt(1,Src1.TY##Val OP Src2.TY##Val); \
     break

// Lane-wise floating-point compare over vectors of TY (Float/Double).
#define IMPLEMENT_VECTOR_FCMP_T(OP, TY) \
  assert(Src1.AggregateVal.size() == Src2.AggregateVal.size()); \
  Dest.AggregateVal.resize( Src1.AggregateVal.size() ); \
  for( uint32_t _i=0;_i<Src1.AggregateVal.size();_i++) \
    Dest.AggregateVal[_i].IntVal = APInt(1, \
    Src1.AggregateVal[_i].TY##Val OP Src2.AggregateVal[_i].TY##Val);\
  break;

// Vector case that picks the float or double lane-wise compare based on the
// vector's element type.
#define IMPLEMENT_VECTOR_FCMP(OP) \
  case Type::VectorTyID: \
    if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) { \
      IMPLEMENT_VECTOR_FCMP_T(OP, Float); \
    } else { \
        IMPLEMENT_VECTOR_FCMP_T(OP, Double); \
    }

// fcmp oeq (ordered equal).
static GenericValue executeFCMP_OEQ(GenericValue Src1, GenericValue Src2,
                                   Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_FCMP(==, Float);
    IMPLEMENT_FCMP(==, Double);
    IMPLEMENT_VECTOR_FCMP(==);
  default:
    dbgs() << "Unhandled type for FCmp EQ instruction: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

// Early-return false from an ordered scalar compare when either operand is
// NaN (NaN != NaN is the standard self-inequality test).
#define IMPLEMENT_SCALAR_NANS(TY, X,Y) \
  if (TY->isFloatTy()) { \
    if (X.FloatVal != X.FloatVal || Y.FloatVal != Y.FloatVal) { \
      Dest.IntVal = APInt(1,false); \
      return Dest; \
    } \
  } else { \
    if (X.DoubleVal != X.DoubleVal || Y.DoubleVal != Y.DoubleVal) { \
      Dest.IntVal = APInt(1,false); \
      return Dest; \
    } \
  }

// Build a per-lane NaN mask for vectors: lanes with a NaN operand get FLAG,
// all other lanes get !FLAG.
#define MASK_VECTOR_NANS_T(X,Y, TZ, FLAG) \
  assert(X.AggregateVal.size() == Y.AggregateVal.size()); \
  Dest.AggregateVal.resize( X.AggregateVal.size() ); \
  for( uint32_t _i=0;_i<X.AggregateVal.size();_i++) { \
    if (X.AggregateVal[_i].TZ##Val != X.AggregateVal[_i].TZ##Val || \
        Y.AggregateVal[_i].TZ##Val != Y.AggregateVal[_i].TZ##Val) \
      Dest.AggregateVal[_i].IntVal = APInt(1,FLAG); \
    else { \
      Dest.AggregateVal[_i].IntVal = APInt(1,!FLAG); \
    } \
  }

// Dispatch MASK_VECTOR_NANS_T on the vector's element type; no-op for
// non-vector types.
#define MASK_VECTOR_NANS(TY, X,Y, FLAG) \
  if (TY->isVectorTy()) { \
    if (cast<VectorType>(TY)->getElementType()->isFloatTy()) { \
      MASK_VECTOR_NANS_T(X, Y, Float, FLAG) \
    } else { \
      MASK_VECTOR_NANS_T(X, Y, Double, FLAG) \
    } \
  } \



// fcmp one (ordered not-equal): false if either operand is NaN, otherwise !=.
static GenericValue executeFCMP_ONE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty)
{
  GenericValue Dest;
  // if input is scalar value and Src1 or Src2 is NaN return false
  IMPLEMENT_SCALAR_NANS(Ty, Src1, Src2)
  // if vector input detect NaNs and fill mask
  MASK_VECTOR_NANS(Ty, Src1, Src2, false)
  GenericValue DestMask = Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_FCMP(!=, Float);
    IMPLEMENT_FCMP(!=, Double);
    IMPLEMENT_VECTOR_FCMP(!=);
    default:
      dbgs() << "Unhandled type for FCmp NE instruction: " << *Ty << "\n";
      llvm_unreachable(nullptr);
  }
  // in vector case mask out NaN elements
  if (Ty->isVectorTy())
    for( size_t _i=0; _i<Src1.AggregateVal.size(); _i++)
      if (DestMask.AggregateVal[_i].IntVal == false)
        Dest.AggregateVal[_i].IntVal = APInt(1,false);

  return Dest;
}

// fcmp ole (ordered less-or-equal).
static GenericValue executeFCMP_OLE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_FCMP(<=, Float);
    IMPLEMENT_FCMP(<=, Double);
    IMPLEMENT_VECTOR_FCMP(<=);
  default:
    dbgs() << "Unhandled type for FCmp LE instruction: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

// fcmp oge (ordered greater-or-equal).
static GenericValue executeFCMP_OGE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_FCMP(>=, Float);
    IMPLEMENT_FCMP(>=, Double);
    IMPLEMENT_VECTOR_FCMP(>=);
  default:
    dbgs() << "Unhandled type for FCmp GE instruction: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

// fcmp olt (ordered less-than).
static GenericValue executeFCMP_OLT(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_FCMP(<, Float);
    IMPLEMENT_FCMP(<, Double);
    IMPLEMENT_VECTOR_FCMP(<);
  default:
    dbgs() << "Unhandled type for FCmp LT instruction: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

// fcmp ogt (ordered greater-than).
static GenericValue executeFCMP_OGT(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_FCMP(>, Float);
    IMPLEMENT_FCMP(>, Double);
    IMPLEMENT_VECTOR_FCMP(>);
  default:
    dbgs() << "Unhandled type for FCmp GT instruction: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

// Unordered scalar compares return true when either operand is NaN.
#define IMPLEMENT_UNORDERED(TY, X,Y) \
  if (TY->isFloatTy()) { \
    if (X.FloatVal != X.FloatVal || Y.FloatVal != Y.FloatVal) { \
      Dest.IntVal = APInt(1,true); \
      return Dest; \
    } \
  } else if (X.DoubleVal != X.DoubleVal || Y.DoubleVal != Y.DoubleVal) { \
    Dest.IntVal = APInt(1,true); \
    return Dest; \
  }

// Unordered vector compares: run the ordered compare FUNC, then force lanes
// flagged by the prior NaN mask (in Dest at entry) to true.
#define IMPLEMENT_VECTOR_UNORDERED(TY, X, Y, FUNC) \
  if (TY->isVectorTy()) { \
    GenericValue DestMask = Dest; \
    Dest = FUNC(Src1, Src2, Ty); \
    for (size_t _i = 0; _i < Src1.AggregateVal.size(); _i++) \
      if (DestMask.AggregateVal[_i].IntVal == true) \
        Dest.AggregateVal[_i].IntVal = APInt(1, true); \
    return Dest; \
  }

// fcmp ueq (unordered or equal).
static GenericValue executeFCMP_UEQ(GenericValue Src1, GenericValue Src2,
                                   Type *Ty) {
  GenericValue Dest;
  IMPLEMENT_UNORDERED(Ty, Src1, Src2)
  MASK_VECTOR_NANS(Ty, Src1, Src2, true)
  IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OEQ)
  return executeFCMP_OEQ(Src1, Src2, Ty);

}

// fcmp une (unordered or not-equal).
static GenericValue executeFCMP_UNE(GenericValue Src1, GenericValue Src2,
                                   Type *Ty) {
  GenericValue Dest;
  IMPLEMENT_UNORDERED(Ty, Src1, Src2)
  MASK_VECTOR_NANS(Ty, Src1, Src2, true)
  IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_ONE)
  return executeFCMP_ONE(Src1, Src2, Ty);
}

// fcmp ule (unordered or less-or-equal).
static GenericValue executeFCMP_ULE(GenericValue Src1, GenericValue Src2,
                                   Type *Ty) {
  GenericValue Dest;
  IMPLEMENT_UNORDERED(Ty, Src1, Src2)
  MASK_VECTOR_NANS(Ty, Src1, Src2, true)
  IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OLE)
  return executeFCMP_OLE(Src1, Src2, Ty);
}

// fcmp uge (unordered or greater-or-equal).
static GenericValue executeFCMP_UGE(GenericValue Src1, GenericValue Src2,
                                   Type *Ty) {
  GenericValue Dest;
  IMPLEMENT_UNORDERED(Ty, Src1, Src2)
  MASK_VECTOR_NANS(Ty, Src1, Src2, true)
  IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OGE)
  return executeFCMP_OGE(Src1, Src2, Ty);
}

// fcmp ult (unordered or less-than).
static GenericValue executeFCMP_ULT(GenericValue Src1, GenericValue Src2,
                                   Type *Ty) {
  GenericValue Dest;
  IMPLEMENT_UNORDERED(Ty, Src1, Src2)
  MASK_VECTOR_NANS(Ty, Src1, Src2, true)
  IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OLT)
  return executeFCMP_OLT(Src1, Src2, Ty);
}

// fcmp ugt (unordered or greater-than).
static GenericValue executeFCMP_UGT(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  IMPLEMENT_UNORDERED(Ty, Src1, Src2)
  MASK_VECTOR_NANS(Ty, Src1, Src2, true)
  IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OGT)
  return executeFCMP_OGT(Src1, Src2, Ty);
}

// fcmp ord: true iff neither operand is NaN (per lane for vectors).
static GenericValue executeFCMP_ORD(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  if(Ty->isVectorTy()) {
    assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
    Dest.AggregateVal.resize( Src1.AggregateVal.size() );
    if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) {
      for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
        Dest.AggregateVal[_i].IntVal = APInt(1,
        ( (Src1.AggregateVal[_i].FloatVal ==
        Src1.AggregateVal[_i].FloatVal) &&
        (Src2.AggregateVal[_i].FloatVal ==
        Src2.AggregateVal[_i].FloatVal)));
    } else {
      for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
        Dest.AggregateVal[_i].IntVal = APInt(1,
        ( (Src1.AggregateVal[_i].DoubleVal ==
        Src1.AggregateVal[_i].DoubleVal) &&
        (Src2.AggregateVal[_i].DoubleVal ==
        Src2.AggregateVal[_i].DoubleVal)));
    }
  } else if (Ty->isFloatTy())
    Dest.IntVal = APInt(1,(Src1.FloatVal == Src1.FloatVal &&
                           Src2.FloatVal == Src2.FloatVal));
  else {
    Dest.IntVal = APInt(1,(Src1.DoubleVal == Src1.DoubleVal &&
                           Src2.DoubleVal == Src2.DoubleVal));
  }
  return Dest;
}

// fcmp uno: true iff either operand is NaN (per lane for vectors).
static GenericValue executeFCMP_UNO(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  if(Ty->isVectorTy()) {
    assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
    Dest.AggregateVal.resize( Src1.AggregateVal.size() );
    if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) {
      for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
        Dest.AggregateVal[_i].IntVal = APInt(1,
        ( (Src1.AggregateVal[_i].FloatVal !=
        Src1.AggregateVal[_i].FloatVal) ||
        (Src2.AggregateVal[_i].FloatVal !=
        Src2.AggregateVal[_i].FloatVal)));
    } else {
      for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
        Dest.AggregateVal[_i].IntVal = APInt(1,
        ( (Src1.AggregateVal[_i].DoubleVal !=
        Src1.AggregateVal[_i].DoubleVal) ||
        (Src2.AggregateVal[_i].DoubleVal !=
        Src2.AggregateVal[_i].DoubleVal)));
    }
  } else if (Ty->isFloatTy())
    Dest.IntVal = APInt(1,(Src1.FloatVal != Src1.FloatVal ||
                           Src2.FloatVal != Src2.FloatVal));
  else {
    Dest.IntVal = APInt(1,(Src1.DoubleVal != Src1.DoubleVal ||
                           Src2.DoubleVal != Src2.DoubleVal));
  }
  return Dest;
}

// Constant-result compare used for FCMP_FALSE/FCMP_TRUE: every lane (or the
// scalar result) is set to `val`.
static GenericValue executeFCMP_BOOL(GenericValue Src1, GenericValue Src2,
                                     Type *Ty, const bool val) {
  GenericValue Dest;
  if(Ty->isVectorTy()) {
    assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
    Dest.AggregateVal.resize( Src1.AggregateVal.size() );
    for( size_t _i=0; _i<Src1.AggregateVal.size(); _i++)
      Dest.AggregateVal[_i].IntVal = APInt(1,val);
  } else {
    Dest.IntVal = APInt(1, val);
  }
  return Dest;
}

// Dispatch an fcmp instruction to the executeFCMP_* helper matching its
// predicate and record the result in the current stack frame.
void Interpreter::visitFCmpInst(FCmpInst &I) {
  ExecutionContext &SF = ECStack.back();
  Type *Ty = I.getOperand(0)->getType();
  GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  GenericValue R;   // Result

  switch (I.getPredicate()) {
  default:
    dbgs() << "Don't know how to handle this FCmp predicate!\n-->" << I;
    llvm_unreachable(nullptr);
  break;
  case FCmpInst::FCMP_FALSE: R = executeFCMP_BOOL(Src1, Src2, Ty, false);
  break;
  case FCmpInst::FCMP_TRUE:  R = executeFCMP_BOOL(Src1, Src2, Ty, true);
  break;
  case FCmpInst::FCMP_ORD:   R = executeFCMP_ORD(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_UNO:   R = executeFCMP_UNO(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_UEQ:   R = executeFCMP_UEQ(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_OEQ:   R = executeFCMP_OEQ(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_UNE:   R = executeFCMP_UNE(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_ONE:   R = executeFCMP_ONE(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_ULT:   R = executeFCMP_ULT(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_OLT:   R = executeFCMP_OLT(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_UGT:   R = executeFCMP_UGT(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_OGT:   R = executeFCMP_OGT(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_ULE:   R = executeFCMP_ULE(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_OLE:   R = executeFCMP_OLE(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_UGE:   R = executeFCMP_UGE(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_OGE:   R = executeFCMP_OGE(Src1, Src2, Ty); break;
  }

  SetValue(&I, R, SF);
}

// Generic compare dispatcher used outside the visitor path (e.g. by the
// switch-instruction lowering); handles both icmp and fcmp predicates.
static GenericValue executeCmpInst(unsigned predicate, GenericValue Src1,
                                   GenericValue Src2, Type *Ty) {
  GenericValue Result;
  switch (predicate) {
  case ICmpInst::ICMP_EQ:    return executeICMP_EQ(Src1, Src2, Ty);
  case ICmpInst::ICMP_NE:    return executeICMP_NE(Src1, Src2, Ty);
  case ICmpInst::ICMP_UGT:   return executeICMP_UGT(Src1, Src2, Ty);
  case ICmpInst::ICMP_SGT:   return executeICMP_SGT(Src1, Src2, Ty);
  case ICmpInst::ICMP_ULT:   return executeICMP_ULT(Src1, Src2, Ty);
  case ICmpInst::ICMP_SLT:   return executeICMP_SLT(Src1, Src2, Ty);
  case ICmpInst::ICMP_UGE:   return executeICMP_UGE(Src1, Src2, Ty);
  case ICmpInst::ICMP_SGE:   return executeICMP_SGE(Src1, Src2, Ty);
  case ICmpInst::ICMP_ULE:   return executeICMP_ULE(Src1, Src2, Ty);
  case ICmpInst::ICMP_SLE:   return executeICMP_SLE(Src1, Src2, Ty);
  case FCmpInst::FCMP_ORD:   return executeFCMP_ORD(Src1, Src2, Ty);
  case FCmpInst::FCMP_UNO:   return executeFCMP_UNO(Src1, Src2, Ty);
  case FCmpInst::FCMP_OEQ:   return executeFCMP_OEQ(Src1, Src2, Ty);
  case FCmpInst::FCMP_UEQ:   return executeFCMP_UEQ(Src1, Src2, Ty);
  case FCmpInst::FCMP_ONE:   return executeFCMP_ONE(Src1, Src2, Ty);
  case FCmpInst::FCMP_UNE:   return executeFCMP_UNE(Src1, Src2, Ty);
  case FCmpInst::FCMP_OLT:   return executeFCMP_OLT(Src1, Src2, Ty);
  case FCmpInst::FCMP_ULT:   return executeFCMP_ULT(Src1, Src2, Ty);
  case FCmpInst::FCMP_OGT:   return executeFCMP_OGT(Src1, Src2, Ty);
  case FCmpInst::FCMP_UGT:   return executeFCMP_UGT(Src1, Src2, Ty);
  case FCmpInst::FCMP_OLE:   return executeFCMP_OLE(Src1, Src2, Ty);
  case FCmpInst::FCMP_ULE:   return executeFCMP_ULE(Src1, Src2, Ty);
  case FCmpInst::FCMP_OGE:   return executeFCMP_OGE(Src1, Src2, Ty);
  case FCmpInst::FCMP_UGE:   return executeFCMP_UGE(Src1, Src2, Ty);
  case FCmpInst::FCMP_FALSE: return executeFCMP_BOOL(Src1, Src2, Ty, false);
  case FCmpInst::FCMP_TRUE:  return executeFCMP_BOOL(Src1, Src2, Ty, true);
  default:
    dbgs() << "Unhandled Cmp predicate\n";
    llvm_unreachable(nullptr);
  }
}

// Execute an integer or floating-point binary operator, with separate
// vector (lane-wise) and scalar paths.
void Interpreter::visitBinaryOperator(BinaryOperator &I) {
  ExecutionContext &SF = ECStack.back();
  Type *Ty = I.getOperand(0)->getType();
  GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  GenericValue R;   // Result

  // First process vector operation
  if (Ty->isVectorTy()) {
    assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
    R.AggregateVal.resize(Src1.AggregateVal.size());

    // Macros to execute binary operation 'OP' over integer vectors
#define INTEGER_VECTOR_OPERATION(OP) \
    for (unsigned i = 0; i < R.AggregateVal.size(); ++i) \
      R.AggregateVal[i].IntVal = \
      Src1.AggregateVal[i].IntVal OP Src2.AggregateVal[i].IntVal;

    // Additional macros to execute binary operations udiv/sdiv/urem/srem since
    // they have different notation.
#define INTEGER_VECTOR_FUNCTION(OP) \
    for (unsigned i = 0; i < R.AggregateVal.size(); ++i) \
      R.AggregateVal[i].IntVal = \
      Src1.AggregateVal[i].IntVal.OP(Src2.AggregateVal[i].IntVal);

    // Macros to execute binary operation 'OP' over floating point type TY
    // (float or double) vectors
#define FLOAT_VECTOR_FUNCTION(OP, TY) \
      for (unsigned i = 0; i < R.AggregateVal.size(); ++i) \
        R.AggregateVal[i].TY = \
        Src1.AggregateVal[i].TY OP Src2.AggregateVal[i].TY;

    // Macros to choose appropriate TY: float or double and run operation
    // execution
#define FLOAT_VECTOR_OP(OP) { \
  if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) \
    FLOAT_VECTOR_FUNCTION(OP, FloatVal) \
  else { \
    if (cast<VectorType>(Ty)->getElementType()->isDoubleTy()) \
      FLOAT_VECTOR_FUNCTION(OP, DoubleVal) \
    else { \
      dbgs() << "Unhandled type for OP instruction: " << *Ty << "\n"; \
      llvm_unreachable(0); \
    } \
  } \
}

    switch(I.getOpcode()){
    default:
      dbgs() << "Don't know how to handle this binary operator!\n-->" << I;
      llvm_unreachable(nullptr);
      break;
    case Instruction::Add:   INTEGER_VECTOR_OPERATION(+) break;
    case Instruction::Sub:   INTEGER_VECTOR_OPERATION(-) break;
    case Instruction::Mul:   INTEGER_VECTOR_OPERATION(*) break;
    case Instruction::UDiv:  INTEGER_VECTOR_FUNCTION(udiv) break;
    case Instruction::SDiv:  INTEGER_VECTOR_FUNCTION(sdiv) break;
    case Instruction::URem:  INTEGER_VECTOR_FUNCTION(urem) break;
    case Instruction::SRem:  INTEGER_VECTOR_FUNCTION(srem) break;
    case Instruction::And:   INTEGER_VECTOR_OPERATION(&) break;
    case Instruction::Or:    INTEGER_VECTOR_OPERATION(|) break;
    case Instruction::Xor:   INTEGER_VECTOR_OPERATION(^) break;
    case Instruction::FAdd:  FLOAT_VECTOR_OP(+) break;
    case Instruction::FSub:  FLOAT_VECTOR_OP(-) break;
    case Instruction::FMul:  FLOAT_VECTOR_OP(*) break;
    case Instruction::FDiv:  FLOAT_VECTOR_OP(/) break;
    case Instruction::FRem:
      // FRem has no operator form, so it cannot use FLOAT_VECTOR_OP; apply
      // fmod lane-by-lane instead.
      if (cast<VectorType>(Ty)->getElementType()->isFloatTy())
        for (unsigned i = 0; i < R.AggregateVal.size(); ++i)
          R.AggregateVal[i].FloatVal =
          fmod(Src1.AggregateVal[i].FloatVal, Src2.AggregateVal[i].FloatVal);
      else {
        if (cast<VectorType>(Ty)->getElementType()->isDoubleTy())
          for (unsigned i = 0; i < R.AggregateVal.size(); ++i)
            R.AggregateVal[i].DoubleVal =
            fmod(Src1.AggregateVal[i].DoubleVal, Src2.AggregateVal[i].DoubleVal);
        else {
          dbgs() << "Unhandled type for Rem instruction: " << *Ty << "\n";
          llvm_unreachable(nullptr);
        }
      }
      break;
    }
  } else {
    switch (I.getOpcode()) {
    default:
      dbgs() << "Don't know how to handle this binary operator!\n-->" << I;
      llvm_unreachable(nullptr);
      break;
    case Instruction::Add:   R.IntVal = Src1.IntVal + Src2.IntVal; break;
    case Instruction::Sub:   R.IntVal = Src1.IntVal - Src2.IntVal; break;
    case Instruction::Mul:   R.IntVal = Src1.IntVal * Src2.IntVal; break;
    case Instruction::FAdd:  executeFAddInst(R, Src1, Src2, Ty); break;
    case Instruction::FSub:  executeFSubInst(R, Src1, Src2, Ty); break;
    case Instruction::FMul:  executeFMulInst(R, Src1, Src2, Ty); break;
    case Instruction::FDiv:  executeFDivInst(R, Src1, Src2, Ty); break;
    case Instruction::FRem:  executeFRemInst(R, Src1, Src2, Ty); break;
    case Instruction::UDiv:  R.IntVal = Src1.IntVal.udiv(Src2.IntVal); break;
    case Instruction::SDiv:  R.IntVal = Src1.IntVal.sdiv(Src2.IntVal); break;
    case Instruction::URem:  R.IntVal = Src1.IntVal.urem(Src2.IntVal); break;
    case Instruction::SRem:  R.IntVal = Src1.IntVal.srem(Src2.IntVal); break;
    case Instruction::And:   R.IntVal = Src1.IntVal & Src2.IntVal; break;
    case Instruction::Or:    R.IntVal = Src1.IntVal | Src2.IntVal; break;
    case Instruction::Xor:   R.IntVal = Src1.IntVal ^ Src2.IntVal; break;
    }
  }
  SetValue(&I, R, SF);
}

// select: pick Src2 when the (lane) condition is nonzero, Src3 otherwise.
static GenericValue executeSelectInst(GenericValue Src1, GenericValue Src2,
                                      GenericValue Src3, Type *Ty) {
  GenericValue Dest;
  if(Ty->isVectorTy()) {
    assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
    assert(Src2.AggregateVal.size() == Src3.AggregateVal.size());
    Dest.AggregateVal.resize( Src1.AggregateVal.size() );
    for (size_t i = 0; i < Src1.AggregateVal.size(); ++i)
      Dest.AggregateVal[i] = (Src1.AggregateVal[i].IntVal == 0) ?
        Src3.AggregateVal[i] : Src2.AggregateVal[i];
  } else {
    Dest = (Src1.IntVal == 0) ? Src3 : Src2;
  }
  return Dest;
}

// Execute a select instruction and record its result.
void Interpreter::visitSelectInst(SelectInst &I) {
  ExecutionContext &SF = ECStack.back();
  Type * Ty = I.getOperand(0)->getType();
  GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  GenericValue Src3 = getOperandValue(I.getOperand(2), SF);
  GenericValue R = executeSelectInst(Src1, Src2, Src3, Ty);
  SetValue(&I, R, SF);
}

//===----------------------------------------------------------------------===//
//                     Terminator Instruction Implementations
//===----------------------------------------------------------------------===//

void Interpreter::exitCalled(GenericValue GV) {
  // runAtExitHandlers() assumes there are no stack frames, but
  // if exit() was called, then it had a stack frame. Blow away
  // the stack before interpreting atexit handlers.
  ECStack.clear();
  runAtExitHandlers();
  exit(GV.IntVal.zextOrTrunc(32).getZExtValue());
}

/// Pop the last stack frame off of ECStack and then copy the result
/// back into the result variable if we are not returning void. The
/// result variable may be the ExitValue, or the Value of the calling
/// CallInst if there was a previous stack frame. This method may
/// invalidate any ECStack iterators you have. This method also takes
/// care of switching to the normal destination BB, if we are returning
/// from an invoke.
///
void Interpreter::popStackAndReturnValueToCaller(Type *RetTy,
                                                 GenericValue Result) {
  // Pop the current stack frame.
  ECStack.pop_back();

  if (ECStack.empty()) {  // Finished main.  Put result into exit code...
    if (RetTy && !RetTy->isVoidTy()) {          // Nonvoid return type?
895 ExitValue = Result; // Capture the exit value of the program 896 } else { 897 memset(&ExitValue.Untyped, 0, sizeof(ExitValue.Untyped)); 898 } 899 } else { 900 // If we have a previous stack frame, and we have a previous call, 901 // fill in the return value... 902 ExecutionContext &CallingSF = ECStack.back(); 903 if (Instruction *I = CallingSF.Caller.getInstruction()) { 904 // Save result... 905 if (!CallingSF.Caller.getType()->isVoidTy()) 906 SetValue(I, Result, CallingSF); 907 if (InvokeInst *II = dyn_cast<InvokeInst> (I)) 908 SwitchToNewBasicBlock (II->getNormalDest (), CallingSF); 909 CallingSF.Caller = CallSite(); // We returned from the call... 910 } 911 } 912 } 913 914 void Interpreter::visitReturnInst(ReturnInst &I) { 915 ExecutionContext &SF = ECStack.back(); 916 Type *RetTy = Type::getVoidTy(I.getContext()); 917 GenericValue Result; 918 919 // Save away the return value... (if we are not 'ret void') 920 if (I.getNumOperands()) { 921 RetTy = I.getReturnValue()->getType(); 922 Result = getOperandValue(I.getReturnValue(), SF); 923 } 924 925 popStackAndReturnValueToCaller(RetTy, Result); 926 } 927 928 void Interpreter::visitUnreachableInst(UnreachableInst &I) { 929 report_fatal_error("Program executed an 'unreachable' instruction!"); 930 } 931 932 void Interpreter::visitBranchInst(BranchInst &I) { 933 ExecutionContext &SF = ECStack.back(); 934 BasicBlock *Dest; 935 936 Dest = I.getSuccessor(0); // Uncond branches have a fixed dest... 937 if (!I.isUnconditional()) { 938 Value *Cond = I.getCondition(); 939 if (getOperandValue(Cond, SF).IntVal == 0) // If false cond... 940 Dest = I.getSuccessor(1); 941 } 942 SwitchToNewBasicBlock(Dest, SF); 943 } 944 945 void Interpreter::visitSwitchInst(SwitchInst &I) { 946 ExecutionContext &SF = ECStack.back(); 947 Value* Cond = I.getCondition(); 948 Type *ElTy = Cond->getType(); 949 GenericValue CondVal = getOperandValue(Cond, SF); 950 951 // Check to see if any of the cases match... 
952 BasicBlock *Dest = nullptr; 953 for (auto Case : I.cases()) { 954 GenericValue CaseVal = getOperandValue(Case.getCaseValue(), SF); 955 if (executeICMP_EQ(CondVal, CaseVal, ElTy).IntVal != 0) { 956 Dest = cast<BasicBlock>(Case.getCaseSuccessor()); 957 break; 958 } 959 } 960 if (!Dest) Dest = I.getDefaultDest(); // No cases matched: use default 961 SwitchToNewBasicBlock(Dest, SF); 962 } 963 964 void Interpreter::visitIndirectBrInst(IndirectBrInst &I) { 965 ExecutionContext &SF = ECStack.back(); 966 void *Dest = GVTOP(getOperandValue(I.getAddress(), SF)); 967 SwitchToNewBasicBlock((BasicBlock*)Dest, SF); 968 } 969 970 971 // SwitchToNewBasicBlock - This method is used to jump to a new basic block. 972 // This function handles the actual updating of block and instruction iterators 973 // as well as execution of all of the PHI nodes in the destination block. 974 // 975 // This method does this because all of the PHI nodes must be executed 976 // atomically, reading their inputs before any of the results are updated. Not 977 // doing this can cause problems if the PHI nodes depend on other PHI nodes for 978 // their inputs. If the input PHI node is updated before it is read, incorrect 979 // results can happen. Thus we use a two phase approach. 980 // 981 void Interpreter::SwitchToNewBasicBlock(BasicBlock *Dest, ExecutionContext &SF){ 982 BasicBlock *PrevBB = SF.CurBB; // Remember where we came from... 983 SF.CurBB = Dest; // Update CurBB to branch destination 984 SF.CurInst = SF.CurBB->begin(); // Update new instruction ptr... 985 986 if (!isa<PHINode>(SF.CurInst)) return; // Nothing fancy to do 987 988 // Loop over all of the PHI nodes in the current block, reading their inputs. 989 std::vector<GenericValue> ResultValues; 990 991 for (; PHINode *PN = dyn_cast<PHINode>(SF.CurInst); ++SF.CurInst) { 992 // Search for the value corresponding to this previous bb... 
993 int i = PN->getBasicBlockIndex(PrevBB); 994 assert(i != -1 && "PHINode doesn't contain entry for predecessor??"); 995 Value *IncomingValue = PN->getIncomingValue(i); 996 997 // Save the incoming value for this PHI node... 998 ResultValues.push_back(getOperandValue(IncomingValue, SF)); 999 } 1000 1001 // Now loop over all of the PHI nodes setting their values... 1002 SF.CurInst = SF.CurBB->begin(); 1003 for (unsigned i = 0; isa<PHINode>(SF.CurInst); ++SF.CurInst, ++i) { 1004 PHINode *PN = cast<PHINode>(SF.CurInst); 1005 SetValue(PN, ResultValues[i], SF); 1006 } 1007 } 1008 1009 //===----------------------------------------------------------------------===// 1010 // Memory Instruction Implementations 1011 //===----------------------------------------------------------------------===// 1012 1013 void Interpreter::visitAllocaInst(AllocaInst &I) { 1014 ExecutionContext &SF = ECStack.back(); 1015 1016 Type *Ty = I.getType()->getElementType(); // Type to be allocated 1017 1018 // Get the number of elements being allocated by the array... 1019 unsigned NumElements = 1020 getOperandValue(I.getOperand(0), SF).IntVal.getZExtValue(); 1021 1022 unsigned TypeSize = (size_t)getDataLayout().getTypeAllocSize(Ty); 1023 1024 // Avoid malloc-ing zero bytes, use max()... 1025 unsigned MemToAlloc = std::max(1U, NumElements * TypeSize); 1026 1027 // Allocate enough memory to hold the type... 1028 void *Memory = safe_malloc(MemToAlloc); 1029 1030 LLVM_DEBUG(dbgs() << "Allocated Type: " << *Ty << " (" << TypeSize 1031 << " bytes) x " << NumElements << " (Total: " << MemToAlloc 1032 << ") at " << uintptr_t(Memory) << '\n'); 1033 1034 GenericValue Result = PTOGV(Memory); 1035 assert(Result.PointerVal && "Null pointer returned by malloc!"); 1036 SetValue(&I, Result, SF); 1037 1038 if (I.getOpcode() == Instruction::Alloca) 1039 ECStack.back().Allocas.add(Memory); 1040 } 1041 1042 // getElementOffset - The workhorse for getelementptr. 
1043 // 1044 GenericValue Interpreter::executeGEPOperation(Value *Ptr, gep_type_iterator I, 1045 gep_type_iterator E, 1046 ExecutionContext &SF) { 1047 assert(Ptr->getType()->isPointerTy() && 1048 "Cannot getElementOffset of a nonpointer type!"); 1049 1050 uint64_t Total = 0; 1051 1052 for (; I != E; ++I) { 1053 if (StructType *STy = I.getStructTypeOrNull()) { 1054 const StructLayout *SLO = getDataLayout().getStructLayout(STy); 1055 1056 const ConstantInt *CPU = cast<ConstantInt>(I.getOperand()); 1057 unsigned Index = unsigned(CPU->getZExtValue()); 1058 1059 Total += SLO->getElementOffset(Index); 1060 } else { 1061 // Get the index number for the array... which must be long type... 1062 GenericValue IdxGV = getOperandValue(I.getOperand(), SF); 1063 1064 int64_t Idx; 1065 unsigned BitWidth = 1066 cast<IntegerType>(I.getOperand()->getType())->getBitWidth(); 1067 if (BitWidth == 32) 1068 Idx = (int64_t)(int32_t)IdxGV.IntVal.getZExtValue(); 1069 else { 1070 assert(BitWidth == 64 && "Invalid index type for getelementptr"); 1071 Idx = (int64_t)IdxGV.IntVal.getZExtValue(); 1072 } 1073 Total += getDataLayout().getTypeAllocSize(I.getIndexedType()) * Idx; 1074 } 1075 } 1076 1077 GenericValue Result; 1078 Result.PointerVal = ((char*)getOperandValue(Ptr, SF).PointerVal) + Total; 1079 LLVM_DEBUG(dbgs() << "GEP Index " << Total << " bytes.\n"); 1080 return Result; 1081 } 1082 1083 void Interpreter::visitGetElementPtrInst(GetElementPtrInst &I) { 1084 ExecutionContext &SF = ECStack.back(); 1085 SetValue(&I, executeGEPOperation(I.getPointerOperand(), 1086 gep_type_begin(I), gep_type_end(I), SF), SF); 1087 } 1088 1089 void Interpreter::visitLoadInst(LoadInst &I) { 1090 ExecutionContext &SF = ECStack.back(); 1091 GenericValue SRC = getOperandValue(I.getPointerOperand(), SF); 1092 GenericValue *Ptr = (GenericValue*)GVTOP(SRC); 1093 GenericValue Result; 1094 LoadValueFromMemory(Result, Ptr, I.getType()); 1095 SetValue(&I, Result, SF); 1096 if (I.isVolatile() && PrintVolatile) 1097 
dbgs() << "Volatile load " << I; 1098 } 1099 1100 void Interpreter::visitStoreInst(StoreInst &I) { 1101 ExecutionContext &SF = ECStack.back(); 1102 GenericValue Val = getOperandValue(I.getOperand(0), SF); 1103 GenericValue SRC = getOperandValue(I.getPointerOperand(), SF); 1104 StoreValueToMemory(Val, (GenericValue *)GVTOP(SRC), 1105 I.getOperand(0)->getType()); 1106 if (I.isVolatile() && PrintVolatile) 1107 dbgs() << "Volatile store: " << I; 1108 } 1109 1110 //===----------------------------------------------------------------------===// 1111 // Miscellaneous Instruction Implementations 1112 //===----------------------------------------------------------------------===// 1113 1114 void Interpreter::visitCallSite(CallSite CS) { 1115 ExecutionContext &SF = ECStack.back(); 1116 1117 // Check to see if this is an intrinsic function call... 1118 Function *F = CS.getCalledFunction(); 1119 if (F && F->isDeclaration()) 1120 switch (F->getIntrinsicID()) { 1121 case Intrinsic::not_intrinsic: 1122 break; 1123 case Intrinsic::vastart: { // va_start 1124 GenericValue ArgIndex; 1125 ArgIndex.UIntPairVal.first = ECStack.size() - 1; 1126 ArgIndex.UIntPairVal.second = 0; 1127 SetValue(CS.getInstruction(), ArgIndex, SF); 1128 return; 1129 } 1130 case Intrinsic::vaend: // va_end is a noop for the interpreter 1131 return; 1132 case Intrinsic::vacopy: // va_copy: dest = src 1133 SetValue(CS.getInstruction(), getOperandValue(*CS.arg_begin(), SF), SF); 1134 return; 1135 default: 1136 // If it is an unknown intrinsic function, use the intrinsic lowering 1137 // class to transform it into hopefully tasty LLVM code. 1138 // 1139 BasicBlock::iterator me(CS.getInstruction()); 1140 BasicBlock *Parent = CS.getInstruction()->getParent(); 1141 bool atBegin(Parent->begin() == me); 1142 if (!atBegin) 1143 --me; 1144 IL->LowerIntrinsicCall(cast<CallInst>(CS.getInstruction())); 1145 1146 // Restore the CurInst pointer to the first instruction newly inserted, if 1147 // any. 
1148 if (atBegin) { 1149 SF.CurInst = Parent->begin(); 1150 } else { 1151 SF.CurInst = me; 1152 ++SF.CurInst; 1153 } 1154 return; 1155 } 1156 1157 1158 SF.Caller = CS; 1159 std::vector<GenericValue> ArgVals; 1160 const unsigned NumArgs = SF.Caller.arg_size(); 1161 ArgVals.reserve(NumArgs); 1162 uint16_t pNum = 1; 1163 for (CallSite::arg_iterator i = SF.Caller.arg_begin(), 1164 e = SF.Caller.arg_end(); i != e; ++i, ++pNum) { 1165 Value *V = *i; 1166 ArgVals.push_back(getOperandValue(V, SF)); 1167 } 1168 1169 // To handle indirect calls, we must get the pointer value from the argument 1170 // and treat it as a function pointer. 1171 GenericValue SRC = getOperandValue(SF.Caller.getCalledValue(), SF); 1172 callFunction((Function*)GVTOP(SRC), ArgVals); 1173 } 1174 1175 // auxiliary function for shift operations 1176 static unsigned getShiftAmount(uint64_t orgShiftAmount, 1177 llvm::APInt valueToShift) { 1178 unsigned valueWidth = valueToShift.getBitWidth(); 1179 if (orgShiftAmount < (uint64_t)valueWidth) 1180 return orgShiftAmount; 1181 // according to the llvm documentation, if orgShiftAmount > valueWidth, 1182 // the result is undfeined. 
but we do shift by this rule: 1183 return (NextPowerOf2(valueWidth-1) - 1) & orgShiftAmount; 1184 } 1185 1186 1187 void Interpreter::visitShl(BinaryOperator &I) { 1188 ExecutionContext &SF = ECStack.back(); 1189 GenericValue Src1 = getOperandValue(I.getOperand(0), SF); 1190 GenericValue Src2 = getOperandValue(I.getOperand(1), SF); 1191 GenericValue Dest; 1192 Type *Ty = I.getType(); 1193 1194 if (Ty->isVectorTy()) { 1195 uint32_t src1Size = uint32_t(Src1.AggregateVal.size()); 1196 assert(src1Size == Src2.AggregateVal.size()); 1197 for (unsigned i = 0; i < src1Size; i++) { 1198 GenericValue Result; 1199 uint64_t shiftAmount = Src2.AggregateVal[i].IntVal.getZExtValue(); 1200 llvm::APInt valueToShift = Src1.AggregateVal[i].IntVal; 1201 Result.IntVal = valueToShift.shl(getShiftAmount(shiftAmount, valueToShift)); 1202 Dest.AggregateVal.push_back(Result); 1203 } 1204 } else { 1205 // scalar 1206 uint64_t shiftAmount = Src2.IntVal.getZExtValue(); 1207 llvm::APInt valueToShift = Src1.IntVal; 1208 Dest.IntVal = valueToShift.shl(getShiftAmount(shiftAmount, valueToShift)); 1209 } 1210 1211 SetValue(&I, Dest, SF); 1212 } 1213 1214 void Interpreter::visitLShr(BinaryOperator &I) { 1215 ExecutionContext &SF = ECStack.back(); 1216 GenericValue Src1 = getOperandValue(I.getOperand(0), SF); 1217 GenericValue Src2 = getOperandValue(I.getOperand(1), SF); 1218 GenericValue Dest; 1219 Type *Ty = I.getType(); 1220 1221 if (Ty->isVectorTy()) { 1222 uint32_t src1Size = uint32_t(Src1.AggregateVal.size()); 1223 assert(src1Size == Src2.AggregateVal.size()); 1224 for (unsigned i = 0; i < src1Size; i++) { 1225 GenericValue Result; 1226 uint64_t shiftAmount = Src2.AggregateVal[i].IntVal.getZExtValue(); 1227 llvm::APInt valueToShift = Src1.AggregateVal[i].IntVal; 1228 Result.IntVal = valueToShift.lshr(getShiftAmount(shiftAmount, valueToShift)); 1229 Dest.AggregateVal.push_back(Result); 1230 } 1231 } else { 1232 // scalar 1233 uint64_t shiftAmount = Src2.IntVal.getZExtValue(); 1234 llvm::APInt 
valueToShift = Src1.IntVal; 1235 Dest.IntVal = valueToShift.lshr(getShiftAmount(shiftAmount, valueToShift)); 1236 } 1237 1238 SetValue(&I, Dest, SF); 1239 } 1240 1241 void Interpreter::visitAShr(BinaryOperator &I) { 1242 ExecutionContext &SF = ECStack.back(); 1243 GenericValue Src1 = getOperandValue(I.getOperand(0), SF); 1244 GenericValue Src2 = getOperandValue(I.getOperand(1), SF); 1245 GenericValue Dest; 1246 Type *Ty = I.getType(); 1247 1248 if (Ty->isVectorTy()) { 1249 size_t src1Size = Src1.AggregateVal.size(); 1250 assert(src1Size == Src2.AggregateVal.size()); 1251 for (unsigned i = 0; i < src1Size; i++) { 1252 GenericValue Result; 1253 uint64_t shiftAmount = Src2.AggregateVal[i].IntVal.getZExtValue(); 1254 llvm::APInt valueToShift = Src1.AggregateVal[i].IntVal; 1255 Result.IntVal = valueToShift.ashr(getShiftAmount(shiftAmount, valueToShift)); 1256 Dest.AggregateVal.push_back(Result); 1257 } 1258 } else { 1259 // scalar 1260 uint64_t shiftAmount = Src2.IntVal.getZExtValue(); 1261 llvm::APInt valueToShift = Src1.IntVal; 1262 Dest.IntVal = valueToShift.ashr(getShiftAmount(shiftAmount, valueToShift)); 1263 } 1264 1265 SetValue(&I, Dest, SF); 1266 } 1267 1268 GenericValue Interpreter::executeTruncInst(Value *SrcVal, Type *DstTy, 1269 ExecutionContext &SF) { 1270 GenericValue Dest, Src = getOperandValue(SrcVal, SF); 1271 Type *SrcTy = SrcVal->getType(); 1272 if (SrcTy->isVectorTy()) { 1273 Type *DstVecTy = DstTy->getScalarType(); 1274 unsigned DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth(); 1275 unsigned NumElts = Src.AggregateVal.size(); 1276 // the sizes of src and dst vectors must be equal 1277 Dest.AggregateVal.resize(NumElts); 1278 for (unsigned i = 0; i < NumElts; i++) 1279 Dest.AggregateVal[i].IntVal = Src.AggregateVal[i].IntVal.trunc(DBitWidth); 1280 } else { 1281 IntegerType *DITy = cast<IntegerType>(DstTy); 1282 unsigned DBitWidth = DITy->getBitWidth(); 1283 Dest.IntVal = Src.IntVal.trunc(DBitWidth); 1284 } 1285 return Dest; 1286 } 1287 1288 
// Sign-extend an integer (or each lane of an integer vector) to the
// destination's wider bit width.
GenericValue Interpreter::executeSExtInst(Value *SrcVal, Type *DstTy,
                                          ExecutionContext &SF) {
  Type *SrcTy = SrcVal->getType();
  GenericValue Dest, Src = getOperandValue(SrcVal, SF);
  if (SrcTy->isVectorTy()) {
    Type *DstVecTy = DstTy->getScalarType();
    unsigned DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
    unsigned size = Src.AggregateVal.size();
    // the sizes of src and dst vectors must be equal.
    Dest.AggregateVal.resize(size);
    for (unsigned i = 0; i < size; i++)
      Dest.AggregateVal[i].IntVal = Src.AggregateVal[i].IntVal.sext(DBitWidth);
  } else {
    auto *DITy = cast<IntegerType>(DstTy);
    unsigned DBitWidth = DITy->getBitWidth();
    Dest.IntVal = Src.IntVal.sext(DBitWidth);
  }
  return Dest;
}

// Zero-extend an integer (or each lane of an integer vector) to the
// destination's wider bit width.
GenericValue Interpreter::executeZExtInst(Value *SrcVal, Type *DstTy,
                                          ExecutionContext &SF) {
  Type *SrcTy = SrcVal->getType();
  GenericValue Dest, Src = getOperandValue(SrcVal, SF);
  if (SrcTy->isVectorTy()) {
    Type *DstVecTy = DstTy->getScalarType();
    unsigned DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();

    unsigned size = Src.AggregateVal.size();
    // the sizes of src and dst vectors must be equal.
    Dest.AggregateVal.resize(size);
    for (unsigned i = 0; i < size; i++)
      Dest.AggregateVal[i].IntVal = Src.AggregateVal[i].IntVal.zext(DBitWidth);
  } else {
    auto *DITy = cast<IntegerType>(DstTy);
    unsigned DBitWidth = DITy->getBitWidth();
    Dest.IntVal = Src.IntVal.zext(DBitWidth);
  }
  return Dest;
}

// FPTrunc: only double -> float is supported (scalar or lane-wise).
GenericValue Interpreter::executeFPTruncInst(Value *SrcVal, Type *DstTy,
                                             ExecutionContext &SF) {
  GenericValue Dest, Src = getOperandValue(SrcVal, SF);

  if (SrcVal->getType()->getTypeID() == Type::VectorTyID) {
    assert(SrcVal->getType()->getScalarType()->isDoubleTy() &&
           DstTy->getScalarType()->isFloatTy() &&
           "Invalid FPTrunc instruction");

    unsigned size = Src.AggregateVal.size();
    // the sizes of src and dst vectors must be equal.
    Dest.AggregateVal.resize(size);
    for (unsigned i = 0; i < size; i++)
      Dest.AggregateVal[i].FloatVal = (float)Src.AggregateVal[i].DoubleVal;
  } else {
    assert(SrcVal->getType()->isDoubleTy() && DstTy->isFloatTy() &&
           "Invalid FPTrunc instruction");
    Dest.FloatVal = (float)Src.DoubleVal;
  }

  return Dest;
}

// FPExt: only float -> double is supported (scalar or lane-wise).
GenericValue Interpreter::executeFPExtInst(Value *SrcVal, Type *DstTy,
                                           ExecutionContext &SF) {
  GenericValue Dest, Src = getOperandValue(SrcVal, SF);

  if (SrcVal->getType()->getTypeID() == Type::VectorTyID) {
    assert(SrcVal->getType()->getScalarType()->isFloatTy() &&
           DstTy->getScalarType()->isDoubleTy() && "Invalid FPExt instruction");

    unsigned size = Src.AggregateVal.size();
    // the sizes of src and dst vectors must be equal.
    Dest.AggregateVal.resize(size);
    for (unsigned i = 0; i < size; i++)
      Dest.AggregateVal[i].DoubleVal = (double)Src.AggregateVal[i].FloatVal;
  } else {
    assert(SrcVal->getType()->isFloatTy() && DstTy->isDoubleTy() &&
           "Invalid FPExt instruction");
    Dest.DoubleVal = (double)Src.FloatVal;
  }

  return Dest;
}

// FPToUI: round a float/double (scalar or lane-wise) to an unsigned
// integer of the destination's bit width via APIntOps rounding helpers.
GenericValue Interpreter::executeFPToUIInst(Value *SrcVal, Type *DstTy,
                                            ExecutionContext &SF) {
  Type *SrcTy = SrcVal->getType();
  GenericValue Dest, Src = getOperandValue(SrcVal, SF);

  if (SrcTy->getTypeID() == Type::VectorTyID) {
    Type *DstVecTy = DstTy->getScalarType();
    Type *SrcVecTy = SrcTy->getScalarType();
    uint32_t DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
    unsigned size = Src.AggregateVal.size();
    // the sizes of src and dst vectors must be equal.
    Dest.AggregateVal.resize(size);

    if (SrcVecTy->getTypeID() == Type::FloatTyID) {
      assert(SrcVecTy->isFloatingPointTy() && "Invalid FPToUI instruction");
      for (unsigned i = 0; i < size; i++)
        Dest.AggregateVal[i].IntVal = APIntOps::RoundFloatToAPInt(
            Src.AggregateVal[i].FloatVal, DBitWidth);
    } else {
      for (unsigned i = 0; i < size; i++)
        Dest.AggregateVal[i].IntVal = APIntOps::RoundDoubleToAPInt(
            Src.AggregateVal[i].DoubleVal, DBitWidth);
    }
  } else {
    // scalar
    uint32_t DBitWidth = cast<IntegerType>(DstTy)->getBitWidth();
    assert(SrcTy->isFloatingPointTy() && "Invalid FPToUI instruction");

    if (SrcTy->getTypeID() == Type::FloatTyID)
      Dest.IntVal = APIntOps::RoundFloatToAPInt(Src.FloatVal, DBitWidth);
    else {
      Dest.IntVal = APIntOps::RoundDoubleToAPInt(Src.DoubleVal, DBitWidth);
    }
  }

  return Dest;
}

// FPToSI: same rounding helpers as FPToUI; signedness of the result is a
// matter of interpretation by later consumers of the bits.
GenericValue Interpreter::executeFPToSIInst(Value *SrcVal, Type *DstTy,
                                            ExecutionContext &SF) {
  Type *SrcTy = SrcVal->getType();
  GenericValue Dest, Src = getOperandValue(SrcVal, SF);

  if (SrcTy->getTypeID() == Type::VectorTyID) {
    Type *DstVecTy = DstTy->getScalarType();
    Type *SrcVecTy = SrcTy->getScalarType();
    uint32_t DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
    unsigned size = Src.AggregateVal.size();
    // the sizes of src and dst vectors must be equal
    Dest.AggregateVal.resize(size);

    if (SrcVecTy->getTypeID() == Type::FloatTyID) {
      assert(SrcVecTy->isFloatingPointTy() && "Invalid FPToSI instruction");
      for (unsigned i = 0; i < size; i++)
        Dest.AggregateVal[i].IntVal = APIntOps::RoundFloatToAPInt(
            Src.AggregateVal[i].FloatVal, DBitWidth);
    } else {
      for (unsigned i = 0; i < size; i++)
        Dest.AggregateVal[i].IntVal = APIntOps::RoundDoubleToAPInt(
            Src.AggregateVal[i].DoubleVal, DBitWidth);
    }
  } else {
    // scalar
    unsigned DBitWidth = cast<IntegerType>(DstTy)->getBitWidth();
    assert(SrcTy->isFloatingPointTy() && "Invalid FPToSI instruction");

    if (SrcTy->getTypeID() == Type::FloatTyID)
      Dest.IntVal = APIntOps::RoundFloatToAPInt(Src.FloatVal, DBitWidth);
    else {
      Dest.IntVal = APIntOps::RoundDoubleToAPInt(Src.DoubleVal, DBitWidth);
    }
  }
  return Dest;
}

// UIToFP: convert an unsigned integer (scalar or lane-wise) to the
// destination float/double type.
GenericValue Interpreter::executeUIToFPInst(Value *SrcVal, Type *DstTy,
                                            ExecutionContext &SF) {
  GenericValue Dest, Src = getOperandValue(SrcVal, SF);

  if (SrcVal->getType()->getTypeID() == Type::VectorTyID) {
    Type *DstVecTy = DstTy->getScalarType();
    unsigned size = Src.AggregateVal.size();
    // the sizes of src and dst vectors must be equal
    Dest.AggregateVal.resize(size);

    if (DstVecTy->getTypeID() == Type::FloatTyID) {
      assert(DstVecTy->isFloatingPointTy() && "Invalid UIToFP instruction");
      for (unsigned i = 0; i < size; i++)
        Dest.AggregateVal[i].FloatVal =
            APIntOps::RoundAPIntToFloat(Src.AggregateVal[i].IntVal);
    } else {
      for (unsigned i = 0; i < size; i++)
        Dest.AggregateVal[i].DoubleVal =
            APIntOps::RoundAPIntToDouble(Src.AggregateVal[i].IntVal);
    }
  } else {
    // scalar
    assert(DstTy->isFloatingPointTy() && "Invalid UIToFP instruction");
    if (DstTy->getTypeID() == Type::FloatTyID)
      Dest.FloatVal = APIntOps::RoundAPIntToFloat(Src.IntVal);
    else {
      Dest.DoubleVal = APIntOps::RoundAPIntToDouble(Src.IntVal);
    }
  }
  return Dest;
}

// SIToFP: convert a signed integer (scalar or lane-wise) to the
// destination float/double type.
GenericValue Interpreter::executeSIToFPInst(Value *SrcVal, Type *DstTy,
                                            ExecutionContext &SF) {
  GenericValue Dest, Src = getOperandValue(SrcVal, SF);

  if (SrcVal->getType()->getTypeID() == Type::VectorTyID) {
    Type *DstVecTy = DstTy->getScalarType();
    unsigned size = Src.AggregateVal.size();
    // the sizes of src and dst vectors must be equal
    Dest.AggregateVal.resize(size);

    if (DstVecTy->getTypeID() == Type::FloatTyID) {
      assert(DstVecTy->isFloatingPointTy() && "Invalid SIToFP instruction");
      for (unsigned i = 0; i < size; i++)
        Dest.AggregateVal[i].FloatVal =
            APIntOps::RoundSignedAPIntToFloat(Src.AggregateVal[i].IntVal);
    } else {
      for (unsigned i = 0; i < size; i++)
        Dest.AggregateVal[i].DoubleVal =
            APIntOps::RoundSignedAPIntToDouble(Src.AggregateVal[i].IntVal);
    }
  } else {
    // scalar
    assert(DstTy->isFloatingPointTy() && "Invalid SIToFP instruction");

    if (DstTy->getTypeID() == Type::FloatTyID)
      Dest.FloatVal = APIntOps::RoundSignedAPIntToFloat(Src.IntVal);
    else {
      Dest.DoubleVal = APIntOps::RoundSignedAPIntToDouble(Src.IntVal);
    }
  }

  return Dest;
}

// PtrToInt: reinterpret the host pointer value as an integer of the
// destination's bit width.
GenericValue Interpreter::executePtrToIntInst(Value *SrcVal, Type *DstTy,
                                              ExecutionContext &SF) {
  uint32_t DBitWidth = cast<IntegerType>(DstTy)->getBitWidth();
  GenericValue Dest, Src = getOperandValue(SrcVal, SF);
  assert(SrcVal->getType()->isPointerTy() && "Invalid PtrToInt instruction");
  Dest.IntVal = APInt(DBitWidth, (intptr_t) Src.PointerVal);
  return Dest;
}

// IntToPtr: zero-extend or truncate the integer to the target pointer
// width, then reinterpret it as a host pointer.
GenericValue Interpreter::executeIntToPtrInst(Value *SrcVal, Type *DstTy,
                                              ExecutionContext &SF) {
  GenericValue Dest, Src = getOperandValue(SrcVal, SF);
  // NOTE(review): assert message says "PtrToInt" but this is IntToPtr.
  assert(DstTy->isPointerTy() && "Invalid PtrToInt instruction");

  uint32_t PtrSize = getDataLayout().getPointerSizeInBits();
  if (PtrSize != Src.IntVal.getBitWidth())
    Src.IntVal = Src.IntVal.zextOrTrunc(PtrSize);

  Dest.PointerVal = PointerTy(intptr_t(Src.IntVal.getZExtValue()));
  return Dest;
}

GenericValue Interpreter::executeBitCastInst(Value *SrcVal, Type *DstTy,
                                             ExecutionContext &SF) {

  // This instruction supports bitwise conversion of vectors to integers and
  // to vectors of other types (as long as they have the same size)
  Type *SrcTy = SrcVal->getType();
  GenericValue Dest, Src = getOperandValue(SrcVal, SF);

  if ((SrcTy->getTypeID() == Type::VectorTyID) ||
      (DstTy->getTypeID() == Type::VectorTyID)) {
    // vector src bitcast to vector dst or vector src bitcast to scalar dst or
    // scalar src bitcast to vector dst
    bool isLittleEndian = getDataLayout().isLittleEndian();
    GenericValue TempDst, TempSrc, SrcVec;
    Type *SrcElemTy;
    Type *DstElemTy;
    unsigned SrcBitSize;
    unsigned DstBitSize;
    unsigned SrcNum;
    unsigned DstNum;

    if (SrcTy->getTypeID() == Type::VectorTyID) {
      SrcElemTy = SrcTy->getScalarType();
      SrcBitSize = SrcTy->getScalarSizeInBits();
      SrcNum = Src.AggregateVal.size();
      SrcVec = Src;
    } else {
      // if src is scalar value, make it vector <1 x type>
      SrcElemTy = SrcTy;
      SrcBitSize = SrcTy->getPrimitiveSizeInBits();
      SrcNum = 1;
      SrcVec.AggregateVal.push_back(Src);
    }

    if (DstTy->getTypeID() == Type::VectorTyID) {
      DstElemTy = DstTy->getScalarType();
      DstBitSize = DstTy->getScalarSizeInBits();
      // Total bit count is preserved, so the dst element count follows.
      DstNum = (SrcNum * SrcBitSize) / DstBitSize;
    } else {
      DstElemTy = DstTy;
      DstBitSize = DstTy->getPrimitiveSizeInBits();
      DstNum = 1;
    }

    if (SrcNum * SrcBitSize != DstNum * DstBitSize)
      llvm_unreachable("Invalid BitCast");

    // If src is floating point, cast to integer first.
    TempSrc.AggregateVal.resize(SrcNum);
    if (SrcElemTy->isFloatTy()) {
      for (unsigned i = 0; i < SrcNum; i++)
        TempSrc.AggregateVal[i].IntVal =
            APInt::floatToBits(SrcVec.AggregateVal[i].FloatVal);

    } else if (SrcElemTy->isDoubleTy()) {
      for (unsigned i = 0; i < SrcNum; i++)
        TempSrc.AggregateVal[i].IntVal =
            APInt::doubleToBits(SrcVec.AggregateVal[i].DoubleVal);
    } else if (SrcElemTy->isIntegerTy()) {
      for (unsigned i = 0; i < SrcNum; i++)
        TempSrc.AggregateVal[i].IntVal = SrcVec.AggregateVal[i].IntVal;
    } else {
      // Pointers are not allowed as the element type of vector.
      llvm_unreachable("Invalid Bitcast");
    }

    // now TempSrc is integer type vector
    if (DstNum < SrcNum) {
      // Example: bitcast <4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>
      // Pack Ratio source lanes into each destination lane, honoring the
      // target's endianness for lane ordering.
      unsigned Ratio = SrcNum / DstNum;
      unsigned SrcElt = 0;
      for (unsigned i = 0; i < DstNum; i++) {
        GenericValue Elt;
        Elt.IntVal = 0;
        Elt.IntVal = Elt.IntVal.zext(DstBitSize);
        unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize * (Ratio - 1);
        for (unsigned j = 0; j < Ratio; j++) {
          APInt Tmp;
          Tmp = Tmp.zext(SrcBitSize);
          Tmp = TempSrc.AggregateVal[SrcElt++].IntVal;
          Tmp = Tmp.zext(DstBitSize);
          Tmp <<= ShiftAmt;
          ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;
          Elt.IntVal |= Tmp;
        }
        TempDst.AggregateVal.push_back(Elt);
      }
    } else {
      // Example: bitcast <2 x i64> <i64 0, i64 1> to <4 x i32>
      // Split each source lane into Ratio destination lanes.
      unsigned Ratio = DstNum / SrcNum;
      for (unsigned i = 0; i < SrcNum; i++) {
        unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize * (Ratio - 1);
        for (unsigned j = 0; j < Ratio; j++) {
          GenericValue Elt;
          Elt.IntVal = Elt.IntVal.zext(SrcBitSize);
          Elt.IntVal = TempSrc.AggregateVal[i].IntVal;
          Elt.IntVal.lshrInPlace(ShiftAmt);
          // it could be DstBitSize == SrcBitSize, so check it
          if (DstBitSize < SrcBitSize)
            Elt.IntVal = Elt.IntVal.trunc(DstBitSize);
          ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;
          TempDst.AggregateVal.push_back(Elt);
        }
      }
    }

    // convert result from integer to specified type
    if (DstTy->getTypeID() == Type::VectorTyID) {
      if (DstElemTy->isDoubleTy()) {
        Dest.AggregateVal.resize(DstNum);
        for (unsigned i = 0; i < DstNum; i++)
          Dest.AggregateVal[i].DoubleVal =
              TempDst.AggregateVal[i].IntVal.bitsToDouble();
      } else if (DstElemTy->isFloatTy()) {
        Dest.AggregateVal.resize(DstNum);
        for (unsigned i = 0; i < DstNum; i++)
          Dest.AggregateVal[i].FloatVal =
              TempDst.AggregateVal[i].IntVal.bitsToFloat();
      } else {
        Dest = TempDst;
      }
    } else {
      if (DstElemTy->isDoubleTy())
        Dest.DoubleVal = TempDst.AggregateVal[0].IntVal.bitsToDouble();
      else if (DstElemTy->isFloatTy()) {
        Dest.FloatVal = TempDst.AggregateVal[0].IntVal.bitsToFloat();
      } else {
        Dest.IntVal = TempDst.AggregateVal[0].IntVal;
      }
    }
  } else { //  if ((SrcTy->getTypeID() == Type::VectorTyID) ||
           //     (DstTy->getTypeID() == Type::VectorTyID))

    // scalar src bitcast to scalar dst
    if (DstTy->isPointerTy()) {
      assert(SrcTy->isPointerTy() && "Invalid BitCast");
      Dest.PointerVal = Src.PointerVal;
    } else if (DstTy->isIntegerTy()) {
      if (SrcTy->isFloatTy())
        Dest.IntVal = APInt::floatToBits(Src.FloatVal);
      else if (SrcTy->isDoubleTy()) {
        Dest.IntVal = APInt::doubleToBits(Src.DoubleVal);
      } else if (SrcTy->isIntegerTy()) {
        Dest.IntVal = Src.IntVal;
      } else {
        llvm_unreachable("Invalid BitCast");
      }
    } else if (DstTy->isFloatTy()) {
      if (SrcTy->isIntegerTy())
        Dest.FloatVal = Src.IntVal.bitsToFloat();
      else {
        Dest.FloatVal = Src.FloatVal;
      }
    } else if (DstTy->isDoubleTy()) {
      if (SrcTy->isIntegerTy())
        Dest.DoubleVal = Src.IntVal.bitsToDouble();
      else {
        Dest.DoubleVal = Src.DoubleVal;
      }
    } else {
      llvm_unreachable("Invalid Bitcast");
    }
  }

  return Dest;
}

// The visit* methods below simply route each cast instruction to its
// execute* helper and record the result in the current stack frame.

void Interpreter::visitTruncInst(TruncInst &I) {
  ExecutionContext &SF = ECStack.back();
  SetValue(&I, executeTruncInst(I.getOperand(0), I.getType(), SF), SF);
}

void Interpreter::visitSExtInst(SExtInst &I) {
  ExecutionContext &SF = ECStack.back();
  SetValue(&I, executeSExtInst(I.getOperand(0), I.getType(), SF), SF);
}

void Interpreter::visitZExtInst(ZExtInst &I) {
  ExecutionContext &SF = ECStack.back();
  SetValue(&I, executeZExtInst(I.getOperand(0), I.getType(), SF), SF);
}

void Interpreter::visitFPTruncInst(FPTruncInst &I) {
  ExecutionContext &SF = ECStack.back();
  SetValue(&I, executeFPTruncInst(I.getOperand(0), I.getType(), SF), SF);
}

void Interpreter::visitFPExtInst(FPExtInst &I) {
  ExecutionContext &SF = ECStack.back();
  SetValue(&I, executeFPExtInst(I.getOperand(0), I.getType(), SF), SF);
}

void Interpreter::visitUIToFPInst(UIToFPInst &I) {
  ExecutionContext &SF = ECStack.back();
  SetValue(&I, executeUIToFPInst(I.getOperand(0), I.getType(), SF), SF);
}

void Interpreter::visitSIToFPInst(SIToFPInst &I) {
  ExecutionContext &SF = ECStack.back();
  SetValue(&I, executeSIToFPInst(I.getOperand(0), I.getType(), SF), SF);
}

void Interpreter::visitFPToUIInst(FPToUIInst &I) {
  ExecutionContext &SF = ECStack.back();
  SetValue(&I, executeFPToUIInst(I.getOperand(0), I.getType(), SF), SF);
}

void Interpreter::visitFPToSIInst(FPToSIInst &I) {
  ExecutionContext &SF = ECStack.back();
  SetValue(&I, executeFPToSIInst(I.getOperand(0), I.getType(), SF), SF);
}

void Interpreter::visitPtrToIntInst(PtrToIntInst &I) {
  ExecutionContext &SF = ECStack.back();
  SetValue(&I, executePtrToIntInst(I.getOperand(0), I.getType(), SF), SF);
}

void Interpreter::visitIntToPtrInst(IntToPtrInst &I) {
  ExecutionContext &SF = ECStack.back();
  SetValue(&I, executeIntToPtrInst(I.getOperand(0), I.getType(), SF), SF);
}

void Interpreter::visitBitCastInst(BitCastInst &I) {
  ExecutionContext &SF = ECStack.back();
  SetValue(&I, executeBitCastInst(I.getOperand(0), I.getType(), SF), SF);
}

#define IMPLEMENT_VAARG(TY) \
   case Type::TY##TyID: Dest.TY##Val = Src.TY##Val; break

void Interpreter::visitVAArgInst(VAArgInst &I) {
  ExecutionContext &SF = ECStack.back();

  // Get the incoming valist parameter.  LLI treats the valist as a
  // (ec-stack-depth var-arg-index) pair.
  GenericValue VAList = getOperandValue(I.getOperand(0), SF);
  GenericValue Dest;
  GenericValue Src = ECStack[VAList.UIntPairVal.first]
                         .VarArgs[VAList.UIntPairVal.second];
  Type *Ty = I.getType();
  switch (Ty->getTypeID()) {
  case Type::IntegerTyID:
    Dest.IntVal = Src.IntVal;
    break;
  IMPLEMENT_VAARG(Pointer);
  IMPLEMENT_VAARG(Float);
  IMPLEMENT_VAARG(Double);
  default:
    dbgs() << "Unhandled dest type for vaarg instruction: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }

  // Set the Value of this Instruction.
  SetValue(&I, Dest, SF);

  // Move the pointer to the next vararg.
  // NOTE(review): VAList is a local copy, so this increment does not persist
  // into the stored valist value — confirm this matches the intended model.
  ++VAList.UIntPairVal.second;
}

void Interpreter::visitExtractElementInst(ExtractElementInst &I) {
  ExecutionContext &SF = ECStack.back();
  GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  GenericValue Dest;

  Type *Ty = I.getType();
  const unsigned indx = unsigned(Src2.IntVal.getZExtValue());

  if(Src1.AggregateVal.size() > indx) {
    switch (Ty->getTypeID()) {
    default:
      dbgs() << "Unhandled destination type for extractelement instruction: "
             << *Ty << "\n";
      llvm_unreachable(nullptr);
      break;
    case Type::IntegerTyID:
      Dest.IntVal = Src1.AggregateVal[indx].IntVal;
      break;
    case Type::FloatTyID:
      Dest.FloatVal = Src1.AggregateVal[indx].FloatVal;
      break;
    case Type::DoubleTyID:
      Dest.DoubleVal = Src1.AggregateVal[indx].DoubleVal;
      break;
    }
  } else {
    // Out-of-range index: report it; Dest keeps its default-constructed value.
    dbgs() << "Invalid index in extractelement instruction\n";
  }

  SetValue(&I, Dest, SF);
}

void Interpreter::visitInsertElementInst(InsertElementInst &I) {
  ExecutionContext &SF = ECStack.back();
  VectorType *Ty = cast<VectorType>(I.getType());

  GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  GenericValue Src3 = getOperandValue(I.getOperand(2), SF);
  GenericValue Dest;

  Type *TyContained = Ty->getElementType();

  const unsigned indx = unsigned(Src3.IntVal.getZExtValue());
  // Start from a copy of the source vector, then overwrite one lane.
  Dest.AggregateVal = Src1.AggregateVal;

  if(Src1.AggregateVal.size() <= indx)
      llvm_unreachable("Invalid index in insertelement instruction");
  switch (TyContained->getTypeID()) {
    default:
      llvm_unreachable("Unhandled dest type for insertelement instruction");
    case Type::IntegerTyID:
      Dest.AggregateVal[indx].IntVal = Src2.IntVal;
      break;
    case Type::FloatTyID:
      Dest.AggregateVal[indx].FloatVal = Src2.FloatVal;
      break;
    case Type::DoubleTyID:
      Dest.AggregateVal[indx].DoubleVal = Src2.DoubleVal;
      break;
  }
  SetValue(&I, Dest, SF);
}

void Interpreter::visitShuffleVectorInst(ShuffleVectorInst &I){
  ExecutionContext &SF = ECStack.back();

  VectorType *Ty = cast<VectorType>(I.getType());

  GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  GenericValue Src3 = getOperandValue(I.getOperand(2), SF);
  GenericValue Dest;

  // There is no need to check types of src1 and src2, because the compiled
  // bytecode can't contain different types for src1 and src2 for a
  // shufflevector instruction.

  Type *TyContained = Ty->getElementType();
  unsigned src1Size = (unsigned)Src1.AggregateVal.size();
  unsigned src2Size = (unsigned)Src2.AggregateVal.size();
  unsigned src3Size = (unsigned)Src3.AggregateVal.size();

  Dest.AggregateVal.resize(src3Size);

  switch (TyContained->getTypeID()) {
    default:
      llvm_unreachable("Unhandled dest type for insertelement instruction");
      break;
    case Type::IntegerTyID:
      for( unsigned i=0; i<src3Size; i++) {
        // Mask lanes < src1Size select from the first vector, the rest from
        // the second.
        unsigned j = Src3.AggregateVal[i].IntVal.getZExtValue();
        if(j < src1Size)
          Dest.AggregateVal[i].IntVal = Src1.AggregateVal[j].IntVal;
        else if(j < src1Size + src2Size)
          Dest.AggregateVal[i].IntVal = Src2.AggregateVal[j-src1Size].IntVal;
        else
          // The selector may not be greater than sum of lengths of first and
          // second operands and the LLVM verifier should not allow a situation
          // like
          // %tmp = shufflevector <2 x i32> <i32 3, i32 4>, <2 x i32> undef,
          //                      <2 x i32> < i32 0, i32 5 >,
          // where i32 5 is invalid, but let it be additional check here:
          llvm_unreachable("Invalid mask in shufflevector instruction");
      }
      break;
    case Type::FloatTyID:
      for(
unsigned i=0; i<src3Size; i++) { 1905 unsigned j = Src3.AggregateVal[i].IntVal.getZExtValue(); 1906 if(j < src1Size) 1907 Dest.AggregateVal[i].FloatVal = Src1.AggregateVal[j].FloatVal; 1908 else if(j < src1Size + src2Size) 1909 Dest.AggregateVal[i].FloatVal = Src2.AggregateVal[j-src1Size].FloatVal; 1910 else 1911 llvm_unreachable("Invalid mask in shufflevector instruction"); 1912 } 1913 break; 1914 case Type::DoubleTyID: 1915 for( unsigned i=0; i<src3Size; i++) { 1916 unsigned j = Src3.AggregateVal[i].IntVal.getZExtValue(); 1917 if(j < src1Size) 1918 Dest.AggregateVal[i].DoubleVal = Src1.AggregateVal[j].DoubleVal; 1919 else if(j < src1Size + src2Size) 1920 Dest.AggregateVal[i].DoubleVal = 1921 Src2.AggregateVal[j-src1Size].DoubleVal; 1922 else 1923 llvm_unreachable("Invalid mask in shufflevector instruction"); 1924 } 1925 break; 1926 } 1927 SetValue(&I, Dest, SF); 1928 } 1929 1930 void Interpreter::visitExtractValueInst(ExtractValueInst &I) { 1931 ExecutionContext &SF = ECStack.back(); 1932 Value *Agg = I.getAggregateOperand(); 1933 GenericValue Dest; 1934 GenericValue Src = getOperandValue(Agg, SF); 1935 1936 ExtractValueInst::idx_iterator IdxBegin = I.idx_begin(); 1937 unsigned Num = I.getNumIndices(); 1938 GenericValue *pSrc = &Src; 1939 1940 for (unsigned i = 0 ; i < Num; ++i) { 1941 pSrc = &pSrc->AggregateVal[*IdxBegin]; 1942 ++IdxBegin; 1943 } 1944 1945 Type *IndexedType = ExtractValueInst::getIndexedType(Agg->getType(), I.getIndices()); 1946 switch (IndexedType->getTypeID()) { 1947 default: 1948 llvm_unreachable("Unhandled dest type for extractelement instruction"); 1949 break; 1950 case Type::IntegerTyID: 1951 Dest.IntVal = pSrc->IntVal; 1952 break; 1953 case Type::FloatTyID: 1954 Dest.FloatVal = pSrc->FloatVal; 1955 break; 1956 case Type::DoubleTyID: 1957 Dest.DoubleVal = pSrc->DoubleVal; 1958 break; 1959 case Type::ArrayTyID: 1960 case Type::StructTyID: 1961 case Type::VectorTyID: 1962 Dest.AggregateVal = pSrc->AggregateVal; 1963 break; 1964 case 
Type::PointerTyID: 1965 Dest.PointerVal = pSrc->PointerVal; 1966 break; 1967 } 1968 1969 SetValue(&I, Dest, SF); 1970 } 1971 1972 void Interpreter::visitInsertValueInst(InsertValueInst &I) { 1973 1974 ExecutionContext &SF = ECStack.back(); 1975 Value *Agg = I.getAggregateOperand(); 1976 1977 GenericValue Src1 = getOperandValue(Agg, SF); 1978 GenericValue Src2 = getOperandValue(I.getOperand(1), SF); 1979 GenericValue Dest = Src1; // Dest is a slightly changed Src1 1980 1981 ExtractValueInst::idx_iterator IdxBegin = I.idx_begin(); 1982 unsigned Num = I.getNumIndices(); 1983 1984 GenericValue *pDest = &Dest; 1985 for (unsigned i = 0 ; i < Num; ++i) { 1986 pDest = &pDest->AggregateVal[*IdxBegin]; 1987 ++IdxBegin; 1988 } 1989 // pDest points to the target value in the Dest now 1990 1991 Type *IndexedType = ExtractValueInst::getIndexedType(Agg->getType(), I.getIndices()); 1992 1993 switch (IndexedType->getTypeID()) { 1994 default: 1995 llvm_unreachable("Unhandled dest type for insertelement instruction"); 1996 break; 1997 case Type::IntegerTyID: 1998 pDest->IntVal = Src2.IntVal; 1999 break; 2000 case Type::FloatTyID: 2001 pDest->FloatVal = Src2.FloatVal; 2002 break; 2003 case Type::DoubleTyID: 2004 pDest->DoubleVal = Src2.DoubleVal; 2005 break; 2006 case Type::ArrayTyID: 2007 case Type::StructTyID: 2008 case Type::VectorTyID: 2009 pDest->AggregateVal = Src2.AggregateVal; 2010 break; 2011 case Type::PointerTyID: 2012 pDest->PointerVal = Src2.PointerVal; 2013 break; 2014 } 2015 2016 SetValue(&I, Dest, SF); 2017 } 2018 2019 GenericValue Interpreter::getConstantExprValue (ConstantExpr *CE, 2020 ExecutionContext &SF) { 2021 switch (CE->getOpcode()) { 2022 case Instruction::Trunc: 2023 return executeTruncInst(CE->getOperand(0), CE->getType(), SF); 2024 case Instruction::ZExt: 2025 return executeZExtInst(CE->getOperand(0), CE->getType(), SF); 2026 case Instruction::SExt: 2027 return executeSExtInst(CE->getOperand(0), CE->getType(), SF); 2028 case Instruction::FPTrunc: 2029 
return executeFPTruncInst(CE->getOperand(0), CE->getType(), SF); 2030 case Instruction::FPExt: 2031 return executeFPExtInst(CE->getOperand(0), CE->getType(), SF); 2032 case Instruction::UIToFP: 2033 return executeUIToFPInst(CE->getOperand(0), CE->getType(), SF); 2034 case Instruction::SIToFP: 2035 return executeSIToFPInst(CE->getOperand(0), CE->getType(), SF); 2036 case Instruction::FPToUI: 2037 return executeFPToUIInst(CE->getOperand(0), CE->getType(), SF); 2038 case Instruction::FPToSI: 2039 return executeFPToSIInst(CE->getOperand(0), CE->getType(), SF); 2040 case Instruction::PtrToInt: 2041 return executePtrToIntInst(CE->getOperand(0), CE->getType(), SF); 2042 case Instruction::IntToPtr: 2043 return executeIntToPtrInst(CE->getOperand(0), CE->getType(), SF); 2044 case Instruction::BitCast: 2045 return executeBitCastInst(CE->getOperand(0), CE->getType(), SF); 2046 case Instruction::GetElementPtr: 2047 return executeGEPOperation(CE->getOperand(0), gep_type_begin(CE), 2048 gep_type_end(CE), SF); 2049 case Instruction::FCmp: 2050 case Instruction::ICmp: 2051 return executeCmpInst(CE->getPredicate(), 2052 getOperandValue(CE->getOperand(0), SF), 2053 getOperandValue(CE->getOperand(1), SF), 2054 CE->getOperand(0)->getType()); 2055 case Instruction::Select: 2056 return executeSelectInst(getOperandValue(CE->getOperand(0), SF), 2057 getOperandValue(CE->getOperand(1), SF), 2058 getOperandValue(CE->getOperand(2), SF), 2059 CE->getOperand(0)->getType()); 2060 default : 2061 break; 2062 } 2063 2064 // The cases below here require a GenericValue parameter for the result 2065 // so we initialize one, compute it and then return it. 
2066 GenericValue Op0 = getOperandValue(CE->getOperand(0), SF); 2067 GenericValue Op1 = getOperandValue(CE->getOperand(1), SF); 2068 GenericValue Dest; 2069 Type * Ty = CE->getOperand(0)->getType(); 2070 switch (CE->getOpcode()) { 2071 case Instruction::Add: Dest.IntVal = Op0.IntVal + Op1.IntVal; break; 2072 case Instruction::Sub: Dest.IntVal = Op0.IntVal - Op1.IntVal; break; 2073 case Instruction::Mul: Dest.IntVal = Op0.IntVal * Op1.IntVal; break; 2074 case Instruction::FAdd: executeFAddInst(Dest, Op0, Op1, Ty); break; 2075 case Instruction::FSub: executeFSubInst(Dest, Op0, Op1, Ty); break; 2076 case Instruction::FMul: executeFMulInst(Dest, Op0, Op1, Ty); break; 2077 case Instruction::FDiv: executeFDivInst(Dest, Op0, Op1, Ty); break; 2078 case Instruction::FRem: executeFRemInst(Dest, Op0, Op1, Ty); break; 2079 case Instruction::SDiv: Dest.IntVal = Op0.IntVal.sdiv(Op1.IntVal); break; 2080 case Instruction::UDiv: Dest.IntVal = Op0.IntVal.udiv(Op1.IntVal); break; 2081 case Instruction::URem: Dest.IntVal = Op0.IntVal.urem(Op1.IntVal); break; 2082 case Instruction::SRem: Dest.IntVal = Op0.IntVal.srem(Op1.IntVal); break; 2083 case Instruction::And: Dest.IntVal = Op0.IntVal & Op1.IntVal; break; 2084 case Instruction::Or: Dest.IntVal = Op0.IntVal | Op1.IntVal; break; 2085 case Instruction::Xor: Dest.IntVal = Op0.IntVal ^ Op1.IntVal; break; 2086 case Instruction::Shl: 2087 Dest.IntVal = Op0.IntVal.shl(Op1.IntVal.getZExtValue()); 2088 break; 2089 case Instruction::LShr: 2090 Dest.IntVal = Op0.IntVal.lshr(Op1.IntVal.getZExtValue()); 2091 break; 2092 case Instruction::AShr: 2093 Dest.IntVal = Op0.IntVal.ashr(Op1.IntVal.getZExtValue()); 2094 break; 2095 default: 2096 dbgs() << "Unhandled ConstantExpr: " << *CE << "\n"; 2097 llvm_unreachable("Unhandled ConstantExpr"); 2098 } 2099 return Dest; 2100 } 2101 2102 GenericValue Interpreter::getOperandValue(Value *V, ExecutionContext &SF) { 2103 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) { 2104 return getConstantExprValue(CE, 
SF); 2105 } else if (Constant *CPV = dyn_cast<Constant>(V)) { 2106 return getConstantValue(CPV); 2107 } else if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) { 2108 return PTOGV(getPointerToGlobal(GV)); 2109 } else { 2110 return SF.Values[V]; 2111 } 2112 } 2113 2114 //===----------------------------------------------------------------------===// 2115 // Dispatch and Execution Code 2116 //===----------------------------------------------------------------------===// 2117 2118 //===----------------------------------------------------------------------===// 2119 // callFunction - Execute the specified function... 2120 // 2121 void Interpreter::callFunction(Function *F, ArrayRef<GenericValue> ArgVals) { 2122 assert((ECStack.empty() || !ECStack.back().Caller.getInstruction() || 2123 ECStack.back().Caller.arg_size() == ArgVals.size()) && 2124 "Incorrect number of arguments passed into function call!"); 2125 // Make a new stack frame... and fill it in. 2126 ECStack.emplace_back(); 2127 ExecutionContext &StackFrame = ECStack.back(); 2128 StackFrame.CurFunction = F; 2129 2130 // Special handling for external functions. 2131 if (F->isDeclaration()) { 2132 GenericValue Result = callExternalFunction (F, ArgVals); 2133 // Simulate a 'ret' instruction of the appropriate type. 2134 popStackAndReturnValueToCaller (F->getReturnType (), Result); 2135 return; 2136 } 2137 2138 // Get pointers to first LLVM BB & Instruction in function. 2139 StackFrame.CurBB = &F->front(); 2140 StackFrame.CurInst = StackFrame.CurBB->begin(); 2141 2142 // Run through the function arguments and initialize their values... 2143 assert((ArgVals.size() == F->arg_size() || 2144 (ArgVals.size() > F->arg_size() && F->getFunctionType()->isVarArg()))&& 2145 "Invalid number of values passed to function invocation!"); 2146 2147 // Handle non-varargs arguments... 
2148 unsigned i = 0; 2149 for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end(); 2150 AI != E; ++AI, ++i) 2151 SetValue(&*AI, ArgVals[i], StackFrame); 2152 2153 // Handle varargs arguments... 2154 StackFrame.VarArgs.assign(ArgVals.begin()+i, ArgVals.end()); 2155 } 2156 2157 2158 void Interpreter::run() { 2159 while (!ECStack.empty()) { 2160 // Interpret a single instruction & increment the "PC". 2161 ExecutionContext &SF = ECStack.back(); // Current stack frame 2162 Instruction &I = *SF.CurInst++; // Increment before execute 2163 2164 // Track the number of dynamic instructions executed. 2165 ++NumDynamicInsts; 2166 2167 LLVM_DEBUG(dbgs() << "About to interpret: " << I << "\n"); 2168 visit(I); // Dispatch to one of the visit* methods... 2169 } 2170 } 2171