//===-- Execution.cpp - Implement code to simulate the program ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//  This file contains the actual instruction interpreter.
//
//===----------------------------------------------------------------------===//

#include "Interpreter.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cmath>
using namespace llvm;

#define DEBUG_TYPE "interpreter"

STATISTIC(NumDynamicInsts, "Number of dynamic instructions executed");

static cl::opt<bool> PrintVolatile("interpreter-print-volatile", cl::Hidden,
          cl::desc("make the interpreter print every volatile load and store"));

//===----------------------------------------------------------------------===//
//                     Various Helper Functions
//===----------------------------------------------------------------------===//

static void SetValue(Value *V, GenericValue Val, ExecutionContext &SF) {
  SF.Values[V] = Val;
}

//===----------------------------------------------------------------------===//
//                    Unary Instruction Implementations
//===----------------------------------------------------------------------===//

static void executeFNegInst(GenericValue &Dest, GenericValue Src, Type *Ty) {
  switch (Ty->getTypeID()) {
  case Type::FloatTyID:
    Dest.FloatVal = -Src.FloatVal;
    break;
  case Type::DoubleTyID:
    Dest.DoubleVal = -Src.DoubleVal;
    break;
  default:
    llvm_unreachable("Unhandled type for FNeg instruction");
  }
}

void Interpreter::visitUnaryOperator(UnaryOperator &I) {
  ExecutionContext &SF = ECStack.back();
  Type *Ty = I.getOperand(0)->getType();
  GenericValue Src = getOperandValue(I.getOperand(0), SF);
  GenericValue R; // Result

  // First process vector operation
  if (Ty->isVectorTy()) {
    R.AggregateVal.resize(Src.AggregateVal.size());

    switch(I.getOpcode()) {
    default:
      llvm_unreachable("Don't know how to handle this unary operator");
      break;
    case Instruction::FNeg:
      if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) {
        for (unsigned i = 0; i < R.AggregateVal.size(); ++i)
          R.AggregateVal[i].FloatVal = -Src.AggregateVal[i].FloatVal;
      } else if (cast<VectorType>(Ty)->getElementType()->isDoubleTy()) {
        for (unsigned i = 0; i < R.AggregateVal.size(); ++i)
          R.AggregateVal[i].DoubleVal = -Src.AggregateVal[i].DoubleVal;
      } else {
        llvm_unreachable("Unhandled type for FNeg instruction");
      }
      break;
    }
  } else {
    switch (I.getOpcode()) {
    default:
      llvm_unreachable("Don't know how to handle this unary operator");
      break;
    case Instruction::FNeg: executeFNegInst(R, Src, Ty); break;
    }
  }
  SetValue(&I, R, SF);
}

//===----------------------------------------------------------------------===//
//                    Binary Instruction Implementations
//===----------------------------------------------------------------------===//

#define IMPLEMENT_BINARY_OPERATOR(OP, TY) \
   case Type::TY##TyID: \
     Dest.TY##Val = Src1.TY##Val OP Src2.TY##Val; \
     break
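// For illustration, IMPLEMENT_BINARY_OPERATOR(+, Float) expands to:
//   case Type::FloatTyID:
//     Dest.FloatVal = Src1.FloatVal + Src2.FloatVal;
//     break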

static void executeFAddInst(GenericValue &Dest, GenericValue Src1,
                            GenericValue Src2, Type *Ty) {
  switch (Ty->getTypeID()) {
    IMPLEMENT_BINARY_OPERATOR(+, Float);
    IMPLEMENT_BINARY_OPERATOR(+, Double);
  default:
    dbgs() << "Unhandled type for FAdd instruction: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
}

static void executeFSubInst(GenericValue &Dest, GenericValue Src1,
                            GenericValue Src2, Type *Ty) {
  switch (Ty->getTypeID()) {
    IMPLEMENT_BINARY_OPERATOR(-, Float);
    IMPLEMENT_BINARY_OPERATOR(-, Double);
  default:
    dbgs() << "Unhandled type for FSub instruction: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
}

static void executeFMulInst(GenericValue &Dest, GenericValue Src1,
                            GenericValue Src2, Type *Ty) {
  switch (Ty->getTypeID()) {
    IMPLEMENT_BINARY_OPERATOR(*, Float);
    IMPLEMENT_BINARY_OPERATOR(*, Double);
  default:
    dbgs() << "Unhandled type for FMul instruction: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
}

static void executeFDivInst(GenericValue &Dest, GenericValue Src1,
                            GenericValue Src2, Type *Ty) {
  switch (Ty->getTypeID()) {
    IMPLEMENT_BINARY_OPERATOR(/, Float);
    IMPLEMENT_BINARY_OPERATOR(/, Double);
  default:
    dbgs() << "Unhandled type for FDiv instruction: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
}

static void executeFRemInst(GenericValue &Dest, GenericValue Src1,
                            GenericValue Src2, Type *Ty) {
  switch (Ty->getTypeID()) {
  case Type::FloatTyID:
    Dest.FloatVal = fmod(Src1.FloatVal, Src2.FloatVal);
    break;
  case Type::DoubleTyID:
    Dest.DoubleVal = fmod(Src1.DoubleVal, Src2.DoubleVal);
    break;
  default:
    dbgs() << "Unhandled type for Rem instruction: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
}

#define IMPLEMENT_INTEGER_ICMP(OP, TY) \
   case Type::IntegerTyID:  \
      Dest.IntVal = APInt(1,Src1.IntVal.OP(Src2.IntVal)); \
      break;

#define IMPLEMENT_VECTOR_INTEGER_ICMP(OP, TY)                        \
  case Type::VectorTyID: {                                           \
    assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());    \
    Dest.AggregateVal.resize( Src1.AggregateVal.size() );            \
    for( uint32_t _i=0;_i<Src1.AggregateVal.size();_i++)             \
      Dest.AggregateVal[_i].IntVal = APInt(1,                        \
      Src1.AggregateVal[_i].IntVal.OP(Src2.AggregateVal[_i].IntVal));\
  } break;

// Handle pointers specially because they must be compared with only as much
// width as the host has.  We _do not_ want to be comparing 64 bit values when
// running on a 32-bit target, otherwise the upper 32 bits might mess up
// comparisons if they contain garbage.
#define IMPLEMENT_POINTER_ICMP(OP) \
   case Type::PointerTyID: \
      Dest.IntVal = APInt(1,(void*)(intptr_t)Src1.PointerVal OP \
                            (void*)(intptr_t)Src2.PointerVal); \
      break;

static GenericValue executeICMP_EQ(GenericValue Src1, GenericValue Src2,
                                   Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(eq,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(eq,Ty);
    IMPLEMENT_POINTER_ICMP(==);
  default:
    dbgs() << "Unhandled type for ICMP_EQ predicate: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

static GenericValue executeICMP_NE(GenericValue Src1, GenericValue Src2,
                                   Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(ne,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(ne,Ty);
    IMPLEMENT_POINTER_ICMP(!=);
  default:
    dbgs() << "Unhandled type for ICMP_NE predicate: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

static GenericValue executeICMP_ULT(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(ult,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(ult,Ty);
    IMPLEMENT_POINTER_ICMP(<);
  default:
    dbgs() << "Unhandled type for ICMP_ULT predicate: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

static GenericValue executeICMP_SLT(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(slt,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(slt,Ty);
    IMPLEMENT_POINTER_ICMP(<);
  default:
    dbgs() << "Unhandled type for ICMP_SLT predicate: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

static GenericValue executeICMP_UGT(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(ugt,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(ugt,Ty);
    IMPLEMENT_POINTER_ICMP(>);
  default:
    dbgs() << "Unhandled type for ICMP_UGT predicate: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

static GenericValue executeICMP_SGT(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(sgt,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(sgt,Ty);
    IMPLEMENT_POINTER_ICMP(>);
  default:
    dbgs() << "Unhandled type for ICMP_SGT predicate: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

static GenericValue executeICMP_ULE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(ule,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(ule,Ty);
    IMPLEMENT_POINTER_ICMP(<=);
  default:
    dbgs() << "Unhandled type for ICMP_ULE predicate: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

static GenericValue executeICMP_SLE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(sle,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(sle,Ty);
    IMPLEMENT_POINTER_ICMP(<=);
  default:
    dbgs() << "Unhandled type for ICMP_SLE predicate: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

static GenericValue executeICMP_UGE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(uge,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(uge,Ty);
    IMPLEMENT_POINTER_ICMP(>=);
  default:
    dbgs() << "Unhandled type for ICMP_UGE predicate: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

static GenericValue executeICMP_SGE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(sge,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(sge,Ty);
    IMPLEMENT_POINTER_ICMP(>=);
  default:
    dbgs() << "Unhandled type for ICMP_SGE predicate: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

void Interpreter::visitICmpInst(ICmpInst &I) {
  ExecutionContext &SF = ECStack.back();
  Type *Ty    = I.getOperand(0)->getType();
  GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  GenericValue R;   // Result

  switch (I.getPredicate()) {
  case ICmpInst::ICMP_EQ:  R = executeICMP_EQ(Src1,  Src2, Ty); break;
  case ICmpInst::ICMP_NE:  R = executeICMP_NE(Src1,  Src2, Ty); break;
  case ICmpInst::ICMP_ULT: R = executeICMP_ULT(Src1, Src2, Ty); break;
  case ICmpInst::ICMP_SLT: R = executeICMP_SLT(Src1, Src2, Ty); break;
  case ICmpInst::ICMP_UGT: R = executeICMP_UGT(Src1, Src2, Ty); break;
  case ICmpInst::ICMP_SGT: R = executeICMP_SGT(Src1, Src2, Ty); break;
  case ICmpInst::ICMP_ULE: R = executeICMP_ULE(Src1, Src2, Ty); break;
  case ICmpInst::ICMP_SLE: R = executeICMP_SLE(Src1, Src2, Ty); break;
  case ICmpInst::ICMP_UGE: R = executeICMP_UGE(Src1, Src2, Ty); break;
  case ICmpInst::ICMP_SGE: R = executeICMP_SGE(Src1, Src2, Ty); break;
  default:
    dbgs() << "Don't know how to handle this ICmp predicate!\n-->" << I;
    llvm_unreachable(nullptr);
  }

  SetValue(&I, R, SF);
}

#define IMPLEMENT_FCMP(OP, TY) \
   case Type::TY##TyID: \
     Dest.IntVal = APInt(1,Src1.TY##Val OP Src2.TY##Val); \
     break

#define IMPLEMENT_VECTOR_FCMP_T(OP, TY)                             \
  assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());     \
  Dest.AggregateVal.resize( Src1.AggregateVal.size() );             \
  for( uint32_t _i=0;_i<Src1.AggregateVal.size();_i++)              \
    Dest.AggregateVal[_i].IntVal = APInt(1,                         \
    Src1.AggregateVal[_i].TY##Val OP Src2.AggregateVal[_i].TY##Val);\
  break;

#define IMPLEMENT_VECTOR_FCMP(OP)                                   \
  case Type::VectorTyID:                                            \
    if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) {      \
      IMPLEMENT_VECTOR_FCMP_T(OP, Float);                           \
    } else {                                                        \
        IMPLEMENT_VECTOR_FCMP_T(OP, Double);                        \
    }

static GenericValue executeFCMP_OEQ(GenericValue Src1, GenericValue Src2,
                                   Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_FCMP(==, Float);
    IMPLEMENT_FCMP(==, Double);
    IMPLEMENT_VECTOR_FCMP(==);
  default:
    dbgs() << "Unhandled type for FCmp EQ instruction: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

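// Note that X != X is true exactly when X is NaN, so the self-comparisons in
// the macros below detect NaN operands without any extra library calls.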
#define IMPLEMENT_SCALAR_NANS(TY, X,Y)                                      \
  if (TY->isFloatTy()) {                                                    \
    if (X.FloatVal != X.FloatVal || Y.FloatVal != Y.FloatVal) {             \
      Dest.IntVal = APInt(1,false);                                         \
      return Dest;                                                          \
    }                                                                       \
  } else {                                                                  \
    if (X.DoubleVal != X.DoubleVal || Y.DoubleVal != Y.DoubleVal) {         \
      Dest.IntVal = APInt(1,false);                                         \
      return Dest;                                                          \
    }                                                                       \
  }

#define MASK_VECTOR_NANS_T(X,Y, TZ, FLAG)                                   \
  assert(X.AggregateVal.size() == Y.AggregateVal.size());                   \
  Dest.AggregateVal.resize( X.AggregateVal.size() );                        \
  for( uint32_t _i=0;_i<X.AggregateVal.size();_i++) {                       \
    if (X.AggregateVal[_i].TZ##Val != X.AggregateVal[_i].TZ##Val ||         \
        Y.AggregateVal[_i].TZ##Val != Y.AggregateVal[_i].TZ##Val)           \
      Dest.AggregateVal[_i].IntVal = APInt(1,FLAG);                         \
    else  {                                                                 \
      Dest.AggregateVal[_i].IntVal = APInt(1,!FLAG);                        \
    }                                                                       \
  }

#define MASK_VECTOR_NANS(TY, X,Y, FLAG)                                     \
  if (TY->isVectorTy()) {                                                   \
    if (cast<VectorType>(TY)->getElementType()->isFloatTy()) {              \
      MASK_VECTOR_NANS_T(X, Y, Float, FLAG)                                 \
    } else {                                                                \
      MASK_VECTOR_NANS_T(X, Y, Double, FLAG)                                \
    }                                                                       \
  }                                                                         \



static GenericValue executeFCMP_ONE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty)
{
  GenericValue Dest;
  // if input is scalar value and Src1 or Src2 is NaN return false
  IMPLEMENT_SCALAR_NANS(Ty, Src1, Src2)
  // if vector input detect NaNs and fill mask
  MASK_VECTOR_NANS(Ty, Src1, Src2, false)
  GenericValue DestMask = Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_FCMP(!=, Float);
    IMPLEMENT_FCMP(!=, Double);
    IMPLEMENT_VECTOR_FCMP(!=);
    default:
      dbgs() << "Unhandled type for FCmp NE instruction: " << *Ty << "\n";
      llvm_unreachable(nullptr);
  }
  // in vector case mask out NaN elements
  if (Ty->isVectorTy())
    for( size_t _i=0; _i<Src1.AggregateVal.size(); _i++)
      if (DestMask.AggregateVal[_i].IntVal == false)
        Dest.AggregateVal[_i].IntVal = APInt(1,false);

  return Dest;
}

static GenericValue executeFCMP_OLE(GenericValue Src1, GenericValue Src2,
                                   Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_FCMP(<=, Float);
    IMPLEMENT_FCMP(<=, Double);
    IMPLEMENT_VECTOR_FCMP(<=);
  default:
    dbgs() << "Unhandled type for FCmp LE instruction: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

static GenericValue executeFCMP_OGE(GenericValue Src1, GenericValue Src2,
                                   Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_FCMP(>=, Float);
    IMPLEMENT_FCMP(>=, Double);
    IMPLEMENT_VECTOR_FCMP(>=);
  default:
    dbgs() << "Unhandled type for FCmp GE instruction: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

static GenericValue executeFCMP_OLT(GenericValue Src1, GenericValue Src2,
                                   Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_FCMP(<, Float);
    IMPLEMENT_FCMP(<, Double);
    IMPLEMENT_VECTOR_FCMP(<);
  default:
    dbgs() << "Unhandled type for FCmp LT instruction: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

static GenericValue executeFCMP_OGT(GenericValue Src1, GenericValue Src2,
                                     Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_FCMP(>, Float);
    IMPLEMENT_FCMP(>, Double);
    IMPLEMENT_VECTOR_FCMP(>);
  default:
    dbgs() << "Unhandled type for FCmp GT instruction: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

#define IMPLEMENT_UNORDERED(TY, X,Y)                                     \
  if (TY->isFloatTy()) {                                                 \
    if (X.FloatVal != X.FloatVal || Y.FloatVal != Y.FloatVal) {          \
      Dest.IntVal = APInt(1,true);                                       \
      return Dest;                                                       \
    }                                                                    \
  } else if (X.DoubleVal != X.DoubleVal || Y.DoubleVal != Y.DoubleVal) { \
    Dest.IntVal = APInt(1,true);                                         \
    return Dest;                                                         \
  }

#define IMPLEMENT_VECTOR_UNORDERED(TY, X, Y, FUNC)                             \
  if (TY->isVectorTy()) {                                                      \
    GenericValue DestMask = Dest;                                              \
    Dest = FUNC(Src1, Src2, Ty);                                               \
    for (size_t _i = 0; _i < Src1.AggregateVal.size(); _i++)                   \
      if (DestMask.AggregateVal[_i].IntVal == true)                            \
        Dest.AggregateVal[_i].IntVal = APInt(1, true);                         \
    return Dest;                                                               \
  }

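// Unordered predicates (UEQ, UNE, ULT, UGT, ULE, UGE) return true whenever
// either operand is NaN; otherwise they agree with the corresponding ordered
// predicate. Each handler below therefore checks for NaNs first and then
// delegates to its ordered counterpart.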
static GenericValue executeFCMP_UEQ(GenericValue Src1, GenericValue Src2,
                                   Type *Ty) {
  GenericValue Dest;
  IMPLEMENT_UNORDERED(Ty, Src1, Src2)
  MASK_VECTOR_NANS(Ty, Src1, Src2, true)
  IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OEQ)
  return executeFCMP_OEQ(Src1, Src2, Ty);

}

static GenericValue executeFCMP_UNE(GenericValue Src1, GenericValue Src2,
                                   Type *Ty) {
  GenericValue Dest;
  IMPLEMENT_UNORDERED(Ty, Src1, Src2)
  MASK_VECTOR_NANS(Ty, Src1, Src2, true)
  IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_ONE)
  return executeFCMP_ONE(Src1, Src2, Ty);
}

static GenericValue executeFCMP_ULE(GenericValue Src1, GenericValue Src2,
                                   Type *Ty) {
  GenericValue Dest;
  IMPLEMENT_UNORDERED(Ty, Src1, Src2)
  MASK_VECTOR_NANS(Ty, Src1, Src2, true)
  IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OLE)
  return executeFCMP_OLE(Src1, Src2, Ty);
}

static GenericValue executeFCMP_UGE(GenericValue Src1, GenericValue Src2,
                                   Type *Ty) {
  GenericValue Dest;
  IMPLEMENT_UNORDERED(Ty, Src1, Src2)
  MASK_VECTOR_NANS(Ty, Src1, Src2, true)
  IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OGE)
  return executeFCMP_OGE(Src1, Src2, Ty);
}

static GenericValue executeFCMP_ULT(GenericValue Src1, GenericValue Src2,
                                   Type *Ty) {
  GenericValue Dest;
  IMPLEMENT_UNORDERED(Ty, Src1, Src2)
  MASK_VECTOR_NANS(Ty, Src1, Src2, true)
  IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OLT)
  return executeFCMP_OLT(Src1, Src2, Ty);
}

static GenericValue executeFCMP_UGT(GenericValue Src1, GenericValue Src2,
                                     Type *Ty) {
  GenericValue Dest;
  IMPLEMENT_UNORDERED(Ty, Src1, Src2)
  MASK_VECTOR_NANS(Ty, Src1, Src2, true)
  IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OGT)
  return executeFCMP_OGT(Src1, Src2, Ty);
}

static GenericValue executeFCMP_ORD(GenericValue Src1, GenericValue Src2,
                                     Type *Ty) {
  GenericValue Dest;
  if(Ty->isVectorTy()) {
    assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
    Dest.AggregateVal.resize( Src1.AggregateVal.size() );
    if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) {
      for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
        Dest.AggregateVal[_i].IntVal = APInt(1,
        ( (Src1.AggregateVal[_i].FloatVal ==
        Src1.AggregateVal[_i].FloatVal) &&
        (Src2.AggregateVal[_i].FloatVal ==
        Src2.AggregateVal[_i].FloatVal)));
    } else {
      for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
        Dest.AggregateVal[_i].IntVal = APInt(1,
        ( (Src1.AggregateVal[_i].DoubleVal ==
        Src1.AggregateVal[_i].DoubleVal) &&
        (Src2.AggregateVal[_i].DoubleVal ==
        Src2.AggregateVal[_i].DoubleVal)));
    }
  } else if (Ty->isFloatTy())
    Dest.IntVal = APInt(1,(Src1.FloatVal == Src1.FloatVal &&
                           Src2.FloatVal == Src2.FloatVal));
  else {
    Dest.IntVal = APInt(1,(Src1.DoubleVal == Src1.DoubleVal &&
                           Src2.DoubleVal == Src2.DoubleVal));
  }
  return Dest;
}

static GenericValue executeFCMP_UNO(GenericValue Src1, GenericValue Src2,
                                     Type *Ty) {
  GenericValue Dest;
  if(Ty->isVectorTy()) {
    assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
    Dest.AggregateVal.resize( Src1.AggregateVal.size() );
    if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) {
      for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
        Dest.AggregateVal[_i].IntVal = APInt(1,
        ( (Src1.AggregateVal[_i].FloatVal !=
           Src1.AggregateVal[_i].FloatVal) ||
          (Src2.AggregateVal[_i].FloatVal !=
           Src2.AggregateVal[_i].FloatVal)));
      } else {
        for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
          Dest.AggregateVal[_i].IntVal = APInt(1,
          ( (Src1.AggregateVal[_i].DoubleVal !=
             Src1.AggregateVal[_i].DoubleVal) ||
            (Src2.AggregateVal[_i].DoubleVal !=
             Src2.AggregateVal[_i].DoubleVal)));
      }
  } else if (Ty->isFloatTy())
    Dest.IntVal = APInt(1,(Src1.FloatVal != Src1.FloatVal ||
                           Src2.FloatVal != Src2.FloatVal));
  else {
    Dest.IntVal = APInt(1,(Src1.DoubleVal != Src1.DoubleVal ||
                           Src2.DoubleVal != Src2.DoubleVal));
  }
  return Dest;
}

static GenericValue executeFCMP_BOOL(GenericValue Src1, GenericValue Src2,
                                     Type *Ty, const bool val) {
  GenericValue Dest;
    if(Ty->isVectorTy()) {
      assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
      Dest.AggregateVal.resize( Src1.AggregateVal.size() );
      for( size_t _i=0; _i<Src1.AggregateVal.size(); _i++)
        Dest.AggregateVal[_i].IntVal = APInt(1,val);
    } else {
      Dest.IntVal = APInt(1, val);
    }

    return Dest;
}

void Interpreter::visitFCmpInst(FCmpInst &I) {
  ExecutionContext &SF = ECStack.back();
  Type *Ty    = I.getOperand(0)->getType();
  GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  GenericValue R;   // Result

  switch (I.getPredicate()) {
  default:
    dbgs() << "Don't know how to handle this FCmp predicate!\n-->" << I;
    llvm_unreachable(nullptr);
  break;
  case FCmpInst::FCMP_FALSE: R = executeFCMP_BOOL(Src1, Src2, Ty, false);
  break;
  case FCmpInst::FCMP_TRUE:  R = executeFCMP_BOOL(Src1, Src2, Ty, true);
  break;
  case FCmpInst::FCMP_ORD:   R = executeFCMP_ORD(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_UNO:   R = executeFCMP_UNO(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_UEQ:   R = executeFCMP_UEQ(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_OEQ:   R = executeFCMP_OEQ(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_UNE:   R = executeFCMP_UNE(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_ONE:   R = executeFCMP_ONE(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_ULT:   R = executeFCMP_ULT(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_OLT:   R = executeFCMP_OLT(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_UGT:   R = executeFCMP_UGT(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_OGT:   R = executeFCMP_OGT(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_ULE:   R = executeFCMP_ULE(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_OLE:   R = executeFCMP_OLE(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_UGE:   R = executeFCMP_UGE(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_OGE:   R = executeFCMP_OGE(Src1, Src2, Ty); break;
  }

  SetValue(&I, R, SF);
}

static GenericValue executeCmpInst(unsigned predicate, GenericValue Src1,
                                   GenericValue Src2, Type *Ty) {
  GenericValue Result;
  switch (predicate) {
  case ICmpInst::ICMP_EQ:    return executeICMP_EQ(Src1, Src2, Ty);
  case ICmpInst::ICMP_NE:    return executeICMP_NE(Src1, Src2, Ty);
  case ICmpInst::ICMP_UGT:   return executeICMP_UGT(Src1, Src2, Ty);
  case ICmpInst::ICMP_SGT:   return executeICMP_SGT(Src1, Src2, Ty);
  case ICmpInst::ICMP_ULT:   return executeICMP_ULT(Src1, Src2, Ty);
  case ICmpInst::ICMP_SLT:   return executeICMP_SLT(Src1, Src2, Ty);
  case ICmpInst::ICMP_UGE:   return executeICMP_UGE(Src1, Src2, Ty);
  case ICmpInst::ICMP_SGE:   return executeICMP_SGE(Src1, Src2, Ty);
  case ICmpInst::ICMP_ULE:   return executeICMP_ULE(Src1, Src2, Ty);
  case ICmpInst::ICMP_SLE:   return executeICMP_SLE(Src1, Src2, Ty);
  case FCmpInst::FCMP_ORD:   return executeFCMP_ORD(Src1, Src2, Ty);
  case FCmpInst::FCMP_UNO:   return executeFCMP_UNO(Src1, Src2, Ty);
  case FCmpInst::FCMP_OEQ:   return executeFCMP_OEQ(Src1, Src2, Ty);
  case FCmpInst::FCMP_UEQ:   return executeFCMP_UEQ(Src1, Src2, Ty);
  case FCmpInst::FCMP_ONE:   return executeFCMP_ONE(Src1, Src2, Ty);
  case FCmpInst::FCMP_UNE:   return executeFCMP_UNE(Src1, Src2, Ty);
  case FCmpInst::FCMP_OLT:   return executeFCMP_OLT(Src1, Src2, Ty);
  case FCmpInst::FCMP_ULT:   return executeFCMP_ULT(Src1, Src2, Ty);
  case FCmpInst::FCMP_OGT:   return executeFCMP_OGT(Src1, Src2, Ty);
  case FCmpInst::FCMP_UGT:   return executeFCMP_UGT(Src1, Src2, Ty);
  case FCmpInst::FCMP_OLE:   return executeFCMP_OLE(Src1, Src2, Ty);
  case FCmpInst::FCMP_ULE:   return executeFCMP_ULE(Src1, Src2, Ty);
  case FCmpInst::FCMP_OGE:   return executeFCMP_OGE(Src1, Src2, Ty);
  case FCmpInst::FCMP_UGE:   return executeFCMP_UGE(Src1, Src2, Ty);
  case FCmpInst::FCMP_FALSE: return executeFCMP_BOOL(Src1, Src2, Ty, false);
  case FCmpInst::FCMP_TRUE:  return executeFCMP_BOOL(Src1, Src2, Ty, true);
  default:
    dbgs() << "Unhandled Cmp predicate\n";
    llvm_unreachable(nullptr);
  }
}

void Interpreter::visitBinaryOperator(BinaryOperator &I) {
  ExecutionContext &SF = ECStack.back();
  Type *Ty    = I.getOperand(0)->getType();
  GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  GenericValue R;   // Result

  // First process vector operation
  if (Ty->isVectorTy()) {
    assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
    R.AggregateVal.resize(Src1.AggregateVal.size());

    // Macros to execute binary operation 'OP' over integer vectors
#define INTEGER_VECTOR_OPERATION(OP)                               \
    for (unsigned i = 0; i < R.AggregateVal.size(); ++i)           \
      R.AggregateVal[i].IntVal =                                   \
      Src1.AggregateVal[i].IntVal OP Src2.AggregateVal[i].IntVal;

    // Additional macros to execute binary operations udiv/sdiv/urem/srem since
    // they have different notation.
#define INTEGER_VECTOR_FUNCTION(OP)                                \
    for (unsigned i = 0; i < R.AggregateVal.size(); ++i)           \
      R.AggregateVal[i].IntVal =                                   \
      Src1.AggregateVal[i].IntVal.OP(Src2.AggregateVal[i].IntVal);

    // Macros to execute binary operation 'OP' over floating point type TY
    // (float or double) vectors
#define FLOAT_VECTOR_FUNCTION(OP, TY)                               \
      for (unsigned i = 0; i < R.AggregateVal.size(); ++i)          \
        R.AggregateVal[i].TY =                                      \
        Src1.AggregateVal[i].TY OP Src2.AggregateVal[i].TY;

    // Macros to choose appropriate TY: float or double and run operation
    // execution
#define FLOAT_VECTOR_OP(OP) {                                         \
  if (cast<VectorType>(Ty)->getElementType()->isFloatTy())            \
    FLOAT_VECTOR_FUNCTION(OP, FloatVal)                               \
  else {                                                              \
    if (cast<VectorType>(Ty)->getElementType()->isDoubleTy())         \
      FLOAT_VECTOR_FUNCTION(OP, DoubleVal)                            \
    else {                                                            \
      dbgs() << "Unhandled type for OP instruction: " << *Ty << "\n"; \
      llvm_unreachable(0);                                            \
    }                                                                 \
  }                                                                   \
}

    switch(I.getOpcode()){
    default:
      dbgs() << "Don't know how to handle this binary operator!\n-->" << I;
      llvm_unreachable(nullptr);
      break;
    case Instruction::Add:   INTEGER_VECTOR_OPERATION(+) break;
    case Instruction::Sub:   INTEGER_VECTOR_OPERATION(-) break;
    case Instruction::Mul:   INTEGER_VECTOR_OPERATION(*) break;
    case Instruction::UDiv:  INTEGER_VECTOR_FUNCTION(udiv) break;
    case Instruction::SDiv:  INTEGER_VECTOR_FUNCTION(sdiv) break;
    case Instruction::URem:  INTEGER_VECTOR_FUNCTION(urem) break;
    case Instruction::SRem:  INTEGER_VECTOR_FUNCTION(srem) break;
    case Instruction::And:   INTEGER_VECTOR_OPERATION(&) break;
    case Instruction::Or:    INTEGER_VECTOR_OPERATION(|) break;
    case Instruction::Xor:   INTEGER_VECTOR_OPERATION(^) break;
    case Instruction::FAdd:  FLOAT_VECTOR_OP(+) break;
    case Instruction::FSub:  FLOAT_VECTOR_OP(-) break;
    case Instruction::FMul:  FLOAT_VECTOR_OP(*) break;
    case Instruction::FDiv:  FLOAT_VECTOR_OP(/) break;
    case Instruction::FRem:
      if (cast<VectorType>(Ty)->getElementType()->isFloatTy())
        for (unsigned i = 0; i < R.AggregateVal.size(); ++i)
          R.AggregateVal[i].FloatVal =
          fmod(Src1.AggregateVal[i].FloatVal, Src2.AggregateVal[i].FloatVal);
      else {
        if (cast<VectorType>(Ty)->getElementType()->isDoubleTy())
          for (unsigned i = 0; i < R.AggregateVal.size(); ++i)
            R.AggregateVal[i].DoubleVal =
            fmod(Src1.AggregateVal[i].DoubleVal, Src2.AggregateVal[i].DoubleVal);
        else {
          dbgs() << "Unhandled type for Rem instruction: " << *Ty << "\n";
          llvm_unreachable(nullptr);
        }
      }
      break;
    }
  } else {
    switch (I.getOpcode()) {
    default:
      dbgs() << "Don't know how to handle this binary operator!\n-->" << I;
      llvm_unreachable(nullptr);
      break;
    case Instruction::Add:   R.IntVal = Src1.IntVal + Src2.IntVal; break;
    case Instruction::Sub:   R.IntVal = Src1.IntVal - Src2.IntVal; break;
    case Instruction::Mul:   R.IntVal = Src1.IntVal * Src2.IntVal; break;
    case Instruction::FAdd:  executeFAddInst(R, Src1, Src2, Ty); break;
    case Instruction::FSub:  executeFSubInst(R, Src1, Src2, Ty); break;
    case Instruction::FMul:  executeFMulInst(R, Src1, Src2, Ty); break;
    case Instruction::FDiv:  executeFDivInst(R, Src1, Src2, Ty); break;
    case Instruction::FRem:  executeFRemInst(R, Src1, Src2, Ty); break;
    case Instruction::UDiv:  R.IntVal = Src1.IntVal.udiv(Src2.IntVal); break;
    case Instruction::SDiv:  R.IntVal = Src1.IntVal.sdiv(Src2.IntVal); break;
    case Instruction::URem:  R.IntVal = Src1.IntVal.urem(Src2.IntVal); break;
    case Instruction::SRem:  R.IntVal = Src1.IntVal.srem(Src2.IntVal); break;
    case Instruction::And:   R.IntVal = Src1.IntVal & Src2.IntVal; break;
    case Instruction::Or:    R.IntVal = Src1.IntVal | Src2.IntVal; break;
    case Instruction::Xor:   R.IntVal = Src1.IntVal ^ Src2.IntVal; break;
    }
  }
  SetValue(&I, R, SF);
}

static GenericValue executeSelectInst(GenericValue Src1, GenericValue Src2,
                                      GenericValue Src3, Type *Ty) {
    GenericValue Dest;
    if(Ty->isVectorTy()) {
      assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
      assert(Src2.AggregateVal.size() == Src3.AggregateVal.size());
      Dest.AggregateVal.resize( Src1.AggregateVal.size() );
      for (size_t i = 0; i < Src1.AggregateVal.size(); ++i)
        Dest.AggregateVal[i] = (Src1.AggregateVal[i].IntVal == 0) ?
          Src3.AggregateVal[i] : Src2.AggregateVal[i];
    } else {
      Dest = (Src1.IntVal == 0) ? Src3 : Src2;
    }
    return Dest;
}

void Interpreter::visitSelectInst(SelectInst &I) {
  ExecutionContext &SF = ECStack.back();
  Type * Ty = I.getOperand(0)->getType();
  GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  GenericValue Src3 = getOperandValue(I.getOperand(2), SF);
  GenericValue R = executeSelectInst(Src1, Src2, Src3, Ty);
  SetValue(&I, R, SF);
}

//===----------------------------------------------------------------------===//
//                     Terminator Instruction Implementations
//===----------------------------------------------------------------------===//

void Interpreter::exitCalled(GenericValue GV) {
  // runAtExitHandlers() assumes there are no stack frames, but
  // if exit() was called, then it had a stack frame. Blow away
  // the stack before interpreting atexit handlers.
  ECStack.clear();
  runAtExitHandlers();
  exit(GV.IntVal.zextOrTrunc(32).getZExtValue());
}

/// Pop the last stack frame off of ECStack and then copy the result
/// back into the result variable if we are not returning void. The
/// result variable may be the ExitValue, or the Value of the calling
/// CallInst if there was a previous stack frame. This method may
/// invalidate any ECStack iterators you have. This method also takes
/// care of switching to the normal destination BB, if we are returning
/// from an invoke.
///
void Interpreter::popStackAndReturnValueToCaller(Type *RetTy,
                                                 GenericValue Result) {
  // Pop the current stack frame.
  ECStack.pop_back();

  if (ECStack.empty()) {  // Finished main.  Put result into exit code...
    if (RetTy && !RetTy->isVoidTy()) {          // Nonvoid return type?
      ExitValue = Result;   // Capture the exit value of the program
    } else {
      memset(&ExitValue.Untyped, 0, sizeof(ExitValue.Untyped));
    }
  } else {
    // If we have a previous stack frame, and we have a previous call,
    // fill in the return value...
    ExecutionContext &CallingSF = ECStack.back();
    if (CallingSF.Caller) {
      // Save result...
      if (!CallingSF.Caller->getType()->isVoidTy())
        SetValue(CallingSF.Caller, Result, CallingSF);
      if (InvokeInst *II = dyn_cast<InvokeInst>(CallingSF.Caller))
        SwitchToNewBasicBlock (II->getNormalDest (), CallingSF);
      CallingSF.Caller = nullptr;             // We returned from the call...
    }
  }
}

void Interpreter::visitReturnInst(ReturnInst &I) {
  ExecutionContext &SF = ECStack.back();
  Type *RetTy = Type::getVoidTy(I.getContext());
  GenericValue Result;

  // Save away the return value... (if we are not 'ret void')
  if (I.getNumOperands()) {
    RetTy  = I.getReturnValue()->getType();
    Result = getOperandValue(I.getReturnValue(), SF);
  }

  popStackAndReturnValueToCaller(RetTy, Result);
}

void Interpreter::visitUnreachableInst(UnreachableInst &I) {
  report_fatal_error("Program executed an 'unreachable' instruction!");
}

void Interpreter::visitBranchInst(BranchInst &I) {
  ExecutionContext &SF = ECStack.back();
  BasicBlock *Dest;

  Dest = I.getSuccessor(0);          // Uncond branches have a fixed dest...
  if (!I.isUnconditional()) {
    Value *Cond = I.getCondition();
    if (getOperandValue(Cond, SF).IntVal == 0) // If false cond...
      Dest = I.getSuccessor(1);
  }
  SwitchToNewBasicBlock(Dest, SF);
}

void Interpreter::visitSwitchInst(SwitchInst &I) {
  ExecutionContext &SF = ECStack.back();
  Value* Cond = I.getCondition();
  Type *ElTy = Cond->getType();
  GenericValue CondVal = getOperandValue(Cond, SF);

  // Check to see if any of the cases match...
  BasicBlock *Dest = nullptr;
  for (auto Case : I.cases()) {
    GenericValue CaseVal = getOperandValue(Case.getCaseValue(), SF);
    if (executeICMP_EQ(CondVal, CaseVal, ElTy).IntVal != 0) {
      Dest = cast<BasicBlock>(Case.getCaseSuccessor());
      break;
    }
  }
  if (!Dest) Dest = I.getDefaultDest();   // No cases matched: use default
  SwitchToNewBasicBlock(Dest, SF);
}

void Interpreter::visitIndirectBrInst(IndirectBrInst &I) {
  ExecutionContext &SF = ECStack.back();
  void *Dest = GVTOP(getOperandValue(I.getAddress(), SF));
  SwitchToNewBasicBlock((BasicBlock*)Dest, SF);
}


// SwitchToNewBasicBlock - This method is used to jump to a new basic block.
// This function handles the actual updating of block and instruction iterators
// as well as execution of all of the PHI nodes in the destination block.
//
// This method does this because all of the PHI nodes must be executed
// atomically, reading their inputs before any of the results are updated.  Not
// doing this can cause problems if the PHI nodes depend on other PHI nodes for
// their inputs.  If the input PHI node is updated before it is read, incorrect
// results can happen.  Thus we use a two phase approach.
//
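// For example, with mutually-dependent PHI nodes such as
//   %a = phi i32 [ %b, %pred ], ...
//   %b = phi i32 [ %a, %pred ], ...
// both incoming values are read in the first phase before either result is
// written in the second; otherwise the second PHI would observe the first
// one's freshly written value.
//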
void Interpreter::SwitchToNewBasicBlock(BasicBlock *Dest, ExecutionContext &SF){
  BasicBlock *PrevBB = SF.CurBB;      // Remember where we came from...
  SF.CurBB   = Dest;                  // Update CurBB to branch destination
  SF.CurInst = SF.CurBB->begin();     // Update new instruction ptr...

  if (!isa<PHINode>(SF.CurInst)) return;  // Nothing fancy to do

  // Loop over all of the PHI nodes in the current block, reading their inputs.
  std::vector<GenericValue> ResultValues;

  for (; PHINode *PN = dyn_cast<PHINode>(SF.CurInst); ++SF.CurInst) {
    // Search for the value corresponding to this previous bb...
    int i = PN->getBasicBlockIndex(PrevBB);
    assert(i != -1 && "PHINode doesn't contain entry for predecessor??");
    Value *IncomingValue = PN->getIncomingValue(i);

    // Save the incoming value for this PHI node...
    ResultValues.push_back(getOperandValue(IncomingValue, SF));
  }

  // Now loop over all of the PHI nodes setting their values...
  SF.CurInst = SF.CurBB->begin();
  for (unsigned i = 0; isa<PHINode>(SF.CurInst); ++SF.CurInst, ++i) {
    PHINode *PN = cast<PHINode>(SF.CurInst);
    SetValue(PN, ResultValues[i], SF);
  }
}

//===----------------------------------------------------------------------===//
//                     Memory Instruction Implementations
//===----------------------------------------------------------------------===//

void Interpreter::visitAllocaInst(AllocaInst &I) {
  ExecutionContext &SF = ECStack.back();

  Type *Ty = I.getType()->getElementType();  // Type to be allocated

  // Get the number of elements being allocated by the array...
  unsigned NumElements =
    getOperandValue(I.getOperand(0), SF).IntVal.getZExtValue();

  unsigned TypeSize = (size_t)getDataLayout().getTypeAllocSize(Ty);

  // Avoid malloc-ing zero bytes, use max()...
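  // For example, 'alloca i32, i32 4' requests max(1U, 4 * 4) = 16 bytes
  // (assuming the usual data layout where an i32 occupies 4 bytes).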
  unsigned MemToAlloc = std::max(1U, NumElements * TypeSize);

  // Allocate enough memory to hold the type...
  void *Memory = safe_malloc(MemToAlloc);

  LLVM_DEBUG(dbgs() << "Allocated Type: " << *Ty << " (" << TypeSize
                    << " bytes) x " << NumElements << " (Total: " << MemToAlloc
                    << ") at " << uintptr_t(Memory) << '\n');

  GenericValue Result = PTOGV(Memory);
  assert(Result.PointerVal && "Null pointer returned by malloc!");
  SetValue(&I, Result, SF);

  if (I.getOpcode() == Instruction::Alloca)
    ECStack.back().Allocas.add(Memory);
}

// executeGEPOperation - The workhorse for getelementptr.
//
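// For example, "getelementptr {i32, double}, {i32, double}* %p, i64 1, i32 1"
// advances %p by 1 * sizeof({i32, double}) plus the offset of field 1 within
// the struct (16 + 8 = 24 bytes with a typical 64-bit data layout).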
GenericValue Interpreter::executeGEPOperation(Value *Ptr, gep_type_iterator I,
                                              gep_type_iterator E,
                                              ExecutionContext &SF) {
  assert(Ptr->getType()->isPointerTy() &&
         "Cannot getElementOffset of a nonpointer type!");

  uint64_t Total = 0;

  for (; I != E; ++I) {
    if (StructType *STy = I.getStructTypeOrNull()) {
      const StructLayout *SLO = getDataLayout().getStructLayout(STy);

      const ConstantInt *CPU = cast<ConstantInt>(I.getOperand());
      unsigned Index = unsigned(CPU->getZExtValue());

      Total += SLO->getElementOffset(Index);
    } else {
      // Get the index number for the array... which must be long type...
      GenericValue IdxGV = getOperandValue(I.getOperand(), SF);

      int64_t Idx;
      unsigned BitWidth =
        cast<IntegerType>(I.getOperand()->getType())->getBitWidth();
      if (BitWidth == 32)
        Idx = (int64_t)(int32_t)IdxGV.IntVal.getZExtValue();
      else {
        assert(BitWidth == 64 && "Invalid index type for getelementptr");
        Idx = (int64_t)IdxGV.IntVal.getZExtValue();
      }
      Total += getDataLayout().getTypeAllocSize(I.getIndexedType()) * Idx;
    }
  }

  GenericValue Result;
  Result.PointerVal = ((char*)getOperandValue(Ptr, SF).PointerVal) + Total;
  LLVM_DEBUG(dbgs() << "GEP Index " << Total << " bytes.\n");
  return Result;
}

void Interpreter::visitGetElementPtrInst(GetElementPtrInst &I) {
  ExecutionContext &SF = ECStack.back();
  SetValue(&I, executeGEPOperation(I.getPointerOperand(),
                                   gep_type_begin(I), gep_type_end(I), SF), SF);
}

void Interpreter::visitLoadInst(LoadInst &I) {
  ExecutionContext &SF = ECStack.back();
  GenericValue SRC = getOperandValue(I.getPointerOperand(), SF);
  GenericValue *Ptr = (GenericValue*)GVTOP(SRC);
  GenericValue Result;
  LoadValueFromMemory(Result, Ptr, I.getType());
  SetValue(&I, Result, SF);
  if (I.isVolatile() && PrintVolatile)
    dbgs() << "Volatile load " << I;
}

void Interpreter::visitStoreInst(StoreInst &I) {
  ExecutionContext &SF = ECStack.back();
  GenericValue Val = getOperandValue(I.getOperand(0), SF);
  GenericValue SRC = getOperandValue(I.getPointerOperand(), SF);
  StoreValueToMemory(Val, (GenericValue *)GVTOP(SRC),
                     I.getOperand(0)->getType());
  if (I.isVolatile() && PrintVolatile)
    dbgs() << "Volatile store: " << I;
}

//===----------------------------------------------------------------------===//
//                 Miscellaneous Instruction Implementations
//===----------------------------------------------------------------------===//

void Interpreter::visitVAStartInst(VAStartInst &I) {
  ExecutionContext &SF = ECStack.back();
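  // The va_list is modelled as a (stack-frame index, vararg index) pair
  // rather than as a host pointer into the frame.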
  GenericValue ArgIndex;
  ArgIndex.UIntPairVal.first = ECStack.size() - 1;
  ArgIndex.UIntPairVal.second = 0;
  SetValue(&I, ArgIndex, SF);
}

void Interpreter::visitVAEndInst(VAEndInst &I) {
  // va_end is a noop for the interpreter
}

void Interpreter::visitVACopyInst(VACopyInst &I) {
  ExecutionContext &SF = ECStack.back();
  SetValue(&I, getOperandValue(*I.arg_begin(), SF), SF);
}

void Interpreter::visitIntrinsicInst(IntrinsicInst &I) {
  ExecutionContext &SF = ECStack.back();

  // If it is an unknown intrinsic function, use the intrinsic lowering
  // class to transform it into hopefully tasty LLVM code.
  //
  BasicBlock::iterator Me(&I);
  BasicBlock *Parent = I.getParent();
  bool atBegin(Parent->begin() == Me);
  if (!atBegin)
    --Me;
  IL->LowerIntrinsicCall(&I);

  // Restore the CurInst pointer to the first instruction newly inserted, if
  // any.
  if (atBegin) {
    SF.CurInst = Parent->begin();
  } else {
    SF.CurInst = Me;
    ++SF.CurInst;
  }
}

void Interpreter::visitCallBase(CallBase &I) {
  ExecutionContext &SF = ECStack.back();

  SF.Caller = &I;
  std::vector<GenericValue> ArgVals;
  const unsigned NumArgs = SF.Caller->arg_size();
  ArgVals.reserve(NumArgs);
  for (Value *V : SF.Caller->args())
    ArgVals.push_back(getOperandValue(V, SF));

  // To handle indirect calls, we must get the pointer value from the argument
  // and treat it as a function pointer.
  GenericValue SRC = getOperandValue(SF.Caller->getCalledValue(), SF);
  callFunction((Function*)GVTOP(SRC), ArgVals);
}

// auxiliary function for shift operations
static unsigned getShiftAmount(uint64_t orgShiftAmount,
                               llvm::APInt valueToShift) {
  unsigned valueWidth = valueToShift.getBitWidth();
  if (orgShiftAmount < (uint64_t)valueWidth)
    return orgShiftAmount;
  // According to the LLVM documentation, if orgShiftAmount >= valueWidth the
  // result is undefined, but we shift by this rule instead: mask the shift
  // amount by the next power-of-two width minus one (e.g. a shift of 35 on a
  // 32-bit value becomes 35 & 31 == 3).
  return (NextPowerOf2(valueWidth-1) - 1) & orgShiftAmount;
}

void Interpreter::visitShl(BinaryOperator &I) {
  ExecutionContext &SF = ECStack.back();
  GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  GenericValue Dest;
  Type *Ty = I.getType();

  if (Ty->isVectorTy()) {
    uint32_t src1Size = uint32_t(Src1.AggregateVal.size());
    assert(src1Size == Src2.AggregateVal.size());
    for (unsigned i = 0; i < src1Size; i++) {
      GenericValue Result;
      uint64_t shiftAmount = Src2.AggregateVal[i].IntVal.getZExtValue();
      llvm::APInt valueToShift = Src1.AggregateVal[i].IntVal;
      Result.IntVal = valueToShift.shl(getShiftAmount(shiftAmount, valueToShift));
      Dest.AggregateVal.push_back(Result);
    }
  } else {
    // scalar
    uint64_t shiftAmount = Src2.IntVal.getZExtValue();
    llvm::APInt valueToShift = Src1.IntVal;
    Dest.IntVal = valueToShift.shl(getShiftAmount(shiftAmount, valueToShift));
  }

  SetValue(&I, Dest, SF);
}

void Interpreter::visitLShr(BinaryOperator &I) {
  ExecutionContext &SF = ECStack.back();
  GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  GenericValue Dest;
  Type *Ty = I.getType();

  if (Ty->isVectorTy()) {
    uint32_t src1Size = uint32_t(Src1.AggregateVal.size());
    assert(src1Size == Src2.AggregateVal.size());
    for (unsigned i = 0; i < src1Size; i++) {
      GenericValue Result;
      uint64_t shiftAmount = Src2.AggregateVal[i].IntVal.getZExtValue();
      llvm::APInt valueToShift = Src1.AggregateVal[i].IntVal;
      Result.IntVal = valueToShift.lshr(getShiftAmount(shiftAmount, valueToShift));
      Dest.AggregateVal.push_back(Result);
    }
  } else {
    // scalar
    uint64_t shiftAmount = Src2.IntVal.getZExtValue();
    llvm::APInt valueToShift = Src1.IntVal;
    Dest.IntVal = valueToShift.lshr(getShiftAmount(shiftAmount, valueToShift));
  }

  SetValue(&I, Dest, SF);
}

void Interpreter::visitAShr(BinaryOperator &I) {
  ExecutionContext &SF = ECStack.back();
  GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  GenericValue Dest;
  Type *Ty = I.getType();

  if (Ty->isVectorTy()) {
    size_t src1Size = Src1.AggregateVal.size();
    assert(src1Size == Src2.AggregateVal.size());
    for (unsigned i = 0; i < src1Size; i++) {
      GenericValue Result;
      uint64_t shiftAmount = Src2.AggregateVal[i].IntVal.getZExtValue();
      llvm::APInt valueToShift = Src1.AggregateVal[i].IntVal;
      Result.IntVal = valueToShift.ashr(getShiftAmount(shiftAmount, valueToShift));
      Dest.AggregateVal.push_back(Result);
    }
  } else {
    // scalar
    uint64_t shiftAmount = Src2.IntVal.getZExtValue();
    llvm::APInt valueToShift = Src1.IntVal;
    Dest.IntVal = valueToShift.ashr(getShiftAmount(shiftAmount, valueToShift));
  }

  SetValue(&I, Dest, SF);
}

1265 GenericValue Interpreter::executeTruncInst(Value *SrcVal, Type *DstTy,
1266                                            ExecutionContext &SF) {
1267   GenericValue Dest, Src = getOperandValue(SrcVal, SF);
1268   Type *SrcTy = SrcVal->getType();
1269   if (SrcTy->isVectorTy()) {
1270     Type *DstVecTy = DstTy->getScalarType();
1271     unsigned DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
1272     unsigned NumElts = Src.AggregateVal.size();
1273     // the sizes of src and dst vectors must be equal
1274     Dest.AggregateVal.resize(NumElts);
1275     for (unsigned i = 0; i < NumElts; i++)
1276       Dest.AggregateVal[i].IntVal = Src.AggregateVal[i].IntVal.trunc(DBitWidth);
1277   } else {
1278     IntegerType *DITy = cast<IntegerType>(DstTy);
1279     unsigned DBitWidth = DITy->getBitWidth();
1280     Dest.IntVal = Src.IntVal.trunc(DBitWidth);
1281   }
1282   return Dest;
1283 }
1284 
1285 GenericValue Interpreter::executeSExtInst(Value *SrcVal, Type *DstTy,
1286                                           ExecutionContext &SF) {
1287   Type *SrcTy = SrcVal->getType();
1288   GenericValue Dest, Src = getOperandValue(SrcVal, SF);
1289   if (SrcTy->isVectorTy()) {
1290     Type *DstVecTy = DstTy->getScalarType();
1291     unsigned DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
1292     unsigned size = Src.AggregateVal.size();
1293     // the sizes of src and dst vectors must be equal.
1294     Dest.AggregateVal.resize(size);
1295     for (unsigned i = 0; i < size; i++)
1296       Dest.AggregateVal[i].IntVal = Src.AggregateVal[i].IntVal.sext(DBitWidth);
1297   } else {
1298     auto *DITy = cast<IntegerType>(DstTy);
1299     unsigned DBitWidth = DITy->getBitWidth();
1300     Dest.IntVal = Src.IntVal.sext(DBitWidth);
1301   }
1302   return Dest;
1303 }
1304 
1305 GenericValue Interpreter::executeZExtInst(Value *SrcVal, Type *DstTy,
1306                                           ExecutionContext &SF) {
1307   Type *SrcTy = SrcVal->getType();
1308   GenericValue Dest, Src = getOperandValue(SrcVal, SF);
1309   if (SrcTy->isVectorTy()) {
1310     Type *DstVecTy = DstTy->getScalarType();
1311     unsigned DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
1312 
1313     unsigned size = Src.AggregateVal.size();
1314     // the sizes of src and dst vectors must be equal.
1315     Dest.AggregateVal.resize(size);
1316     for (unsigned i = 0; i < size; i++)
1317       Dest.AggregateVal[i].IntVal = Src.AggregateVal[i].IntVal.zext(DBitWidth);
1318   } else {
1319     auto *DITy = cast<IntegerType>(DstTy);
1320     unsigned DBitWidth = DITy->getBitWidth();
1321     Dest.IntVal = Src.IntVal.zext(DBitWidth);
1322   }
1323   return Dest;
1324 }
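// Example: for the i8 value 0xff, sext to i32 yields 0xffffffff (-1) while
// zext yields 0x000000ff (255); APInt::sext/zext above mirror the signedness
// of the corresponding IR casts.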
1325 
1326 GenericValue Interpreter::executeFPTruncInst(Value *SrcVal, Type *DstTy,
1327                                              ExecutionContext &SF) {
1328   GenericValue Dest, Src = getOperandValue(SrcVal, SF);
1329 
1330   if (SrcVal->getType()->getTypeID() == Type::VectorTyID) {
1331     assert(SrcVal->getType()->getScalarType()->isDoubleTy() &&
1332            DstTy->getScalarType()->isFloatTy() &&
1333            "Invalid FPTrunc instruction");
1334 
1335     unsigned size = Src.AggregateVal.size();
1336     // the sizes of src and dst vectors must be equal.
1337     Dest.AggregateVal.resize(size);
1338     for (unsigned i = 0; i < size; i++)
1339       Dest.AggregateVal[i].FloatVal = (float)Src.AggregateVal[i].DoubleVal;
1340   } else {
1341     assert(SrcVal->getType()->isDoubleTy() && DstTy->isFloatTy() &&
1342            "Invalid FPTrunc instruction");
1343     Dest.FloatVal = (float)Src.DoubleVal;
1344   }
1345 
1346   return Dest;
1347 }
1348 
1349 GenericValue Interpreter::executeFPExtInst(Value *SrcVal, Type *DstTy,
1350                                            ExecutionContext &SF) {
1351   GenericValue Dest, Src = getOperandValue(SrcVal, SF);
1352 
1353   if (SrcVal->getType()->getTypeID() == Type::VectorTyID) {
1354     assert(SrcVal->getType()->getScalarType()->isFloatTy() &&
1355            DstTy->getScalarType()->isDoubleTy() && "Invalid FPExt instruction");
1356 
1357     unsigned size = Src.AggregateVal.size();
1358     // the sizes of src and dst vectors must be equal.
1359     Dest.AggregateVal.resize(size);
1360     for (unsigned i = 0; i < size; i++)
1361       Dest.AggregateVal[i].DoubleVal = (double)Src.AggregateVal[i].FloatVal;
1362   } else {
1363     assert(SrcVal->getType()->isFloatTy() && DstTy->isDoubleTy() &&
1364            "Invalid FPExt instruction");
1365     Dest.DoubleVal = (double)Src.FloatVal;
1366   }
1367 
1368   return Dest;
1369 }
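// Note: only float and double are modelled here, so fptrunc is always
// double -> float and fpext is always float -> double, as the asserts above
// require.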
1370 
1371 GenericValue Interpreter::executeFPToUIInst(Value *SrcVal, Type *DstTy,
1372                                             ExecutionContext &SF) {
1373   Type *SrcTy = SrcVal->getType();
1374   GenericValue Dest, Src = getOperandValue(SrcVal, SF);
1375 
1376   if (SrcTy->getTypeID() == Type::VectorTyID) {
1377     Type *DstVecTy = DstTy->getScalarType();
1378     Type *SrcVecTy = SrcTy->getScalarType();
1379     uint32_t DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
1380     unsigned size = Src.AggregateVal.size();
1381     // the sizes of src and dst vectors must be equal.
1382     Dest.AggregateVal.resize(size);
1383 
1384     if (SrcVecTy->getTypeID() == Type::FloatTyID) {
1385       assert(SrcVecTy->isFloatingPointTy() && "Invalid FPToUI instruction");
1386       for (unsigned i = 0; i < size; i++)
1387         Dest.AggregateVal[i].IntVal = APIntOps::RoundFloatToAPInt(
1388             Src.AggregateVal[i].FloatVal, DBitWidth);
1389     } else {
1390       for (unsigned i = 0; i < size; i++)
1391         Dest.AggregateVal[i].IntVal = APIntOps::RoundDoubleToAPInt(
1392             Src.AggregateVal[i].DoubleVal, DBitWidth);
1393     }
1394   } else {
1395     // scalar
1396     uint32_t DBitWidth = cast<IntegerType>(DstTy)->getBitWidth();
1397     assert(SrcTy->isFloatingPointTy() && "Invalid FPToUI instruction");
1398 
1399     if (SrcTy->getTypeID() == Type::FloatTyID)
1400       Dest.IntVal = APIntOps::RoundFloatToAPInt(Src.FloatVal, DBitWidth);
1401     else {
1402       Dest.IntVal = APIntOps::RoundDoubleToAPInt(Src.DoubleVal, DBitWidth);
1403     }
1404   }
1405 
1406   return Dest;
1407 }
1408 
1409 GenericValue Interpreter::executeFPToSIInst(Value *SrcVal, Type *DstTy,
1410                                             ExecutionContext &SF) {
1411   Type *SrcTy = SrcVal->getType();
1412   GenericValue Dest, Src = getOperandValue(SrcVal, SF);
1413 
1414   if (SrcTy->getTypeID() == Type::VectorTyID) {
1415     Type *DstVecTy = DstTy->getScalarType();
1416     Type *SrcVecTy = SrcTy->getScalarType();
1417     uint32_t DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
1418     unsigned size = Src.AggregateVal.size();
1419     // the sizes of src and dst vectors must be equal
1420     Dest.AggregateVal.resize(size);
1421 
1422     if (SrcVecTy->getTypeID() == Type::FloatTyID) {
1423       assert(SrcVecTy->isFloatingPointTy() && "Invalid FPToSI instruction");
1424       for (unsigned i = 0; i < size; i++)
1425         Dest.AggregateVal[i].IntVal = APIntOps::RoundFloatToAPInt(
1426             Src.AggregateVal[i].FloatVal, DBitWidth);
1427     } else {
1428       for (unsigned i = 0; i < size; i++)
1429         Dest.AggregateVal[i].IntVal = APIntOps::RoundDoubleToAPInt(
1430             Src.AggregateVal[i].DoubleVal, DBitWidth);
1431     }
1432   } else {
1433     // scalar
1434     unsigned DBitWidth = cast<IntegerType>(DstTy)->getBitWidth();
1435     assert(SrcTy->isFloatingPointTy() && "Invalid FPToSI instruction");
1436 
1437     if (SrcTy->getTypeID() == Type::FloatTyID)
1438       Dest.IntVal = APIntOps::RoundFloatToAPInt(Src.FloatVal, DBitWidth);
1439     else {
1440       Dest.IntVal = APIntOps::RoundDoubleToAPInt(Src.DoubleVal, DBitWidth);
1441     }
1442   }
1443   return Dest;
1444 }
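// Example: fptosi float -3.7 to i32 yields -3, since LLVM's fp-to-int casts
// round toward zero; the Round*ToAPInt helpers perform that truncation for
// in-range values.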
1445 
1446 GenericValue Interpreter::executeUIToFPInst(Value *SrcVal, Type *DstTy,
1447                                             ExecutionContext &SF) {
1448   GenericValue Dest, Src = getOperandValue(SrcVal, SF);
1449 
1450   if (SrcVal->getType()->getTypeID() == Type::VectorTyID) {
1451     Type *DstVecTy = DstTy->getScalarType();
1452     unsigned size = Src.AggregateVal.size();
1453     // the sizes of src and dst vectors must be equal
1454     Dest.AggregateVal.resize(size);
1455 
1456     if (DstVecTy->getTypeID() == Type::FloatTyID) {
1457       assert(DstVecTy->isFloatingPointTy() && "Invalid UIToFP instruction");
1458       for (unsigned i = 0; i < size; i++)
1459         Dest.AggregateVal[i].FloatVal =
1460             APIntOps::RoundAPIntToFloat(Src.AggregateVal[i].IntVal);
1461     } else {
1462       for (unsigned i = 0; i < size; i++)
1463         Dest.AggregateVal[i].DoubleVal =
1464             APIntOps::RoundAPIntToDouble(Src.AggregateVal[i].IntVal);
1465     }
1466   } else {
1467     // scalar
1468     assert(DstTy->isFloatingPointTy() && "Invalid UIToFP instruction");
1469     if (DstTy->getTypeID() == Type::FloatTyID)
1470       Dest.FloatVal = APIntOps::RoundAPIntToFloat(Src.IntVal);
1471     else {
1472       Dest.DoubleVal = APIntOps::RoundAPIntToDouble(Src.IntVal);
1473     }
1474   }
1475   return Dest;
1476 }
1477 
1478 GenericValue Interpreter::executeSIToFPInst(Value *SrcVal, Type *DstTy,
1479                                             ExecutionContext &SF) {
1480   GenericValue Dest, Src = getOperandValue(SrcVal, SF);
1481 
1482   if (SrcVal->getType()->getTypeID() == Type::VectorTyID) {
1483     Type *DstVecTy = DstTy->getScalarType();
1484     unsigned size = Src.AggregateVal.size();
1485     // the sizes of src and dst vectors must be equal
1486     Dest.AggregateVal.resize(size);
1487 
1488     if (DstVecTy->getTypeID() == Type::FloatTyID) {
1489       assert(DstVecTy->isFloatingPointTy() && "Invalid SIToFP instruction");
1490       for (unsigned i = 0; i < size; i++)
1491         Dest.AggregateVal[i].FloatVal =
1492             APIntOps::RoundSignedAPIntToFloat(Src.AggregateVal[i].IntVal);
1493     } else {
1494       for (unsigned i = 0; i < size; i++)
1495         Dest.AggregateVal[i].DoubleVal =
1496             APIntOps::RoundSignedAPIntToDouble(Src.AggregateVal[i].IntVal);
1497     }
1498   } else {
1499     // scalar
1500     assert(DstTy->isFloatingPointTy() && "Invalid SIToFP instruction");
1501 
1502     if (DstTy->getTypeID() == Type::FloatTyID)
1503       Dest.FloatVal = APIntOps::RoundSignedAPIntToFloat(Src.IntVal);
1504     else {
1505       Dest.DoubleVal = APIntOps::RoundSignedAPIntToDouble(Src.IntVal);
1506     }
1507   }
1508 
1509   return Dest;
1510 }
1511 
1512 GenericValue Interpreter::executePtrToIntInst(Value *SrcVal, Type *DstTy,
1513                                               ExecutionContext &SF) {
1514   uint32_t DBitWidth = cast<IntegerType>(DstTy)->getBitWidth();
1515   GenericValue Dest, Src = getOperandValue(SrcVal, SF);
1516   assert(SrcVal->getType()->isPointerTy() && "Invalid PtrToInt instruction");
1517 
1518   Dest.IntVal = APInt(DBitWidth, (intptr_t) Src.PointerVal);
1519   return Dest;
1520 }
1521 
1522 GenericValue Interpreter::executeIntToPtrInst(Value *SrcVal, Type *DstTy,
1523                                               ExecutionContext &SF) {
1524   GenericValue Dest, Src = getOperandValue(SrcVal, SF);
1525   assert(DstTy->isPointerTy() && "Invalid IntToPtr instruction");
1526 
1527   uint32_t PtrSize = getDataLayout().getPointerSizeInBits();
1528   if (PtrSize != Src.IntVal.getBitWidth())
1529     Src.IntVal = Src.IntVal.zextOrTrunc(PtrSize);
1530 
1531   Dest.PointerVal = PointerTy(intptr_t(Src.IntVal.getZExtValue()));
1532   return Dest;
1533 }
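// Example: with 64-bit pointers, inttoptr i32 %x zero-extends %x to 64 bits
// (the zextOrTrunc above) before forming the pointer; a wider integer source
// would be truncated instead.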
1534 
1535 GenericValue Interpreter::executeBitCastInst(Value *SrcVal, Type *DstTy,
1536                                              ExecutionContext &SF) {
1537 
1538   // This instruction supports bitwise conversion of vectors to integers and
1539   // to vectors of other types, as long as the total bit width is unchanged.
1540   Type *SrcTy = SrcVal->getType();
1541   GenericValue Dest, Src = getOperandValue(SrcVal, SF);
1542 
1543   if ((SrcTy->getTypeID() == Type::VectorTyID) ||
1544       (DstTy->getTypeID() == Type::VectorTyID)) {
1545     // vector src bitcast to vector dst or vector src bitcast to scalar dst or
1546     // scalar src bitcast to vector dst
1547     bool isLittleEndian = getDataLayout().isLittleEndian();
1548     GenericValue TempDst, TempSrc, SrcVec;
1549     Type *SrcElemTy;
1550     Type *DstElemTy;
1551     unsigned SrcBitSize;
1552     unsigned DstBitSize;
1553     unsigned SrcNum;
1554     unsigned DstNum;
1555 
1556     if (SrcTy->getTypeID() == Type::VectorTyID) {
1557       SrcElemTy = SrcTy->getScalarType();
1558       SrcBitSize = SrcTy->getScalarSizeInBits();
1559       SrcNum = Src.AggregateVal.size();
1560       SrcVec = Src;
1561     } else {
1562       // If src is a scalar value, treat it as a vector <1 x type>.
1563       SrcElemTy = SrcTy;
1564       SrcBitSize = SrcTy->getPrimitiveSizeInBits();
1565       SrcNum = 1;
1566       SrcVec.AggregateVal.push_back(Src);
1567     }
1568 
1569     if (DstTy->getTypeID() == Type::VectorTyID) {
1570       DstElemTy = DstTy->getScalarType();
1571       DstBitSize = DstTy->getScalarSizeInBits();
1572       DstNum = (SrcNum * SrcBitSize) / DstBitSize;
1573     } else {
1574       DstElemTy = DstTy;
1575       DstBitSize = DstTy->getPrimitiveSizeInBits();
1576       DstNum = 1;
1577     }
1578 
1579     if (SrcNum * SrcBitSize != DstNum * DstBitSize)
1580       llvm_unreachable("Invalid BitCast");
1581 
1582     // If src is floating point, cast to integer first.
1583     TempSrc.AggregateVal.resize(SrcNum);
1584     if (SrcElemTy->isFloatTy()) {
1585       for (unsigned i = 0; i < SrcNum; i++)
1586         TempSrc.AggregateVal[i].IntVal =
1587             APInt::floatToBits(SrcVec.AggregateVal[i].FloatVal);
1588 
1589     } else if (SrcElemTy->isDoubleTy()) {
1590       for (unsigned i = 0; i < SrcNum; i++)
1591         TempSrc.AggregateVal[i].IntVal =
1592             APInt::doubleToBits(SrcVec.AggregateVal[i].DoubleVal);
1593     } else if (SrcElemTy->isIntegerTy()) {
1594       for (unsigned i = 0; i < SrcNum; i++)
1595         TempSrc.AggregateVal[i].IntVal = SrcVec.AggregateVal[i].IntVal;
1596     } else {
1597       // Pointers are not allowed as the element type of a vector.
1598       llvm_unreachable("Invalid Bitcast");
1599     }
1600 
1601     // TempSrc is now a vector of integers.
1602     if (DstNum < SrcNum) {
1603       // Example: bitcast <4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>
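      // For illustration, on a little-endian target element 0 of the source
      // lands in the low bits of each destination element: bitcast
      // <2 x i16> <i16 1, i16 2> to i32 would produce 0x00020001.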
1604       unsigned Ratio = SrcNum / DstNum;
1605       unsigned SrcElt = 0;
1606       for (unsigned i = 0; i < DstNum; i++) {
1607         GenericValue Elt;
1608         Elt.IntVal = 0;
1609         Elt.IntVal = Elt.IntVal.zext(DstBitSize);
1610         unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize * (Ratio - 1);
1611         for (unsigned j = 0; j < Ratio; j++) {
1612           APInt Tmp;
1613           Tmp = Tmp.zext(SrcBitSize);
1614           Tmp = TempSrc.AggregateVal[SrcElt++].IntVal;
1615           Tmp = Tmp.zext(DstBitSize);
1616           Tmp <<= ShiftAmt;
1617           ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;
1618           Elt.IntVal |= Tmp;
1619         }
1620         TempDst.AggregateVal.push_back(Elt);
1621       }
1622     } else {
1623       // Example: bitcast <2 x i64> <i64 0, i64 1> to <4 x i32>
1624       unsigned Ratio = DstNum / SrcNum;
1625       for (unsigned i = 0; i < SrcNum; i++) {
1626         unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize * (Ratio - 1);
1627         for (unsigned j = 0; j < Ratio; j++) {
1628           GenericValue Elt;
1629           Elt.IntVal = Elt.IntVal.zext(SrcBitSize);
1630           Elt.IntVal = TempSrc.AggregateVal[i].IntVal;
1631           Elt.IntVal.lshrInPlace(ShiftAmt);
1632           // DstBitSize may equal SrcBitSize, so only truncate when it is smaller.
1633           if (DstBitSize < SrcBitSize)
1634             Elt.IntVal = Elt.IntVal.trunc(DstBitSize);
1635           ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;
1636           TempDst.AggregateVal.push_back(Elt);
1637         }
1638       }
1639     }
1640 
1641     // convert result from integer to specified type
1642     if (DstTy->getTypeID() == Type::VectorTyID) {
1643       if (DstElemTy->isDoubleTy()) {
1644         Dest.AggregateVal.resize(DstNum);
1645         for (unsigned i = 0; i < DstNum; i++)
1646           Dest.AggregateVal[i].DoubleVal =
1647               TempDst.AggregateVal[i].IntVal.bitsToDouble();
1648       } else if (DstElemTy->isFloatTy()) {
1649         Dest.AggregateVal.resize(DstNum);
1650         for (unsigned i = 0; i < DstNum; i++)
1651           Dest.AggregateVal[i].FloatVal =
1652               TempDst.AggregateVal[i].IntVal.bitsToFloat();
1653       } else {
1654         Dest = TempDst;
1655       }
1656     } else {
1657       if (DstElemTy->isDoubleTy())
1658         Dest.DoubleVal = TempDst.AggregateVal[0].IntVal.bitsToDouble();
1659       else if (DstElemTy->isFloatTy()) {
1660         Dest.FloatVal = TempDst.AggregateVal[0].IntVal.bitsToFloat();
1661       } else {
1662         Dest.IntVal = TempDst.AggregateVal[0].IntVal;
1663       }
1664     }
1665   } else { //  if ((SrcTy->getTypeID() == Type::VectorTyID) ||
1666            //     (DstTy->getTypeID() == Type::VectorTyID))
1667 
1668     // scalar src bitcast to scalar dst
1669     if (DstTy->isPointerTy()) {
1670       assert(SrcTy->isPointerTy() && "Invalid BitCast");
1671       Dest.PointerVal = Src.PointerVal;
1672     } else if (DstTy->isIntegerTy()) {
1673       if (SrcTy->isFloatTy())
1674         Dest.IntVal = APInt::floatToBits(Src.FloatVal);
1675       else if (SrcTy->isDoubleTy()) {
1676         Dest.IntVal = APInt::doubleToBits(Src.DoubleVal);
1677       } else if (SrcTy->isIntegerTy()) {
1678         Dest.IntVal = Src.IntVal;
1679       } else {
1680         llvm_unreachable("Invalid BitCast");
1681       }
1682     } else if (DstTy->isFloatTy()) {
1683       if (SrcTy->isIntegerTy())
1684         Dest.FloatVal = Src.IntVal.bitsToFloat();
1685       else {
1686         Dest.FloatVal = Src.FloatVal;
1687       }
1688     } else if (DstTy->isDoubleTy()) {
1689       if (SrcTy->isIntegerTy())
1690         Dest.DoubleVal = Src.IntVal.bitsToDouble();
1691       else {
1692         Dest.DoubleVal = Src.DoubleVal;
1693       }
1694     } else {
1695       llvm_unreachable("Invalid Bitcast");
1696     }
1697   }
1698 
1699   return Dest;
1700 }
1701 
1702 void Interpreter::visitTruncInst(TruncInst &I) {
1703   ExecutionContext &SF = ECStack.back();
1704   SetValue(&I, executeTruncInst(I.getOperand(0), I.getType(), SF), SF);
1705 }
1706 
1707 void Interpreter::visitSExtInst(SExtInst &I) {
1708   ExecutionContext &SF = ECStack.back();
1709   SetValue(&I, executeSExtInst(I.getOperand(0), I.getType(), SF), SF);
1710 }
1711 
1712 void Interpreter::visitZExtInst(ZExtInst &I) {
1713   ExecutionContext &SF = ECStack.back();
1714   SetValue(&I, executeZExtInst(I.getOperand(0), I.getType(), SF), SF);
1715 }
1716 
1717 void Interpreter::visitFPTruncInst(FPTruncInst &I) {
1718   ExecutionContext &SF = ECStack.back();
1719   SetValue(&I, executeFPTruncInst(I.getOperand(0), I.getType(), SF), SF);
1720 }
1721 
1722 void Interpreter::visitFPExtInst(FPExtInst &I) {
1723   ExecutionContext &SF = ECStack.back();
1724   SetValue(&I, executeFPExtInst(I.getOperand(0), I.getType(), SF), SF);
1725 }
1726 
1727 void Interpreter::visitUIToFPInst(UIToFPInst &I) {
1728   ExecutionContext &SF = ECStack.back();
1729   SetValue(&I, executeUIToFPInst(I.getOperand(0), I.getType(), SF), SF);
1730 }
1731 
1732 void Interpreter::visitSIToFPInst(SIToFPInst &I) {
1733   ExecutionContext &SF = ECStack.back();
1734   SetValue(&I, executeSIToFPInst(I.getOperand(0), I.getType(), SF), SF);
1735 }
1736 
1737 void Interpreter::visitFPToUIInst(FPToUIInst &I) {
1738   ExecutionContext &SF = ECStack.back();
1739   SetValue(&I, executeFPToUIInst(I.getOperand(0), I.getType(), SF), SF);
1740 }
1741 
1742 void Interpreter::visitFPToSIInst(FPToSIInst &I) {
1743   ExecutionContext &SF = ECStack.back();
1744   SetValue(&I, executeFPToSIInst(I.getOperand(0), I.getType(), SF), SF);
1745 }
1746 
1747 void Interpreter::visitPtrToIntInst(PtrToIntInst &I) {
1748   ExecutionContext &SF = ECStack.back();
1749   SetValue(&I, executePtrToIntInst(I.getOperand(0), I.getType(), SF), SF);
1750 }
1751 
1752 void Interpreter::visitIntToPtrInst(IntToPtrInst &I) {
1753   ExecutionContext &SF = ECStack.back();
1754   SetValue(&I, executeIntToPtrInst(I.getOperand(0), I.getType(), SF), SF);
1755 }
1756 
1757 void Interpreter::visitBitCastInst(BitCastInst &I) {
1758   ExecutionContext &SF = ECStack.back();
1759   SetValue(&I, executeBitCastInst(I.getOperand(0), I.getType(), SF), SF);
1760 }
1761 
1762 #define IMPLEMENT_VAARG(TY) \
1763    case Type::TY##TyID: Dest.TY##Val = Src.TY##Val; break
1764 
1765 void Interpreter::visitVAArgInst(VAArgInst &I) {
1766   ExecutionContext &SF = ECStack.back();
1767 
1768   // Get the incoming valist parameter.  LLI treats the valist as a
1769   // (ec-stack-depth var-arg-index) pair.
1770   GenericValue VAList = getOperandValue(I.getOperand(0), SF);
1771   GenericValue Dest;
1772   GenericValue Src = ECStack[VAList.UIntPairVal.first]
1773                       .VarArgs[VAList.UIntPairVal.second];
1774   Type *Ty = I.getType();
1775   switch (Ty->getTypeID()) {
1776   case Type::IntegerTyID:
1777     Dest.IntVal = Src.IntVal;
1778     break;
1779   IMPLEMENT_VAARG(Pointer);
1780   IMPLEMENT_VAARG(Float);
1781   IMPLEMENT_VAARG(Double);
1782   default:
1783     dbgs() << "Unhandled dest type for vaarg instruction: " << *Ty << "\n";
1784     llvm_unreachable(nullptr);
1785   }
1786 
1787   // Set the Value of this Instruction.
1788   SetValue(&I, Dest, SF);
1789 
1790   // Move the pointer to the next vararg.
1791   ++VAList.UIntPairVal.second;
1792 }
1793 
1794 void Interpreter::visitExtractElementInst(ExtractElementInst &I) {
1795   ExecutionContext &SF = ECStack.back();
1796   GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
1797   GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
1798   GenericValue Dest;
1799 
1800   Type *Ty = I.getType();
1801   const unsigned indx = unsigned(Src2.IntVal.getZExtValue());
1802 
1803   if(Src1.AggregateVal.size() > indx) {
1804     switch (Ty->getTypeID()) {
1805     default:
1806       dbgs() << "Unhandled destination type for extractelement instruction: "
1807       << *Ty << "\n";
1808       llvm_unreachable(nullptr);
1809       break;
1810     case Type::IntegerTyID:
1811       Dest.IntVal = Src1.AggregateVal[indx].IntVal;
1812       break;
1813     case Type::FloatTyID:
1814       Dest.FloatVal = Src1.AggregateVal[indx].FloatVal;
1815       break;
1816     case Type::DoubleTyID:
1817       Dest.DoubleVal = Src1.AggregateVal[indx].DoubleVal;
1818       break;
1819     }
1820   } else {
1821     dbgs() << "Invalid index in extractelement instruction\n";
1822   }
1823 
1824   SetValue(&I, Dest, SF);
1825 }
1826 
1827 void Interpreter::visitInsertElementInst(InsertElementInst &I) {
1828   ExecutionContext &SF = ECStack.back();
1829   VectorType *Ty = cast<VectorType>(I.getType());
1830 
1831   GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
1832   GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
1833   GenericValue Src3 = getOperandValue(I.getOperand(2), SF);
1834   GenericValue Dest;
1835 
1836   Type *TyContained = Ty->getElementType();
1837 
1838   const unsigned indx = unsigned(Src3.IntVal.getZExtValue());
1839   Dest.AggregateVal = Src1.AggregateVal;
1840 
1841   if(Src1.AggregateVal.size() <= indx)
1842       llvm_unreachable("Invalid index in insertelement instruction");
1843   switch (TyContained->getTypeID()) {
1844     default:
1845       llvm_unreachable("Unhandled dest type for insertelement instruction");
1846     case Type::IntegerTyID:
1847       Dest.AggregateVal[indx].IntVal = Src2.IntVal;
1848       break;
1849     case Type::FloatTyID:
1850       Dest.AggregateVal[indx].FloatVal = Src2.FloatVal;
1851       break;
1852     case Type::DoubleTyID:
1853       Dest.AggregateVal[indx].DoubleVal = Src2.DoubleVal;
1854       break;
1855   }
1856   SetValue(&I, Dest, SF);
1857 }
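// Example: insertelement <4 x float> %v, float 5.0, i32 2 copies %v and then
// overwrites lane 2, matching the copy-then-store pattern above.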
1858 
1859 void Interpreter::visitShuffleVectorInst(ShuffleVectorInst &I){
1860   ExecutionContext &SF = ECStack.back();
1861 
1862   VectorType *Ty = cast<VectorType>(I.getType());
1863 
1864   GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
1865   GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
1866   GenericValue Dest;
1867 
1868   // There is no need to check the types of src1 and src2, because verified
1869   // IR cannot contain different types for src1 and src2 in a shufflevector
1870   // instruction.
1871 
1872   Type *TyContained = Ty->getElementType();
1873   unsigned src1Size = (unsigned)Src1.AggregateVal.size();
1874   unsigned src2Size = (unsigned)Src2.AggregateVal.size();
1875   unsigned src3Size = I.getShuffleMask().size();
1876 
1877   Dest.AggregateVal.resize(src3Size);
1878 
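  // Each mask value indexes the virtual concatenation of the two sources:
  // j < src1Size selects Src1[j], otherwise Src2[j - src1Size]. For example, a
  // mask of <i32 0, i32 1, i32 4, i32 5> takes the first two lanes of each
  // <4 x ...> operand.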
1879   switch (TyContained->getTypeID()) {
1880     default:
1881       llvm_unreachable("Unhandled dest type for shufflevector instruction");
1882       break;
1883     case Type::IntegerTyID:
1884       for( unsigned i=0; i<src3Size; i++) {
1885         unsigned j = std::max(0, I.getMaskValue(i));
1886         if(j < src1Size)
1887           Dest.AggregateVal[i].IntVal = Src1.AggregateVal[j].IntVal;
1888         else if(j < src1Size + src2Size)
1889           Dest.AggregateVal[i].IntVal = Src2.AggregateVal[j-src1Size].IntVal;
1890         else
1891           // A mask element may not exceed the combined length of the first and
1892           // second operands, and the IR verifier should reject code such as
1893           // %tmp = shufflevector <2 x i32> <i32 3, i32 4>, <2 x i32> undef,
1894           //                      <2 x i32> <i32 0, i32 5>,
1895           // where i32 5 is out of range; keep this as an additional check here:
1896           llvm_unreachable("Invalid mask in shufflevector instruction");
1897       }
1898       break;
1899     case Type::FloatTyID:
1900       for( unsigned i=0; i<src3Size; i++) {
1901         unsigned j = std::max(0, I.getMaskValue(i));
1902         if(j < src1Size)
1903           Dest.AggregateVal[i].FloatVal = Src1.AggregateVal[j].FloatVal;
1904         else if(j < src1Size + src2Size)
1905           Dest.AggregateVal[i].FloatVal = Src2.AggregateVal[j-src1Size].FloatVal;
1906         else
1907           llvm_unreachable("Invalid mask in shufflevector instruction");
1908         }
1909       break;
1910     case Type::DoubleTyID:
1911       for( unsigned i=0; i<src3Size; i++) {
1912         unsigned j = std::max(0, I.getMaskValue(i));
1913         if(j < src1Size)
1914           Dest.AggregateVal[i].DoubleVal = Src1.AggregateVal[j].DoubleVal;
1915         else if(j < src1Size + src2Size)
1916           Dest.AggregateVal[i].DoubleVal =
1917             Src2.AggregateVal[j-src1Size].DoubleVal;
1918         else
1919           llvm_unreachable("Invalid mask in shufflevector instruction");
1920       }
1921       break;
1922   }
1923   SetValue(&I, Dest, SF);
1924 }
1925 
1926 void Interpreter::visitExtractValueInst(ExtractValueInst &I) {
1927   ExecutionContext &SF = ECStack.back();
1928   Value *Agg = I.getAggregateOperand();
1929   GenericValue Dest;
1930   GenericValue Src = getOperandValue(Agg, SF);
1931 
1932   ExtractValueInst::idx_iterator IdxBegin = I.idx_begin();
1933   unsigned Num = I.getNumIndices();
1934   GenericValue *pSrc = &Src;
1935 
1936   for (unsigned i = 0 ; i < Num; ++i) {
1937     pSrc = &pSrc->AggregateVal[*IdxBegin];
1938     ++IdxBegin;
1939   }
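  // pSrc now points at the member selected by the index list; e.g. for
  // extractvalue {i32, {float, double}} %agg, 1, 0 it designates the nested
  // float field.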
1940 
1941   Type *IndexedType = ExtractValueInst::getIndexedType(Agg->getType(), I.getIndices());
1942   switch (IndexedType->getTypeID()) {
1943     default:
1944       llvm_unreachable("Unhandled dest type for extractvalue instruction");
1945     break;
1946     case Type::IntegerTyID:
1947       Dest.IntVal = pSrc->IntVal;
1948     break;
1949     case Type::FloatTyID:
1950       Dest.FloatVal = pSrc->FloatVal;
1951     break;
1952     case Type::DoubleTyID:
1953       Dest.DoubleVal = pSrc->DoubleVal;
1954     break;
1955     case Type::ArrayTyID:
1956     case Type::StructTyID:
1957     case Type::VectorTyID:
1958       Dest.AggregateVal = pSrc->AggregateVal;
1959     break;
1960     case Type::PointerTyID:
1961       Dest.PointerVal = pSrc->PointerVal;
1962     break;
1963   }
1964 
1965   SetValue(&I, Dest, SF);
1966 }
1967 
1968 void Interpreter::visitInsertValueInst(InsertValueInst &I) {
1969 
1970   ExecutionContext &SF = ECStack.back();
1971   Value *Agg = I.getAggregateOperand();
1972 
1973   GenericValue Src1 = getOperandValue(Agg, SF);
1974   GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
1975   GenericValue Dest = Src1; // Dest is a slightly changed Src1
1976 
1977   ExtractValueInst::idx_iterator IdxBegin = I.idx_begin();
1978   unsigned Num = I.getNumIndices();
1979 
1980   GenericValue *pDest = &Dest;
1981   for (unsigned i = 0 ; i < Num; ++i) {
1982     pDest = &pDest->AggregateVal[*IdxBegin];
1983     ++IdxBegin;
1984   }
1985   // pDest points to the target value in the Dest now
1986 
1987   Type *IndexedType = ExtractValueInst::getIndexedType(Agg->getType(), I.getIndices());
1988 
1989   switch (IndexedType->getTypeID()) {
1990     default:
1991       llvm_unreachable("Unhandled dest type for insertvalue instruction");
1992     break;
1993     case Type::IntegerTyID:
1994       pDest->IntVal = Src2.IntVal;
1995     break;
1996     case Type::FloatTyID:
1997       pDest->FloatVal = Src2.FloatVal;
1998     break;
1999     case Type::DoubleTyID:
2000       pDest->DoubleVal = Src2.DoubleVal;
2001     break;
2002     case Type::ArrayTyID:
2003     case Type::StructTyID:
2004     case Type::VectorTyID:
2005       pDest->AggregateVal = Src2.AggregateVal;
2006     break;
2007     case Type::PointerTyID:
2008       pDest->PointerVal = Src2.PointerVal;
2009     break;
2010   }
2011 
2012   SetValue(&I, Dest, SF);
2013 }
2014 
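// Constant expressions are evaluated here by dispatching on their opcode and
// reusing the execute* helpers on recursively evaluated operands; for example,
// a constant such as "i64 add (i64 ptrtoint (ptr @g to i64), i64 8)" resolves
// the address of @g via executePtrToIntInst and then takes the integer Add
// case below.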
2015 GenericValue Interpreter::getConstantExprValue (ConstantExpr *CE,
2016                                                 ExecutionContext &SF) {
2017   switch (CE->getOpcode()) {
2018   case Instruction::Trunc:
2019       return executeTruncInst(CE->getOperand(0), CE->getType(), SF);
2020   case Instruction::ZExt:
2021       return executeZExtInst(CE->getOperand(0), CE->getType(), SF);
2022   case Instruction::SExt:
2023       return executeSExtInst(CE->getOperand(0), CE->getType(), SF);
2024   case Instruction::FPTrunc:
2025       return executeFPTruncInst(CE->getOperand(0), CE->getType(), SF);
2026   case Instruction::FPExt:
2027       return executeFPExtInst(CE->getOperand(0), CE->getType(), SF);
2028   case Instruction::UIToFP:
2029       return executeUIToFPInst(CE->getOperand(0), CE->getType(), SF);
2030   case Instruction::SIToFP:
2031       return executeSIToFPInst(CE->getOperand(0), CE->getType(), SF);
2032   case Instruction::FPToUI:
2033       return executeFPToUIInst(CE->getOperand(0), CE->getType(), SF);
2034   case Instruction::FPToSI:
2035       return executeFPToSIInst(CE->getOperand(0), CE->getType(), SF);
2036   case Instruction::PtrToInt:
2037       return executePtrToIntInst(CE->getOperand(0), CE->getType(), SF);
2038   case Instruction::IntToPtr:
2039       return executeIntToPtrInst(CE->getOperand(0), CE->getType(), SF);
2040   case Instruction::BitCast:
2041       return executeBitCastInst(CE->getOperand(0), CE->getType(), SF);
2042   case Instruction::GetElementPtr:
2043     return executeGEPOperation(CE->getOperand(0), gep_type_begin(CE),
2044                                gep_type_end(CE), SF);
2045   case Instruction::FCmp:
2046   case Instruction::ICmp:
2047     return executeCmpInst(CE->getPredicate(),
2048                           getOperandValue(CE->getOperand(0), SF),
2049                           getOperandValue(CE->getOperand(1), SF),
2050                           CE->getOperand(0)->getType());
2051   case Instruction::Select:
2052     return executeSelectInst(getOperandValue(CE->getOperand(0), SF),
2053                              getOperandValue(CE->getOperand(1), SF),
2054                              getOperandValue(CE->getOperand(2), SF),
2055                              CE->getOperand(0)->getType());
2056   default :
2057     break;
2058   }
2059 
2060   // The remaining cases compute their result into a local GenericValue,
2061   // so we initialize one, fill it in, and return it.
2062   GenericValue Op0 = getOperandValue(CE->getOperand(0), SF);
2063   GenericValue Op1 = getOperandValue(CE->getOperand(1), SF);
2064   GenericValue Dest;
2065   Type * Ty = CE->getOperand(0)->getType();
2066   switch (CE->getOpcode()) {
2067   case Instruction::Add:  Dest.IntVal = Op0.IntVal + Op1.IntVal; break;
2068   case Instruction::Sub:  Dest.IntVal = Op0.IntVal - Op1.IntVal; break;
2069   case Instruction::Mul:  Dest.IntVal = Op0.IntVal * Op1.IntVal; break;
2070   case Instruction::FAdd: executeFAddInst(Dest, Op0, Op1, Ty); break;
2071   case Instruction::FSub: executeFSubInst(Dest, Op0, Op1, Ty); break;
2072   case Instruction::FMul: executeFMulInst(Dest, Op0, Op1, Ty); break;
2073   case Instruction::FDiv: executeFDivInst(Dest, Op0, Op1, Ty); break;
2074   case Instruction::FRem: executeFRemInst(Dest, Op0, Op1, Ty); break;
2075   case Instruction::SDiv: Dest.IntVal = Op0.IntVal.sdiv(Op1.IntVal); break;
2076   case Instruction::UDiv: Dest.IntVal = Op0.IntVal.udiv(Op1.IntVal); break;
2077   case Instruction::URem: Dest.IntVal = Op0.IntVal.urem(Op1.IntVal); break;
2078   case Instruction::SRem: Dest.IntVal = Op0.IntVal.srem(Op1.IntVal); break;
2079   case Instruction::And:  Dest.IntVal = Op0.IntVal & Op1.IntVal; break;
2080   case Instruction::Or:   Dest.IntVal = Op0.IntVal | Op1.IntVal; break;
2081   case Instruction::Xor:  Dest.IntVal = Op0.IntVal ^ Op1.IntVal; break;
2082   case Instruction::Shl:
2083     Dest.IntVal = Op0.IntVal.shl(Op1.IntVal.getZExtValue());
2084     break;
2085   case Instruction::LShr:
2086     Dest.IntVal = Op0.IntVal.lshr(Op1.IntVal.getZExtValue());
2087     break;
2088   case Instruction::AShr:
2089     Dest.IntVal = Op0.IntVal.ashr(Op1.IntVal.getZExtValue());
2090     break;
2091   default:
2092     dbgs() << "Unhandled ConstantExpr: " << *CE << "\n";
2093     llvm_unreachable("Unhandled ConstantExpr");
2094   }
2095   return Dest;
2096 }
2097 
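// Resolve an operand to its runtime value: constant expressions are folded on
// the fly, other constants go through getConstantValue or getPointerToGlobal,
// and everything else is looked up in the current frame's value map.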
2098 GenericValue Interpreter::getOperandValue(Value *V, ExecutionContext &SF) {
2099   if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
2100     return getConstantExprValue(CE, SF);
2101   } else if (Constant *CPV = dyn_cast<Constant>(V)) {
2102     return getConstantValue(CPV);
2103   } else if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
2104     return PTOGV(getPointerToGlobal(GV));
2105   } else {
2106     return SF.Values[V];
2107   }
2108 }
2109 
2110 //===----------------------------------------------------------------------===//
2111 //                        Dispatch and Execution Code
2112 //===----------------------------------------------------------------------===//
2113 
2114 //===----------------------------------------------------------------------===//
2115 // callFunction - Execute the specified function...
2116 //
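// For example, a varargs call binds the named formal parameters through
// SetValue below and stashes any remaining argument values in the new frame's
// VarArgs list, which visitVAArgInst consumes later.
//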
2117 void Interpreter::callFunction(Function *F, ArrayRef<GenericValue> ArgVals) {
2118   assert((ECStack.empty() || !ECStack.back().Caller ||
2119           ECStack.back().Caller->arg_size() == ArgVals.size()) &&
2120          "Incorrect number of arguments passed into function call!");
2121   // Make a new stack frame... and fill it in.
2122   ECStack.emplace_back();
2123   ExecutionContext &StackFrame = ECStack.back();
2124   StackFrame.CurFunction = F;
2125 
2126   // Special handling for external functions.
2127   if (F->isDeclaration()) {
2128     GenericValue Result = callExternalFunction (F, ArgVals);
2129     // Simulate a 'ret' instruction of the appropriate type.
2130     popStackAndReturnValueToCaller (F->getReturnType (), Result);
2131     return;
2132   }
2133 
2134   // Get pointers to first LLVM BB & Instruction in function.
2135   StackFrame.CurBB     = &F->front();
2136   StackFrame.CurInst   = StackFrame.CurBB->begin();
2137 
2138   // Run through the function arguments and initialize their values...
2139   assert((ArgVals.size() == F->arg_size() ||
2140          (ArgVals.size() > F->arg_size() && F->getFunctionType()->isVarArg()))&&
2141          "Invalid number of values passed to function invocation!");
2142 
2143   // Handle non-varargs arguments...
2144   unsigned i = 0;
2145   for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end();
2146        AI != E; ++AI, ++i)
2147     SetValue(&*AI, ArgVals[i], StackFrame);
2148 
2149   // Handle varargs arguments...
2150   StackFrame.VarArgs.assign(ArgVals.begin()+i, ArgVals.end());
2151 }
2152 
2153 
2154 void Interpreter::run() {
2155   while (!ECStack.empty()) {
2156     // Interpret a single instruction & increment the "PC".
2157     ExecutionContext &SF = ECStack.back();  // Current stack frame
2158     Instruction &I = *SF.CurInst++;         // Increment before execute
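    // CurInst is advanced before dispatch so that handlers which transfer
    // control (branches, calls, returns) can overwrite CurBB/CurInst without
    // this loop re-reading a stale iterator.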
2159 
2160     // Track the number of dynamic instructions executed.
2161     ++NumDynamicInsts;
2162 
2163     LLVM_DEBUG(dbgs() << "About to interpret: " << I << "\n");
2164     visit(I);   // Dispatch to one of the visit* methods...
2165   }
2166 }
2167