/minix3/crypto/external/bsd/openssl/dist/crypto/cast/asm/
  cast-586.pl
    18: $tmp2="ebx";
    58: &mov($tmp2,&wparam(0));
    64: &mov($L,&DWP(0,$tmp2,"",0));
    65: &mov($R,&DWP(4,$tmp2,"",0));
    81: &E_CAST( 0,$S,$L,$R,$K,@F1,$tmp1,$tmp2,$tmp3,$tmp4);
    82: &E_CAST( 1,$S,$R,$L,$K,@F2,$tmp1,$tmp2,$tmp3,$tmp4);
    83: &E_CAST( 2,$S,$L,$R,$K,@F3,$tmp1,$tmp2,$tmp3,$tmp4);
    84: &E_CAST( 3,$S,$R,$L,$K,@F1,$tmp1,$tmp2,$tmp3,$tmp4);
    85: &E_CAST( 4,$S,$L,$R,$K,@F2,$tmp1,$tmp2,$tmp3,$tmp4);
    86: &E_CAST( 5,$S,$R,$L,$K,@F3,$tmp1,$tmp2,$tmp3,$tmp4);
    [all …]
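
The alternating (L, R) / (R, L) arguments in the E_CAST calls above (and in the BF_ENCRYPT calls of bf-586.pl below) are the usual way of writing a Feistel network without an explicit swap of the halves. A minimal C sketch of that round pattern, with made-up names and a hypothetical round-function type standing in for CAST's F1/F2/F3 (an illustration only, not OpenSSL's code):

    #include <stdint.h>

    /* Hypothetical round-function type; CAST-128's real rounds combine
     * S-box lookups with add/xor/sub and a key-dependent rotate. */
    typedef uint32_t (*round_fn)(uint32_t half, uint32_t km, uint8_t kr);

    /* Two consecutive Feistel rounds: the even round mixes R into L, the
     * odd round mixes the updated L back into R, so no swap is needed. */
    void feistel_round_pair(uint32_t *L, uint32_t *R,
                            round_fn f_even, round_fn f_odd,
                            const uint32_t km[], const uint8_t kr[], int i)
    {
        *L ^= f_even(*R, km[i],     kr[i]);
        *R ^= f_odd (*L, km[i + 1], kr[i + 1]);
    }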
|
/minix3/crypto/external/bsd/openssl/dist/crypto/bf/asm/
  bf-586.pl
    16: $tmp2="ebx";
    35: &mov($tmp2,&wparam(0));
    41: &mov($L,&DWP(0,$tmp2,"",0));
    42: &mov($R,&DWP(4,$tmp2,"",0));
    50: &mov($tmp2,&DWP(0,$P,"",0));
    53: &xor($L,$tmp2);
    58: &BF_ENCRYPT($i+1,$R,$L,$P,$tmp1,$tmp2,$tmp3,$tmp4,1);
    62: &BF_ENCRYPT($i+2,$L,$R,$P,$tmp1,$tmp2,$tmp3,$tmp4,1);
    69: &mov($tmp2,&DWP(($BF_ROUNDS+1)*4,$P,"",0));
    72: &xor($L,$tmp2);
    [all …]
|
/minix3/external/bsd/llvm/dist/llvm/test/CodeGen/ARM/
  vshift.ll
    7: %tmp2 = load <8 x i8>* %B
    8: %tmp3 = shl <8 x i8> %tmp1, %tmp2
    16: %tmp2 = load <4 x i16>* %B
    17: %tmp3 = shl <4 x i16> %tmp1, %tmp2
    25: %tmp2 = load <2 x i32>* %B
    26: %tmp3 = shl <2 x i32> %tmp1, %tmp2
    34: %tmp2 = load <1 x i64>* %B
    35: %tmp3 = shl <1 x i64> %tmp1, %tmp2
    43: %tmp2 = shl <8 x i8> %tmp1, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
    44: ret <8 x i8> %tmp2
    [all …]
|
  vbits.ll
    7: %tmp2 = load <8 x i8>* %B
    8: %tmp3 = and <8 x i8> %tmp1, %tmp2
    16: %tmp2 = load <4 x i16>* %B
    17: %tmp3 = and <4 x i16> %tmp1, %tmp2
    25: %tmp2 = load <2 x i32>* %B
    26: %tmp3 = and <2 x i32> %tmp1, %tmp2
    34: %tmp2 = load <1 x i64>* %B
    35: %tmp3 = and <1 x i64> %tmp1, %tmp2
    43: %tmp2 = load <16 x i8>* %B
    44: %tmp3 = and <16 x i8> %tmp1, %tmp2
    [all …]
|
  vneg.ll
    7: %tmp2 = sub <8 x i8> zeroinitializer, %tmp1
    8: ret <8 x i8> %tmp2
    15: %tmp2 = sub <4 x i16> zeroinitializer, %tmp1
    16: ret <4 x i16> %tmp2
    23: %tmp2 = sub <2 x i32> zeroinitializer, %tmp1
    24: ret <2 x i32> %tmp2
    31: %tmp2 = fsub <2 x float> < float -0.000000e+00, float -0.000000e+00 >, %tmp1
    32: ret <2 x float> %tmp2
    39: %tmp2 = sub <16 x i8> zeroinitializer, %tmp1
    40: ret <16 x i8> %tmp2
    [all …]
|
  vshl.ll
    7: %tmp2 = load <8 x i8>* %B
    8: %tmp3 = call <8 x i8> @llvm.arm.neon.vshifts.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
    16: %tmp2 = load <4 x i16>* %B
    17: %tmp3 = call <4 x i16> @llvm.arm.neon.vshifts.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
    25: %tmp2 = load <2 x i32>* %B
    26: %tmp3 = call <2 x i32> @llvm.arm.neon.vshifts.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
    34: %tmp2 = load <1 x i64>* %B
    35: %tmp3 = call <1 x i64> @llvm.arm.neon.vshifts.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
    43: %tmp2 = load <8 x i8>* %B
    44: %tmp3 = call <8 x i8> @llvm.arm.neon.vshiftu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
    [all …]
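
The llvm.arm.neon.vshifts/vshiftu calls in vshl.ll are the IR form of NEON's shift-by-per-lane-amount operation. A short C-intrinsics sketch of the same thing, assuming an ARM target with NEON; the function names are mine, not taken from the test:

    #include <arm_neon.h>

    /* Each lane of v is shifted by the corresponding (signed, possibly
     * negative) lane of amounts; signed vs. unsigned element handling
     * matches the vshifts/vshiftu split above. */
    int8x8_t shift_signed_lanes(int8x8_t v, int8x8_t amounts)
    {
        return vshl_s8(v, amounts);
    }

    uint8x8_t shift_unsigned_lanes(uint8x8_t v, int8x8_t amounts)
    {
        return vshl_u8(v, amounts);
    }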
|
  vadd.ll
    7: %tmp2 = load <8 x i8>* %B
    8: %tmp3 = add <8 x i8> %tmp1, %tmp2
    16: %tmp2 = load <4 x i16>* %B
    17: %tmp3 = add <4 x i16> %tmp1, %tmp2
    25: %tmp2 = load <2 x i32>* %B
    26: %tmp3 = add <2 x i32> %tmp1, %tmp2
    34: %tmp2 = load <1 x i64>* %B
    35: %tmp3 = add <1 x i64> %tmp1, %tmp2
    43: %tmp2 = load <2 x float>* %B
    44: %tmp3 = fadd <2 x float> %tmp1, %tmp2
    [all …]
|
  vsub.ll
    7: %tmp2 = load <8 x i8>* %B
    8: %tmp3 = sub <8 x i8> %tmp1, %tmp2
    16: %tmp2 = load <4 x i16>* %B
    17: %tmp3 = sub <4 x i16> %tmp1, %tmp2
    25: %tmp2 = load <2 x i32>* %B
    26: %tmp3 = sub <2 x i32> %tmp1, %tmp2
    34: %tmp2 = load <1 x i64>* %B
    35: %tmp3 = sub <1 x i64> %tmp1, %tmp2
    43: %tmp2 = load <2 x float>* %B
    44: %tmp3 = fsub <2 x float> %tmp1, %tmp2
    [all …]
|
  vqshl.ll
    7: %tmp2 = load <8 x i8>* %B
    8: %tmp3 = call <8 x i8> @llvm.arm.neon.vqshifts.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
    16: %tmp2 = load <4 x i16>* %B
    17: %tmp3 = call <4 x i16> @llvm.arm.neon.vqshifts.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
    25: %tmp2 = load <2 x i32>* %B
    26: %tmp3 = call <2 x i32> @llvm.arm.neon.vqshifts.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
    34: %tmp2 = load <1 x i64>* %B
    35: %tmp3 = call <1 x i64> @llvm.arm.neon.vqshifts.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
    43: %tmp2 = load <8 x i8>* %B
    44: %tmp3 = call <8 x i8> @llvm.arm.neon.vqshiftu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
    [all …]
|
  vrev.ll
    7: …%tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3…
    8: ret <8 x i8> %tmp2
    15: %tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
    16: ret <4 x i16> %tmp2
    23: %tmp2 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <2 x i32> <i32 1, i32 0>
    24: ret <2 x i32> %tmp2
    31: %tmp2 = shufflevector <2 x float> %tmp1, <2 x float> undef, <2 x i32> <i32 1, i32 0>
    32: ret <2 x float> %tmp2
    39: …%tmp2 = shufflevector <16 x i8> %tmp1, <16 x i8> undef, <16 x i32> <i32 7, i32 6, i32 5, i32 4, i3…
    40: ret <16 x i8> %tmp2
    [all …]
|
  vabs.ll
    7: %tmp2 = call <8 x i8> @llvm.arm.neon.vabs.v8i8(<8 x i8> %tmp1)
    8: ret <8 x i8> %tmp2
    15: %tmp2 = call <4 x i16> @llvm.arm.neon.vabs.v4i16(<4 x i16> %tmp1)
    16: ret <4 x i16> %tmp2
    23: %tmp2 = call <2 x i32> @llvm.arm.neon.vabs.v2i32(<2 x i32> %tmp1)
    24: ret <2 x i32> %tmp2
    31: %tmp2 = call <2 x float> @llvm.fabs.v2f32(<2 x float> %tmp1)
    32: ret <2 x float> %tmp2
    39: %tmp2 = call <16 x i8> @llvm.arm.neon.vabs.v16i8(<16 x i8> %tmp1)
    40: ret <16 x i8> %tmp2
    [all …]
|
  vldlane.ll
    11: %tmp2 = load i8* %A, align 8
    12: %tmp3 = insertelement <8 x i8> %tmp1, i8 %tmp2, i32 3
    21: %tmp2 = load i16* %A, align 8
    22: %tmp3 = insertelement <4 x i16> %tmp1, i16 %tmp2, i32 2
    31: %tmp2 = load i32* %A, align 8
    32: %tmp3 = insertelement <2 x i32> %tmp1, i32 %tmp2, i32 1
    41: %tmp2 = load i32* %A, align 4
    42: %tmp3 = insertelement <2 x i32> %tmp1, i32 %tmp2, i32 1
    50: %tmp2 = load float* %A, align 4
    51: %tmp3 = insertelement <2 x float> %tmp1, float %tmp2, i32 1
    [all …]
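
The scalar load followed by insertelement in vldlane.ll is the IR shape of a single-lane vector load. A C-intrinsics equivalent, under the assumption of an ARM/NEON target (the function name is mine, and the lane index must be a compile-time constant):

    #include <arm_neon.h>

    /* Load one byte from p into lane 3 of v, leaving the other lanes alone
     * (the VLD1 "one lane" addressing form). */
    int8x8_t load_byte_into_lane3(const int8_t *p, int8x8_t v)
    {
        return vld1_lane_s8(p, v, 3);
    }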
|
  uxtb.ll
    11: %tmp2 = and i32 %tmp1, 16711935 ; <i32> [#uses=1]
    12: ret i32 %tmp2
    17: %tmp2 = and i32 %tmp1, 16711935 ; <i32> [#uses=1]
    18: ret i32 %tmp2
    29: %tmp2 = and i32 %tmp1, 16711935 ; <i32> [#uses=1]
    30: ret i32 %tmp2
    35: %tmp2 = and i32 %tmp1, 255 ; <i32> [#uses=1]
    38: %tmp6 = or i32 %tmp2, %tmp5 ; <i32> [#uses=1]
    44: %tmp2 = and i32 %tmp1, 255 ; <i32> [#uses=1]
    47: %tmp6 = or i32 %tmp2, %tmp5 ; <i32> [#uses=1]
    [all …]
|
  vcnt.ll
    8: %tmp2 = call <8 x i8> @llvm.ctpop.v8i8(<8 x i8> %tmp1)
    9: ret <8 x i8> %tmp2
    16: %tmp2 = call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> %tmp1)
    17: ret <16 x i8> %tmp2
    27: %tmp2 = call <8 x i8> @llvm.ctlz.v8i8(<8 x i8> %tmp1, i1 0)
    28: ret <8 x i8> %tmp2
    35: %tmp2 = call <4 x i16> @llvm.ctlz.v4i16(<4 x i16> %tmp1, i1 0)
    36: ret <4 x i16> %tmp2
    43: %tmp2 = call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> %tmp1, i1 0)
    44: ret <2 x i32> %tmp2
    [all …]
|
  vtbl.ll
    11: %tmp2 = load <8 x i8>* %B
    12: %tmp3 = call <8 x i8> @llvm.arm.neon.vtbl1(<8 x i8> %tmp1, <8 x i8> %tmp2)
    20: %tmp2 = load %struct.__neon_int8x8x2_t* %B
    21: %tmp3 = extractvalue %struct.__neon_int8x8x2_t %tmp2, 0
    22: %tmp4 = extractvalue %struct.__neon_int8x8x2_t %tmp2, 1
    31: %tmp2 = load %struct.__neon_int8x8x3_t* %B
    32: %tmp3 = extractvalue %struct.__neon_int8x8x3_t %tmp2, 0
    33: %tmp4 = extractvalue %struct.__neon_int8x8x3_t %tmp2, 1
    34: %tmp5 = extractvalue %struct.__neon_int8x8x3_t %tmp2, 2
    43: %tmp2 = load %struct.__neon_int8x8x4_t* %B
    [all …]
|
/minix3/crypto/external/bsd/openssl/dist/crypto/sha/asm/
  sha512-sparcv9.pl
    110: $tmp2="%g5";
    176: sllx @pair[1],$tmp31,$tmp2 ! Xload($i)
    181: or $tmp1,$tmp2,$tmp2
    182: or @pair[1],$tmp2,$tmp2
    184: add $h,$tmp2,$T1
    185: $ST $tmp2,[%sp+`$bias+$frame+$i*$SZ`]
    193: sllx @pair[1],$tmp31,$tmp2 ! Xload($i)
    199: or $tmp1,$tmp2,$tmp2
    201: or @pair[1],$tmp2,$tmp2
    203: add $h,$tmp2,$T1
    [all …]
|
/minix3/external/bsd/llvm/dist/llvm/test/Transforms/Reassociate/
  otherops.ll
    7: ; CHECK-NEXT: %tmp2 = mul i32 %arg, 144
    8: ; CHECK-NEXT: ret i32 %tmp2
    11: %tmp2 = mul i32 %tmp1, 12
    12: ret i32 %tmp2
    17: ; CHECK-NEXT: %tmp2 = and i32 %arg, 14
    18: ; CHECK-NEXT: ret i32 %tmp2
    21: %tmp2 = and i32 %tmp1, 14
    22: ret i32 %tmp2
    27: ; CHECK-NEXT: %tmp2 = or i32 %arg, 14
    28: ; CHECK-NEXT: ret i32 %tmp2
    [all …]
|
  fast-ReassociateVector.ll
    7: ; CHECK-NEXT: %tmp2 = fmul fast <4 x float> %tmp1, zeroinitializer
    10: %tmp2 = fmul fast <4 x float> zeroinitializer, %tmp1
    11: ret <4 x float> %tmp2
    18: ; CHECK-NEXT: %tmp2 = add <2 x i32> %x, %y
    19: ; CHECK-NEXT: %tmp3 = add <2 x i32> %tmp1, %tmp2
    22: %tmp2 = add <2 x i32> %y, %x
    23: %tmp3 = add <2 x i32> %tmp1, %tmp2
    30: ; CHECK-NEXT: %tmp2 = mul <2 x i32> %x, %y
    31: ; CHECK-NEXT: %tmp3 = mul <2 x i32> %tmp1, %tmp2
    34: %tmp2 = mul <2 x i32> %y, %x
    [all …]
|
/minix3/external/bsd/llvm/dist/llvm/test/CodeGen/AArch64/
  arm64-neon-scalar-by-elem-mul.ll
    7: %tmp2 = fmul float %a, %tmp1;
    8: ret float %tmp2;
    15: %tmp2 = fmul float %tmp1, %a;
    16: ret float %tmp2;
    24: %tmp2 = fmul float %a, %tmp1;
    25: ret float %tmp2;
    32: %tmp2 = fmul float %tmp1, %a;
    33: ret float %tmp2;
    41: %tmp2 = fmul double %a, %tmp1;
    42: ret double %tmp2;
    [all …]
|
  neon-mla-mls.ll
    7: %tmp2 = add <8 x i8> %C, %tmp1;
    8: ret <8 x i8> %tmp2
    14: %tmp2 = add <16 x i8> %C, %tmp1;
    15: ret <16 x i8> %tmp2
    21: %tmp2 = add <4 x i16> %C, %tmp1;
    22: ret <4 x i16> %tmp2
    28: %tmp2 = add <8 x i16> %C, %tmp1;
    29: ret <8 x i16> %tmp2
    35: %tmp2 = add <2 x i32> %C, %tmp1;
    36: ret <2 x i32> %tmp2
    [all …]
|
  neon-scalar-by-elem-fma.ll
    10: %tmp2 = call float @llvm.fma.f32(float %b, float %tmp1, float %a)
    11: ret float %tmp2
    18: %tmp2 = call float @llvm.fma.f32(float %tmp1, float %a, float %a)
    19: ret float %tmp2
    26: %tmp2 = call float @llvm.fma.f32(float %b, float %tmp1, float %a)
    27: ret float %tmp2
    34: %tmp2 = call double @llvm.fma.f64(double %b, double %tmp1, double %a)
    35: ret double %tmp2
    42: %tmp2 = call double @llvm.fma.f64(double %b, double %tmp1, double %a)
    43: ret double %tmp2
    [all …]
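
llvm.fma.f32/llvm.fma.f64 above denote a single-rounding fused multiply-add, the operation C99 exposes as fmaf()/fma(). A minimal sketch of the scalar computation these tests start from (selecting the by-element FMLA/FMADD form is the backend's job; function names are mine):

    #include <math.h>

    /* b * lane + a, computed with one rounding step. */
    float fused_f32(float a, float b, float lane)
    {
        return fmaf(b, lane, a);
    }

    double fused_f64(double a, double b, double lane)
    {
        return fma(b, lane, a);
    }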
|
  arm64-mul.ll
    12: %tmp2 = zext i64 %b to i128
    13: %tmp3 = mul i128 %tmp1, %tmp2
    23: %tmp2 = sext i64 %b to i128
    24: %tmp3 = mul i128 %tmp1, %tmp2
    33: %tmp2 = zext i32 %b to i64
    34: %tmp3 = mul i64 %tmp1, %tmp2
    43: %tmp2 = sext i32 %b to i64
    44: %tmp3 = mul i64 %tmp1, %tmp2
    53: %tmp2 = zext i32 %b to i64
    54: %tmp3 = mul i64 %tmp1, %tmp2
    [all …]
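
The zext/sext to i128 followed by mul i128 in arm64-mul.ll is how a full 64x64->128-bit multiply is written in IR; the upper half of that product is what AArch64's UMULH/SMULH instructions return. A small C sketch, assuming a compiler that provides __int128 (function names are mine):

    #include <stdint.h>

    /* Upper 64 bits of the unsigned 128-bit product (UMULH). */
    uint64_t umulh64(uint64_t a, uint64_t b)
    {
        return (uint64_t)(((unsigned __int128)a * b) >> 64);
    }

    /* Upper 64 bits of the signed 128-bit product (SMULH). */
    int64_t smulh64(int64_t a, int64_t b)
    {
        return (int64_t)(((__int128)a * b) >> 64);
    }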
|
  arm64-vsra.ll
    7: %tmp2 = load <8 x i8>* %B
    8: %tmp3 = ashr <8 x i8> %tmp2, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
    17: %tmp2 = load <4 x i16>* %B
    18: %tmp3 = ashr <4 x i16> %tmp2, < i16 15, i16 15, i16 15, i16 15 >
    27: %tmp2 = load <2 x i32>* %B
    28: %tmp3 = ashr <2 x i32> %tmp2, < i32 31, i32 31 >
    37: %tmp2 = load <16 x i8>* %B
    38: …%tmp3 = ashr <16 x i8> %tmp2, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, …
    47: %tmp2 = load <8 x i16>* %B
    48: %tmp3 = ashr <8 x i16> %tmp2, < i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15 >
    [all …]
|
/minix3/external/bsd/llvm/dist/llvm/test/CodeGen/Thumb2/
  thumb2-orr2.ll
    5: %tmp2 = or i32 %a, 187
    6: ret i32 %tmp2
    13: %tmp2 = or i32 %a, 11141290
    14: ret i32 %tmp2
    21: %tmp2 = or i32 %a, 3422604288
    22: ret i32 %tmp2
    29: %tmp2 = or i32 %a, 1145324612
    30: ret i32 %tmp2
    37: %tmp2 = or i32 %a, 1114112
    38: ret i32 %tmp2
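
The decimal constants in thumb2-orr2.ll are easier to read in hex; my reading is that they exercise Thumb-2's modified-immediate forms (a plain 8-bit value, the 00XY00XY and XY00XY00 replications, an XYXYXYXY replication, and a shifted 8-bit value). A quick self-check that the hex forms match the decimals in the test:

    #include <assert.h>

    int main(void)
    {
        assert(187u        == 0x000000BBu);  /* plain 8-bit immediate  */
        assert(11141290u   == 0x00AA00AAu);  /* 00XY00XY replication   */
        assert(3422604288u == 0xCC00CC00u);  /* XY00XY00 replication   */
        assert(1145324612u == 0x44444444u);  /* XYXYXYXY replication   */
        assert(1114112u    == 0x00110000u);  /* 8-bit value shifted up */
        return 0;
    }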
|
/minix3/crypto/external/bsd/heimdal/dist/lib/hcrypto/libtommath/
  bn_mp_toom_mul.c
    29: mp_int w0, w1, w2, w3, w4, tmp1, tmp2, a0, a1, a2, b0, b1, b2; in mp_toom_mul() local
    35: &b2, &tmp1, &tmp2, NULL)) != MP_OKAY) { in mp_toom_mul()
    98: if ((res = mp_mul_2(&b0, &tmp2)) != MP_OKAY) { in mp_toom_mul()
    101: if ((res = mp_add(&tmp2, &b1, &tmp2)) != MP_OKAY) { in mp_toom_mul()
    104: if ((res = mp_mul_2(&tmp2, &tmp2)) != MP_OKAY) { in mp_toom_mul()
    107: if ((res = mp_add(&tmp2, &b2, &tmp2)) != MP_OKAY) { in mp_toom_mul()
    111: if ((res = mp_mul(&tmp1, &tmp2, &w1)) != MP_OKAY) { in mp_toom_mul()
    129: if ((res = mp_mul_2(&b2, &tmp2)) != MP_OKAY) { in mp_toom_mul()
    132: if ((res = mp_add(&tmp2, &b1, &tmp2)) != MP_OKAY) { in mp_toom_mul()
    135: if ((res = mp_mul_2(&tmp2, &tmp2)) != MP_OKAY) { in mp_toom_mul()
    [all …]
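
The quoted mp_mul_2/mp_add chain (source lines 98-107) accumulates ((2*b0 + b1)*2 + b2) = 4*b0 + 2*b1 + b2 into tmp2 before the mp_mul at line 111, which, up to scaling, is the evaluation of the split operand b0 + b1*x + b2*x^2 at one of the Toom-3 interpolation points. A simplified sketch of the same chain with plain integers instead of mp_int (an illustration, not libtommath's API):

    #include <assert.h>
    #include <stdint.h>

    uint64_t toom3_eval(uint64_t b0, uint64_t b1, uint64_t b2)
    {
        uint64_t tmp2;
        tmp2  = b0 * 2;   /* mp_mul_2(&b0, &tmp2)      */
        tmp2 += b1;       /* mp_add(&tmp2, &b1, &tmp2) */
        tmp2 *= 2;        /* mp_mul_2(&tmp2, &tmp2)    */
        tmp2 += b2;       /* mp_add(&tmp2, &b2, &tmp2) */
        return tmp2;      /* == 4*b0 + 2*b1 + b2       */
    }

    int main(void)
    {
        assert(toom3_eval(3, 5, 7) == 4*3 + 2*5 + 7);
        return 0;
    }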
|