#! /usr/bin/env perl
# Copyright 2012-2020 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the Apache License 2.0 (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html

#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# SHA256 for C64x+.
#
# January 2012
#
# Performance is just below 10 cycles per processed byte, which is
# almost 40% faster than compiler-generated code. Unrolling is unlikely
# to give more than ~8% improvement...
#
# !!! Note that this module uses AMR, which means that all interrupt
# service routines are expected to preserve it and, for their own
# well-being, to zero it upon entry.

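# For reference, the software-pipelined loops below implement the standard
# FIPS 180-4 SHA-256 round. The following is a minimal Perl model of one
# round, kept purely as documentation: it is not used by the generator and
# the helper names are ours. Inputs are assumed to be 32-bit values, all
# arithmetic is reduced mod 2^32 explicitly, and the ROTL amounts used in
# the assembly are simply 32 minus the ROTR amounts shown here.
sub ROTR { my ($x,$n)=@_; (($x>>$n)|($x<<(32-$n))) & 0xffffffff; }

sub sha256_round_ref {
    my ($a,$b,$c,$d,$e,$f,$g,$h,$Xi,$Ki) = @_;
    my $Sigma1 = ROTR($e,6) ^ ROTR($e,11) ^ ROTR($e,25);	# ROTL 26,21,7
    my $Ch     = ($e & $f) ^ (~$e & $g);
    my $T1     = ($h + $Sigma1 + $Ch + $Ki + $Xi) & 0xffffffff;
    my $Sigma0 = ROTR($a,2) ^ ROTR($a,13) ^ ROTR($a,22);	# ROTL 30,19,10
    my $Maj    = ($a & $b) ^ ($a & $c) ^ ($b & $c);		# == ((a|b)&c)|(a&b)
    my $T2     = ($Sigma0 + $Maj) & 0xffffffff;
    # returns the new (a,b,c,d,e,f,g,h)
    return (($T1+$T2) & 0xffffffff, $a, $b, $c,
	    ($d+$T1) & 0xffffffff, $e, $f, $g);
}
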
$output = pop and open STDOUT,">$output";

($CTXA,$INP,$NUM) = ("A4","B4","A6");		# arguments
 $K256="A3";

($A,$Actx,$B,$Bctx,$C,$Cctx,$D,$Dctx,$T2,$S0,$s1,$t0a,$t1a,$t2a,$X9,$X14)
	=map("A$_",(16..31));
($E,$Ectx,$F,$Fctx,$G,$Gctx,$H,$Hctx,$T1,$S1,$s0,$t0e,$t1e,$t2e,$X1,$X15)
	=map("B$_",(16..31));

($Xia,$Xib)=("A5","B5");			# circular/ring buffer
 $CTXB=$t2e;

($Xn,$X0,$K)=("B7","B8","B9");
($Maj,$Ch)=($T2,"B6");

$code.=<<___;
	.text

	.if	.ASSEMBLER_VERSION<7000000
	.asg	0,__TI_EABI__
	.endif
	.if	__TI_EABI__
	.nocmp
	.asg	sha256_block_data_order,_sha256_block_data_order
	.endif

	.asg	B3,RA
	.asg	A15,FP
	.asg	B15,SP

	.if	.BIG_ENDIAN
	.asg	SWAP2,MV
	.asg	SWAP4,MV
	.endif

	.global	_sha256_block_data_order
_sha256_block_data_order:
__sha256_block:
	.asmfunc stack_usage(64)
	MV	$NUM,A0			; reassign $NUM
||	MVK	-64,B0
  [!A0]	BNOP	RA			; if ($NUM==0) return;
|| [A0]	STW	FP,*SP--[16]		; save frame pointer and alloca(64)
|| [A0]	MV	SP,FP
   [A0]	ADDKPC	__sha256_block,B2
|| [A0]	AND	B0,SP,SP		; align stack at 64 bytes
	.if	__TI_EABI__
   [A0]	MVK	0x00404,B1
|| [A0]	MVKL	\$PCR_OFFSET(K256,__sha256_block),$K256
   [A0]	MVKH	0x50000,B1
|| [A0]	MVKH	\$PCR_OFFSET(K256,__sha256_block),$K256
	.else
   [A0]	MVK	0x00404,B1
|| [A0]	MVKL	(K256-__sha256_block),$K256
   [A0]	MVKH	0x50000,B1
|| [A0]	MVKH	(K256-__sha256_block),$K256
	.endif
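	; Explanatory note (ours, based on our reading of the TI C64x+ AMR
	; layout): B1 = 0x00050404 selects circular addressing for registers
	; $Xia and $Xib using block size BK0, and BK0 = 5 makes the block
	; 2^(5+1) = 64 bytes, i.e. exactly the 16-word X[] ring buffer that
	; sits at the 64-byte-aligned stack pointer assigned just below.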
   [A0]	MVC	B1,AMR			; setup circular addressing
|| [A0]	MV	SP,$Xia
   [A0]	MV	SP,$Xib
|| [A0]	ADD	B2,$K256,$K256
|| [A0]	MV	$CTXA,$CTXB
|| [A0]	SUBAW	SP,2,SP			; reserve two words above buffer
	LDW	*${CTXA}[0],$A		; load ctx
||	LDW	*${CTXB}[4],$E
	LDW	*${CTXA}[1],$B
||	LDW	*${CTXB}[5],$F
	LDW	*${CTXA}[2],$C
||	LDW	*${CTXB}[6],$G
	LDW	*${CTXA}[3],$D
||	LDW	*${CTXB}[7],$H

	LDNW	*$INP++,$Xn		; pre-fetch input
	LDW	*$K256++,$K		; pre-fetch K256[0]
	MVK	14,B0			; loop counters
	MVK	47,B1
||	ADDAW	$Xia,9,$Xia
outerloop?:
	SUB	A0,1,A0
||	MV	$A,$Actx
||	MV	$E,$Ectx
||	MVD	$B,$Bctx
||	MVD	$F,$Fctx
	MV	$C,$Cctx
||	MV	$G,$Gctx
||	MVD	$D,$Dctx
||	MVD	$H,$Hctx
||	SWAP4	$Xn,$X0

	SPLOOPD	8			; BODY_00_14
||	MVC	B0,ILC
||	SWAP2	$X0,$X0

	LDNW	*$INP++,$Xn
||	ROTL	$A,30,$S0
||	OR	$A,$B,$Maj
||	AND	$A,$B,$t2a
||	ROTL	$E,26,$S1
||	AND	$F,$E,$Ch
||	ANDN	$G,$E,$t2e
	ROTL	$A,19,$t0a
||	AND	$C,$Maj,$Maj
||	ROTL	$E,21,$t0e
||	XOR	$t2e,$Ch,$Ch		; Ch(e,f,g) = (e&f)^(~e&g)
	ROTL	$A,10,$t1a
||	OR	$t2a,$Maj,$Maj		; Maj(a,b,c) = ((a|b)&c)|(a&b)
||	ROTL	$E,7,$t1e
||	ADD	$K,$H,$T1		; T1 = h + K256[i]
	ADD	$X0,$T1,$T1		; T1 += X[i];
||	STW	$X0,*$Xib++
||	XOR	$t0a,$S0,$S0
||	XOR	$t0e,$S1,$S1
	XOR	$t1a,$S0,$S0		; Sigma0(a)
||	XOR	$t1e,$S1,$S1		; Sigma1(e)
||	LDW	*$K256++,$K		; pre-fetch K256[i+1]
||	ADD	$Ch,$T1,$T1		; T1 += Ch(e,f,g)
	ADD	$S1,$T1,$T1		; T1 += Sigma1(e)
||	ADD	$S0,$Maj,$T2		; T2 = Sigma0(a) + Maj(a,b,c)
||	ROTL	$G,0,$H			; h = g
||	MV	$F,$G			; g = f
||	MV	$X0,$X14
||	SWAP4	$Xn,$X0
	SWAP2	$X0,$X0
||	MV	$E,$F			; f = e
||	ADD	$D,$T1,$E		; e = d + T1
||	MV	$C,$D			; d = c
	MV	$B,$C			; c = b
||	MV	$A,$B			; b = a
||	ADD	$T1,$T2,$A		; a = T1 + T2
	SPKERNEL

	ROTL	$A,30,$S0		; BODY_15
||	OR	$A,$B,$Maj
||	AND	$A,$B,$t2a
||	ROTL	$E,26,$S1
||	AND	$F,$E,$Ch
||	ANDN	$G,$E,$t2e
||	LDW	*${Xib}[1],$Xn		; modulo-scheduled
	ROTL	$A,19,$t0a
||	AND	$C,$Maj,$Maj
||	ROTL	$E,21,$t0e
||	XOR	$t2e,$Ch,$Ch		; Ch(e,f,g) = (e&f)^(~e&g)
||	LDW	*${Xib}[2],$X1		; modulo-scheduled
	ROTL	$A,10,$t1a
||	OR	$t2a,$Maj,$Maj		; Maj(a,b,c) = ((a|b)&c)|(a&b)
||	ROTL	$E,7,$t1e
||	ADD	$K,$H,$T1		; T1 = h + K256[i]
	ADD	$X0,$T1,$T1		; T1 += X[i];
||	STW	$X0,*$Xib++
||	XOR	$t0a,$S0,$S0
||	XOR	$t0e,$S1,$S1
	XOR	$t1a,$S0,$S0		; Sigma0(a)
||	XOR	$t1e,$S1,$S1		; Sigma1(e)
||	LDW	*$K256++,$K		; pre-fetch K256[i+1]
||	ADD	$Ch,$T1,$T1		; T1 += Ch(e,f,g)
	ADD	$S1,$T1,$T1		; T1 += Sigma1(e)
||	ADD	$S0,$Maj,$T2		; T2 = Sigma0(a) + Maj(a,b,c)
||	ROTL	$G,0,$H			; h = g
||	MV	$F,$G			; g = f
||	MV	$X0,$X15
	MV	$E,$F			; f = e
||	ADD	$D,$T1,$E		; e = d + T1
||	MV	$C,$D			; d = c
||	MV	$Xn,$X0			; modulo-scheduled
||	LDW	*$Xia,$X9		; modulo-scheduled
||	ROTL	$X1,25,$t0e		; modulo-scheduled
||	ROTL	$X14,15,$t0a		; modulo-scheduled
	SHRU	$X1,3,$s0		; modulo-scheduled
||	SHRU	$X14,10,$s1		; modulo-scheduled
||	ROTL	$B,0,$C			; c = b
||	MV	$A,$B			; b = a
||	ADD	$T1,$T2,$A		; a = T1 + T2

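	; Explanatory note (ours): the remaining 48 rounds also expand the
	; message schedule in the 16-word ring buffer addressed by $Xia and
	; $Xib.  In standard notation that update is
	; X[i] = X[i-16] + sigma0(X[i-15]) + X[i-7] + sigma1(X[i-2]), where
	; sigma0(x) = ROTR(x,7)^ROTR(x,18)^SHR(x,3) appears below as
	; ROTL 25 / ROTL 14 / SHRU 3, and sigma1(x) = ROTR(x,17)^ROTR(x,19)^
	; SHR(x,10) as ROTL 15 / ROTL 13 / SHRU 10.  The in-line comments
	; use indices relative to the ring pointers instead.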
	SPLOOPD	10			; BODY_16_63
||	MVC	B1,ILC
||	ROTL	$X1,14,$t1e		; modulo-scheduled
||	ROTL	$X14,13,$t1a		; modulo-scheduled

	XOR	$t0e,$s0,$s0
||	XOR	$t0a,$s1,$s1
||	MV	$X15,$X14
||	MV	$X1,$Xn
	XOR	$t1e,$s0,$s0		; sigma0(X[i+1])
||	XOR	$t1a,$s1,$s1		; sigma1(X[i+14])
||	LDW	*${Xib}[2],$X1		; modulo-scheduled
	ROTL	$A,30,$S0
||	OR	$A,$B,$Maj
||	AND	$A,$B,$t2a
||	ROTL	$E,26,$S1
||	AND	$F,$E,$Ch
||	ANDN	$G,$E,$t2e
||	ADD	$X9,$X0,$X0		; X[i] += X[i+9]
	ROTL	$A,19,$t0a
||	AND	$C,$Maj,$Maj
||	ROTL	$E,21,$t0e
||	XOR	$t2e,$Ch,$Ch		; Ch(e,f,g) = (e&f)^(~e&g)
||	ADD	$s0,$X0,$X0		; X[i] += sigma0(X[i+1])
	ROTL	$A,10,$t1a
||	OR	$t2a,$Maj,$Maj		; Maj(a,b,c) = ((a|b)&c)|(a&b)
||	ROTL	$E,7,$t1e
||	ADD	$H,$K,$T1		; T1 = h + K256[i]
||	ADD	$s1,$X0,$X0		; X[i] += sigma1(X[i+14])
	XOR	$t0a,$S0,$S0
||	XOR	$t0e,$S1,$S1
||	ADD	$X0,$T1,$T1		; T1 += X[i]
||	STW	$X0,*$Xib++
	XOR	$t1a,$S0,$S0		; Sigma0(a)
||	XOR	$t1e,$S1,$S1		; Sigma1(e)
||	ADD	$Ch,$T1,$T1		; T1 += Ch(e,f,g)
||	MV	$X0,$X15
||	ROTL	$G,0,$H			; h = g
||	LDW	*$K256++,$K		; pre-fetch K256[i+1]
	ADD	$S1,$T1,$T1		; T1 += Sigma1(e)
||	ADD	$S0,$Maj,$T2		; T2 = Sigma0(a) + Maj(a,b,c)
||	MV	$F,$G			; g = f
||	MV	$Xn,$X0			; modulo-scheduled
||	LDW	*++$Xia,$X9		; modulo-scheduled
||	ROTL	$X1,25,$t0e		; modulo-scheduled
||	ROTL	$X14,15,$t0a		; modulo-scheduled
	ROTL	$X1,14,$t1e		; modulo-scheduled
||	ROTL	$X14,13,$t1a		; modulo-scheduled
||	MV	$E,$F			; f = e
||	ADD	$D,$T1,$E		; e = d + T1
||	MV	$C,$D			; d = c
||	MV	$B,$C			; c = b
	MV	$A,$B			; b = a
||	ADD	$T1,$T2,$A		; a = T1 + T2
||	SHRU	$X1,3,$s0		; modulo-scheduled
||	SHRU	$X14,10,$s1		; modulo-scheduled
	SPKERNEL

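	; Explanatory note (ours): end of the 64-round block.  If more
	; blocks remain (A0!=0), the K256 pointer is rewound by 260 bytes --
	; by our count 65 words: the 64 table entries plus the one extra
	; word pre-fetched past the end of the table -- and the first word
	; of the next block is pre-fetched; either way the eight working
	; variables are folded back into the context, and AMR is cleared
	; before returning, matching the note at the top of this file.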
   [A0]	B	outerloop?
|| [A0]	LDNW	*$INP++,$Xn		; pre-fetch input
|| [A0]	ADDK	-260,$K256		; rewind K256
||	ADD	$Actx,$A,$A		; accumulate ctx
||	ADD	$Ectx,$E,$E
||	ADD	$Bctx,$B,$B
	ADD	$Fctx,$F,$F
||	ADD	$Cctx,$C,$C
||	ADD	$Gctx,$G,$G
||	ADD	$Dctx,$D,$D
||	ADD	$Hctx,$H,$H
|| [A0]	LDW	*$K256++,$K		; pre-fetch K256[0]

  [!A0]	BNOP	RA
||[!A0]	MV	$CTXA,$CTXB
  [!A0]	MV	FP,SP			; restore stack pointer
||[!A0]	LDW	*FP[0],FP		; restore frame pointer
  [!A0]	STW	$A,*${CTXA}[0]		; save ctx
||[!A0]	STW	$E,*${CTXB}[4]
||[!A0]	MVK	0,B0
  [!A0]	STW	$B,*${CTXA}[1]
||[!A0]	STW	$F,*${CTXB}[5]
||[!A0]	MVC	B0,AMR			; clear AMR
	STW	$C,*${CTXA}[2]
||	STW	$G,*${CTXB}[6]
	STW	$D,*${CTXA}[3]
||	STW	$H,*${CTXB}[7]
	.endasmfunc

	.if	__TI_EABI__
	.sect	".text:sha_asm.const"
	.else
	.sect	".const:sha_asm"
	.endif
	.align	128
K256:
	.uword	0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5
	.uword	0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5
	.uword	0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3
	.uword	0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174
	.uword	0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc
	.uword	0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da
	.uword	0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7
	.uword	0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967
	.uword	0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13
	.uword	0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85
	.uword	0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3
	.uword	0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070
	.uword	0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5
	.uword	0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3
	.uword	0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208
	.uword	0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
	.cstring "SHA256 block transform for C64x+, CRYPTOGAMS by <appro\@openssl.org>"
	.align	4

___

print $code;
close STDOUT or die "error closing STDOUT: $!";