/*
 * ====================================================================
 * Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
 * project. Rights for redistribution and usage in source and binary
 * forms are granted according to the OpenSSL license.
 * ====================================================================
 *
 * sha256/512_block procedure for x86_64.
 *
 * 40% improvement over compiler-generated code on Opteron. On EM64T
 * sha256 was observed to run >80% faster and sha512 >40% faster. No
 * magical tricks, just straight implementation... I really wonder why
 * gcc [being armed with inline assembler] fails to generate as fast
 * code. The only thing which is cool about this module is that the
 * very same instruction sequence is used for both SHA-256 and SHA-512.
 * In the former case the instructions operate on 32-bit operands,
 * while in the latter on 64-bit ones. All I had to do was get one
 * flavor right; the other one passed the test right away:-)
 *
 * sha256_block runs in ~1005 cycles on Opteron, which gives you an
 * asymptotic performance of 64*1000/1005=63.7MBps times CPU clock
 * frequency in GHz. sha512_block runs in ~1275 cycles, which results
 * in 128*1000/1275=100MBps per GHz. Is there room for improvement?
 * Well, if you compare it to the IA-64 implementation, which maintains
 * X[16] in the register bank[!], tends to 4 instructions per CPU clock
 * cycle, and runs in 1003 cycles, then 1275 is a very good result for
 * the 3-way issue Opteron pipeline with X[16] maintained in memory. So
 * *if* there is a way to improve it, *then* the only way would be to
 * try to offload the X[16] updates to the SSE unit, but that would
 * require a "deeper" loop unroll, which in turn would naturally cause
 * size blow-up, not to mention increased complexity! And once again,
 * only *if* it's actually possible to noticeably improve overall ILP
 * (instruction-level parallelism) on a given CPU implementation.
 *
 * Special note on Intel EM64T. While the Opteron CPU exhibits a
 * perfect performance ratio of 1.5 between the 64- and 32-bit flavors
 * [see above], [currently available] EM64T CPUs apparently are far
 * from it. On the contrary, the 64-bit version, sha512_block, is ~30%
 * *slower* than the 32-bit sha256_block:-( This is presumably because
 * 64-bit shifts/rotates are not atomic instructions on EM64T, but are
 * implemented in microcode.
 */
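
/*
 * (For the arithmetic above: 64 and 128 are the block sizes in bytes,
 * since SHA-256 digests 64-byte blocks and SHA-512 128-byte blocks, so
 * bytes-per-block divided by cycles-per-block gives bytes per cycle,
 * i.e. the MBps-per-GHz figures quoted.)
 */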

/*
 * OpenSolaris OS modifications
 *
 * Sun elects to use this software under the BSD license.
 *
 * This source originates from OpenSSL file sha512-x86_64.pl at
 * ftp://ftp.openssl.org/snapshot/openssl-0.9.8-stable-SNAP-20080131.tar.gz
 * (presumably for future OpenSSL release 0.9.8h), with these changes:
 *
 * 1. Added perl "use strict" and declared variables.
 *
 * 2. Added OpenSolaris ENTRY_NP/SET_SIZE macros from
 * /usr/include/sys/asm_linkage.h, .ident keywords, and lint(1B) guards.
 *
 * 3. Removed x86_64-xlate.pl script (not needed for as(1) or gas(1)
 * assemblers). Replaced the .picmeup macro with assembler code.
 *
 * 4. Added 8 to $ctx, as OpenSolaris OS has an extra 4-byte field, "algotype",
 * at the beginning of SHA2_CTX (the next field is 8-byte aligned).
 */
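
/*
 * To illustrate change 4, here is a sketch (not the verbatim
 * OpenSolaris header; the exact SHA2_CTX definition is assumed): the
 * context is taken to begin with a 32-bit algorithm selector, after
 * which padding aligns the hash state to an 8-byte boundary:
 *
 *	typedef struct {
 *		uint32_t algotype;		// selects SHA-256 vs SHA-512
 *		// 4 bytes of padding, so the union below is 8-byte aligned
 *		union {
 *			uint32_t s32[8];	// SHA-256 state words
 *			uint64_t s64[8];	// SHA-512 state words
 *		} state;
 *		// ... bit count and block buffer follow ...
 *	} SHA2_CTX;
 *
 * Hence the "add $8,%rdi" in the prologue below: it advances the ctx
 * pointer past algotype (plus padding) to the SHA-512 state words.
 */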

/*
 * This file was generated by a perl script (sha512-x86_64.pl) that was
 * used to generate the sha256 and sha512 variants from the same code
 * base. The comments from the original file have been pasted above.
 */


#if defined(lint) || defined(__lint)
#include <sys/stdint.h>
#include <sha2/sha2.h>

void
SHA512TransformBlocks(SHA2_CTX *ctx, const void *in, size_t num)
{
	(void) ctx, (void) in, (void) num;
}


#else
#define	_ASM
#include <sys/asm_linkage.h>

ENTRY_NP(SHA512TransformBlocks)
	.cfi_startproc
	ENDBR
	movq	%rsp, %rax
	.cfi_def_cfa_register	%rax
	push	%rbx
	.cfi_offset	%rbx,-16
	push	%rbp
	.cfi_offset	%rbp,-24
	push	%r12
	.cfi_offset	%r12,-32
	push	%r13
	.cfi_offset	%r13,-40
	push	%r14
	.cfi_offset	%r14,-48
	push	%r15
	.cfi_offset	%r15,-56
	mov	%rsp,%rbp		# copy %rsp
	shl	$4,%rdx			# num*16
	sub	$16*8+4*8,%rsp
	lea	(%rsi,%rdx,8),%rdx	# inp+num*16*8
	and	$-64,%rsp		# align stack frame
	add	$8,%rdi			# Skip OpenSolaris field, "algotype"
	mov	%rdi,16*8+0*8(%rsp)	# save ctx, 1st arg
	mov	%rsi,16*8+1*8(%rsp)	# save inp, 2nd arg
	mov	%rdx,16*8+2*8(%rsp)	# save end pointer, "3rd" arg
	mov	%rbp,16*8+3*8(%rsp)	# save copy of %rsp
	# echo ".cfi_cfa_expression %rsp+152,deref,+56" |
	# openssl/crypto/perlasm/x86_64-xlate.pl
	.cfi_escape	0x0f,0x06,0x77,0x98,0x01,0x06,0x23,0x38

	#.picmeup %rbp
	# The .picmeup pseudo-directive, from perlasm/x86_64_xlate.pl, puts
	# the address of the "next" instruction into the target register
	# (%rbp). This generates these 2 instructions:
	lea	.Llea(%rip),%rbp
	#nop	# .picmeup generates a nop for mod 8 alignment--not needed here

.Llea:
	lea	K512-.(%rbp),%rbp

	mov	8*0(%rdi),%rax
	mov	8*1(%rdi),%rbx
	mov	8*2(%rdi),%rcx
	mov	8*3(%rdi),%rdx
	mov	8*4(%rdi),%r8
	mov	8*5(%rdi),%r9
	mov	8*6(%rdi),%r10
	mov	8*7(%rdi),%r11
	jmp	.Lloop
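
/*
 * Each of the 80 unrolled rounds below evaluates the standard SHA-512
 * round function (FIPS 180-4). The eight state words live in
 * %rax,%rbx,%rcx,%rdx,%r8,%r9,%r10,%r11, and the a..h roles rotate by
 * register renaming rather than by moving data:
 *
 *	T1 = h + Sigma1(e) + Ch(e,f,g) + K[round] + W[round]
 *	d += T1;  h = T1 + Sigma0(a) + Maj(a,b,c)
 * where
 *	Sigma0(a) = (a ror 28) ^ (a ror 34) ^ (a ror 39)
 *	Sigma1(e) = (e ror 14) ^ (e ror 18) ^ (e ror 41)
 *	Ch(e,f,g) = ((f ^ g) & e) ^ g
 *	Maj(a,b,c) = ((a | c) & b) | (a & c)
 *
 * The "ror $34 then ror $5" and "ror $18 then ror $23" pairs below
 * compose to the 39- and 41-bit rotates.
 */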

	.align	16
.Lloop:
	xor	%rdi,%rdi
	mov	8*0(%rsi),%r12
	bswap	%r12
	mov	%r8,%r13
	mov	%r8,%r14
	mov	%r9,%r15

	ror	$14,%r13
	ror	$18,%r14
	xor	%r10,%r15	# f^g

	xor	%r14,%r13
	ror	$23,%r14
	and	%r8,%r15	# (f^g)&e
	mov	%r12,0(%rsp)

	xor	%r14,%r13	# Sigma1(e)
	xor	%r10,%r15	# Ch(e,f,g)=((f^g)&e)^g
	add	%r11,%r12	# T1+=h

	mov	%rax,%r11
	add	%r13,%r12	# T1+=Sigma1(e)

	add	%r15,%r12	# T1+=Ch(e,f,g)
	mov	%rax,%r13
	mov	%rax,%r14

	ror	$28,%r11
	ror	$34,%r13
	mov	%rax,%r15
	add	(%rbp,%rdi,8),%r12	# T1+=K[round]

	xor	%r13,%r11
	ror	$5,%r13
	or	%rcx,%r14	# a|c

	xor	%r13,%r11	# h=Sigma0(a)
	and	%rcx,%r15	# a&c
	add	%r12,%rdx	# d+=T1

	and	%rbx,%r14	# (a|c)&b
	add	%r12,%r11	# h+=T1

	or	%r15,%r14	# Maj(a,b,c)=((a|c)&b)|(a&c)
	lea	1(%rdi),%rdi	# round++

	add	%r14,%r11	# h+=Maj(a,b,c)
	mov	8*1(%rsi),%r12
	bswap	%r12
	mov	%rdx,%r13
	mov	%rdx,%r14
	mov	%r8,%r15

	ror	$14,%r13
	ror	$18,%r14
	xor	%r9,%r15	# f^g

	xor	%r14,%r13
	ror	$23,%r14
	and	%rdx,%r15	# (f^g)&e
	mov	%r12,8(%rsp)

	xor	%r14,%r13	# Sigma1(e)
	xor	%r9,%r15	# Ch(e,f,g)=((f^g)&e)^g
	add	%r10,%r12	# T1+=h

	mov	%r11,%r10
	add	%r13,%r12	# T1+=Sigma1(e)

	add	%r15,%r12	# T1+=Ch(e,f,g)
	mov	%r11,%r13
	mov	%r11,%r14

	ror	$28,%r10
	ror	$34,%r13
	mov	%r11,%r15
	add	(%rbp,%rdi,8),%r12	# T1+=K[round]

	xor	%r13,%r10
	ror	$5,%r13
	or	%rbx,%r14	# a|c

	xor	%r13,%r10	# h=Sigma0(a)
	and	%rbx,%r15	# a&c
	add	%r12,%rcx	# d+=T1

	and	%rax,%r14	# (a|c)&b
	add	%r12,%r10	# h+=T1

	or	%r15,%r14	# Maj(a,b,c)=((a|c)&b)|(a&c)
	lea	1(%rdi),%rdi	# round++

	add	%r14,%r10	# h+=Maj(a,b,c)
	mov	8*2(%rsi),%r12
	bswap	%r12
	mov	%rcx,%r13
	mov	%rcx,%r14
	mov	%rdx,%r15

	ror	$14,%r13
	ror	$18,%r14
	xor	%r8,%r15	# f^g

	xor	%r14,%r13
	ror	$23,%r14
	and	%rcx,%r15	# (f^g)&e
	mov	%r12,16(%rsp)

	xor	%r14,%r13	# Sigma1(e)
	xor	%r8,%r15	# Ch(e,f,g)=((f^g)&e)^g
	add	%r9,%r12	# T1+=h

	mov	%r10,%r9
	add	%r13,%r12	# T1+=Sigma1(e)

	add	%r15,%r12	# T1+=Ch(e,f,g)
	mov	%r10,%r13
	mov	%r10,%r14

	ror	$28,%r9
	ror	$34,%r13
	mov	%r10,%r15
	add	(%rbp,%rdi,8),%r12	# T1+=K[round]

	xor	%r13,%r9
	ror	$5,%r13
	or	%rax,%r14	# a|c

	xor	%r13,%r9	# h=Sigma0(a)
	and	%rax,%r15	# a&c
	add	%r12,%rbx	# d+=T1

	and	%r11,%r14	# (a|c)&b
	add	%r12,%r9	# h+=T1

	or	%r15,%r14	# Maj(a,b,c)=((a|c)&b)|(a&c)
	lea	1(%rdi),%rdi	# round++

	add	%r14,%r9	# h+=Maj(a,b,c)
	mov	8*3(%rsi),%r12
	bswap	%r12
	mov	%rbx,%r13
	mov	%rbx,%r14
	mov	%rcx,%r15

	ror	$14,%r13
	ror	$18,%r14
	xor	%rdx,%r15	# f^g

	xor	%r14,%r13
	ror	$23,%r14
	and	%rbx,%r15	# (f^g)&e
	mov	%r12,24(%rsp)

	xor	%r14,%r13	# Sigma1(e)
	xor	%rdx,%r15	# Ch(e,f,g)=((f^g)&e)^g
	add	%r8,%r12	# T1+=h

	mov	%r9,%r8
	add	%r13,%r12	# T1+=Sigma1(e)

	add	%r15,%r12	# T1+=Ch(e,f,g)
	mov	%r9,%r13
	mov	%r9,%r14

	ror	$28,%r8
	ror	$34,%r13
	mov	%r9,%r15
	add	(%rbp,%rdi,8),%r12	# T1+=K[round]

	xor	%r13,%r8
	ror	$5,%r13
	or	%r11,%r14	# a|c

	xor	%r13,%r8	# h=Sigma0(a)
	and	%r11,%r15	# a&c
	add	%r12,%rax	# d+=T1

	and	%r10,%r14	# (a|c)&b
	add	%r12,%r8	# h+=T1

	or	%r15,%r14	# Maj(a,b,c)=((a|c)&b)|(a&c)
	lea	1(%rdi),%rdi	# round++

	add	%r14,%r8	# h+=Maj(a,b,c)
	mov	8*4(%rsi),%r12
	bswap	%r12
	mov	%rax,%r13
	mov	%rax,%r14
	mov	%rbx,%r15

	ror	$14,%r13
	ror	$18,%r14
	xor	%rcx,%r15	# f^g

	xor	%r14,%r13
	ror	$23,%r14
	and	%rax,%r15	# (f^g)&e
	mov	%r12,32(%rsp)

	xor	%r14,%r13	# Sigma1(e)
	xor	%rcx,%r15	# Ch(e,f,g)=((f^g)&e)^g
	add	%rdx,%r12	# T1+=h

	mov	%r8,%rdx
	add	%r13,%r12	# T1+=Sigma1(e)

	add	%r15,%r12	# T1+=Ch(e,f,g)
	mov	%r8,%r13
	mov	%r8,%r14

	ror	$28,%rdx
	ror	$34,%r13
	mov	%r8,%r15
	add	(%rbp,%rdi,8),%r12	# T1+=K[round]

	xor	%r13,%rdx
	ror	$5,%r13
	or	%r10,%r14	# a|c

	xor	%r13,%rdx	# h=Sigma0(a)
	and	%r10,%r15	# a&c
	add	%r12,%r11	# d+=T1

	and	%r9,%r14	# (a|c)&b
	add	%r12,%rdx	# h+=T1

	or	%r15,%r14	# Maj(a,b,c)=((a|c)&b)|(a&c)
	lea	1(%rdi),%rdi	# round++

	add	%r14,%rdx	# h+=Maj(a,b,c)
	mov	8*5(%rsi),%r12
	bswap	%r12
	mov	%r11,%r13
	mov	%r11,%r14
	mov	%rax,%r15

	ror	$14,%r13
	ror	$18,%r14
	xor	%rbx,%r15	# f^g

	xor	%r14,%r13
	ror	$23,%r14
	and	%r11,%r15	# (f^g)&e
	mov	%r12,40(%rsp)

	xor	%r14,%r13	# Sigma1(e)
	xor	%rbx,%r15	# Ch(e,f,g)=((f^g)&e)^g
	add	%rcx,%r12	# T1+=h

	mov	%rdx,%rcx
	add	%r13,%r12	# T1+=Sigma1(e)

	add	%r15,%r12	# T1+=Ch(e,f,g)
	mov	%rdx,%r13
	mov	%rdx,%r14

	ror	$28,%rcx
	ror	$34,%r13
	mov	%rdx,%r15
	add	(%rbp,%rdi,8),%r12	# T1+=K[round]

	xor	%r13,%rcx
	ror	$5,%r13
	or	%r9,%r14	# a|c

	xor	%r13,%rcx	# h=Sigma0(a)
	and	%r9,%r15	# a&c
	add	%r12,%r10	# d+=T1

	and	%r8,%r14	# (a|c)&b
	add	%r12,%rcx	# h+=T1

	or	%r15,%r14	# Maj(a,b,c)=((a|c)&b)|(a&c)
	lea	1(%rdi),%rdi	# round++

	add	%r14,%rcx	# h+=Maj(a,b,c)
	mov	8*6(%rsi),%r12
	bswap	%r12
	mov	%r10,%r13
	mov	%r10,%r14
	mov	%r11,%r15

	ror	$14,%r13
	ror	$18,%r14
	xor	%rax,%r15	# f^g

	xor	%r14,%r13
	ror	$23,%r14
	and	%r10,%r15	# (f^g)&e
	mov	%r12,48(%rsp)

	xor	%r14,%r13	# Sigma1(e)
	xor	%rax,%r15	# Ch(e,f,g)=((f^g)&e)^g
	add	%rbx,%r12	# T1+=h

	mov	%rcx,%rbx
	add	%r13,%r12	# T1+=Sigma1(e)

	add	%r15,%r12	# T1+=Ch(e,f,g)
	mov	%rcx,%r13
	mov	%rcx,%r14

	ror	$28,%rbx
	ror	$34,%r13
	mov	%rcx,%r15
	add	(%rbp,%rdi,8),%r12	# T1+=K[round]

	xor	%r13,%rbx
	ror	$5,%r13
	or	%r8,%r14	# a|c

	xor	%r13,%rbx	# h=Sigma0(a)
	and	%r8,%r15	# a&c
	add	%r12,%r9	# d+=T1

	and	%rdx,%r14	# (a|c)&b
	add	%r12,%rbx	# h+=T1

	or	%r15,%r14	# Maj(a,b,c)=((a|c)&b)|(a&c)
	lea	1(%rdi),%rdi	# round++

	add	%r14,%rbx	# h+=Maj(a,b,c)
	mov	8*7(%rsi),%r12
	bswap	%r12
	mov	%r9,%r13
	mov	%r9,%r14
	mov	%r10,%r15

	ror	$14,%r13
	ror	$18,%r14
	xor	%r11,%r15	# f^g

	xor	%r14,%r13
	ror	$23,%r14
	and	%r9,%r15	# (f^g)&e
	mov	%r12,56(%rsp)

	xor	%r14,%r13	# Sigma1(e)
	xor	%r11,%r15	# Ch(e,f,g)=((f^g)&e)^g
	add	%rax,%r12	# T1+=h

	mov	%rbx,%rax
	add	%r13,%r12	# T1+=Sigma1(e)

	add	%r15,%r12	# T1+=Ch(e,f,g)
	mov	%rbx,%r13
	mov	%rbx,%r14

	ror	$28,%rax
	ror	$34,%r13
	mov	%rbx,%r15
	add	(%rbp,%rdi,8),%r12	# T1+=K[round]

	xor	%r13,%rax
	ror	$5,%r13
	or	%rdx,%r14	# a|c

	xor	%r13,%rax	# h=Sigma0(a)
	and	%rdx,%r15	# a&c
	add	%r12,%r8	# d+=T1

	and	%rcx,%r14	# (a|c)&b
	add	%r12,%rax	# h+=T1

	or	%r15,%r14	# Maj(a,b,c)=((a|c)&b)|(a&c)
	lea	1(%rdi),%rdi	# round++

	add	%r14,%rax	# h+=Maj(a,b,c)
	mov	8*8(%rsi),%r12
	bswap	%r12
	mov	%r8,%r13
	mov	%r8,%r14
	mov	%r9,%r15

	ror	$14,%r13
	ror	$18,%r14
	xor	%r10,%r15	# f^g

	xor	%r14,%r13
	ror	$23,%r14
	and	%r8,%r15	# (f^g)&e
	mov	%r12,64(%rsp)

	xor	%r14,%r13	# Sigma1(e)
	xor	%r10,%r15	# Ch(e,f,g)=((f^g)&e)^g
	add	%r11,%r12	# T1+=h

	mov	%rax,%r11
	add	%r13,%r12	# T1+=Sigma1(e)

	add	%r15,%r12	# T1+=Ch(e,f,g)
	mov	%rax,%r13
	mov	%rax,%r14

	ror	$28,%r11
	ror	$34,%r13
	mov	%rax,%r15
	add	(%rbp,%rdi,8),%r12	# T1+=K[round]

	xor	%r13,%r11
	ror	$5,%r13
	or	%rcx,%r14	# a|c

	xor	%r13,%r11	# h=Sigma0(a)
	and	%rcx,%r15	# a&c
	add	%r12,%rdx	# d+=T1

	and	%rbx,%r14	# (a|c)&b
	add	%r12,%r11	# h+=T1

	or	%r15,%r14	# Maj(a,b,c)=((a|c)&b)|(a&c)
	lea	1(%rdi),%rdi	# round++

	add	%r14,%r11	# h+=Maj(a,b,c)
	mov	8*9(%rsi),%r12
	bswap	%r12
	mov	%rdx,%r13
	mov	%rdx,%r14
	mov	%r8,%r15

	ror	$14,%r13
	ror	$18,%r14
	xor	%r9,%r15	# f^g

	xor	%r14,%r13
	ror	$23,%r14
	and	%rdx,%r15	# (f^g)&e
	mov	%r12,72(%rsp)

	xor	%r14,%r13	# Sigma1(e)
	xor	%r9,%r15	# Ch(e,f,g)=((f^g)&e)^g
	add	%r10,%r12	# T1+=h

	mov	%r11,%r10
	add	%r13,%r12	# T1+=Sigma1(e)

	add	%r15,%r12	# T1+=Ch(e,f,g)
	mov	%r11,%r13
	mov	%r11,%r14

	ror	$28,%r10
	ror	$34,%r13
	mov	%r11,%r15
	add	(%rbp,%rdi,8),%r12	# T1+=K[round]

	xor	%r13,%r10
	ror	$5,%r13
	or	%rbx,%r14	# a|c

	xor	%r13,%r10	# h=Sigma0(a)
	and	%rbx,%r15	# a&c
	add	%r12,%rcx	# d+=T1

	and	%rax,%r14	# (a|c)&b
	add	%r12,%r10	# h+=T1

	or	%r15,%r14	# Maj(a,b,c)=((a|c)&b)|(a&c)
	lea	1(%rdi),%rdi	# round++

	add	%r14,%r10	# h+=Maj(a,b,c)
	mov	8*10(%rsi),%r12
	bswap	%r12
	mov	%rcx,%r13
	mov	%rcx,%r14
	mov	%rdx,%r15

	ror	$14,%r13
	ror	$18,%r14
	xor	%r8,%r15	# f^g

	xor	%r14,%r13
	ror	$23,%r14
	and	%rcx,%r15	# (f^g)&e
	mov	%r12,80(%rsp)

	xor	%r14,%r13	# Sigma1(e)
	xor	%r8,%r15	# Ch(e,f,g)=((f^g)&e)^g
	add	%r9,%r12	# T1+=h

	mov	%r10,%r9
	add	%r13,%r12	# T1+=Sigma1(e)

	add	%r15,%r12	# T1+=Ch(e,f,g)
	mov	%r10,%r13
	mov	%r10,%r14

	ror	$28,%r9
	ror	$34,%r13
	mov	%r10,%r15
	add	(%rbp,%rdi,8),%r12	# T1+=K[round]

	xor	%r13,%r9
	ror	$5,%r13
	or	%rax,%r14	# a|c

	xor	%r13,%r9	# h=Sigma0(a)
	and	%rax,%r15	# a&c
	add	%r12,%rbx	# d+=T1

	and	%r11,%r14	# (a|c)&b
	add	%r12,%r9	# h+=T1

	or	%r15,%r14	# Maj(a,b,c)=((a|c)&b)|(a&c)
	lea	1(%rdi),%rdi	# round++

	add	%r14,%r9	# h+=Maj(a,b,c)
	mov	8*11(%rsi),%r12
	bswap	%r12
	mov	%rbx,%r13
	mov	%rbx,%r14
	mov	%rcx,%r15

	ror	$14,%r13
	ror	$18,%r14
	xor	%rdx,%r15	# f^g

	xor	%r14,%r13
	ror	$23,%r14
	and	%rbx,%r15	# (f^g)&e
	mov	%r12,88(%rsp)

	xor	%r14,%r13	# Sigma1(e)
	xor	%rdx,%r15	# Ch(e,f,g)=((f^g)&e)^g
	add	%r8,%r12	# T1+=h

	mov	%r9,%r8
	add	%r13,%r12	# T1+=Sigma1(e)

	add	%r15,%r12	# T1+=Ch(e,f,g)
	mov	%r9,%r13
	mov	%r9,%r14

	ror	$28,%r8
	ror	$34,%r13
	mov	%r9,%r15
	add	(%rbp,%rdi,8),%r12	# T1+=K[round]

	xor	%r13,%r8
	ror	$5,%r13
	or	%r11,%r14	# a|c

	xor	%r13,%r8	# h=Sigma0(a)
	and	%r11,%r15	# a&c
	add	%r12,%rax	# d+=T1

	and	%r10,%r14	# (a|c)&b
	add	%r12,%r8	# h+=T1

	or	%r15,%r14	# Maj(a,b,c)=((a|c)&b)|(a&c)
	lea	1(%rdi),%rdi	# round++

	add	%r14,%r8	# h+=Maj(a,b,c)
	mov	8*12(%rsi),%r12
	bswap	%r12
	mov	%rax,%r13
	mov	%rax,%r14
	mov	%rbx,%r15

	ror	$14,%r13
	ror	$18,%r14
	xor	%rcx,%r15	# f^g

	xor	%r14,%r13
	ror	$23,%r14
	and	%rax,%r15	# (f^g)&e
	mov	%r12,96(%rsp)

	xor	%r14,%r13	# Sigma1(e)
	xor	%rcx,%r15	# Ch(e,f,g)=((f^g)&e)^g
	add	%rdx,%r12	# T1+=h

	mov	%r8,%rdx
	add	%r13,%r12	# T1+=Sigma1(e)

	add	%r15,%r12	# T1+=Ch(e,f,g)
	mov	%r8,%r13
	mov	%r8,%r14

	ror	$28,%rdx
	ror	$34,%r13
	mov	%r8,%r15
	add	(%rbp,%rdi,8),%r12	# T1+=K[round]

	xor	%r13,%rdx
	ror	$5,%r13
	or	%r10,%r14	# a|c

	xor	%r13,%rdx	# h=Sigma0(a)
	and	%r10,%r15	# a&c
	add	%r12,%r11	# d+=T1

	and	%r9,%r14	# (a|c)&b
	add	%r12,%rdx	# h+=T1

	or	%r15,%r14	# Maj(a,b,c)=((a|c)&b)|(a&c)
	lea	1(%rdi),%rdi	# round++

	add	%r14,%rdx	# h+=Maj(a,b,c)
	mov	8*13(%rsi),%r12
	bswap	%r12
	mov	%r11,%r13
	mov	%r11,%r14
	mov	%rax,%r15

	ror	$14,%r13
	ror	$18,%r14
	xor	%rbx,%r15	# f^g

	xor	%r14,%r13
	ror	$23,%r14
	and	%r11,%r15	# (f^g)&e
	mov	%r12,104(%rsp)

	xor	%r14,%r13	# Sigma1(e)
	xor	%rbx,%r15	# Ch(e,f,g)=((f^g)&e)^g
	add	%rcx,%r12	# T1+=h

	mov	%rdx,%rcx
	add	%r13,%r12	# T1+=Sigma1(e)

	add	%r15,%r12	# T1+=Ch(e,f,g)
	mov	%rdx,%r13
	mov	%rdx,%r14

	ror	$28,%rcx
	ror	$34,%r13
	mov	%rdx,%r15
	add	(%rbp,%rdi,8),%r12	# T1+=K[round]

	xor	%r13,%rcx
	ror	$5,%r13
	or	%r9,%r14	# a|c

	xor	%r13,%rcx	# h=Sigma0(a)
	and	%r9,%r15	# a&c
	add	%r12,%r10	# d+=T1

	and	%r8,%r14	# (a|c)&b
	add	%r12,%rcx	# h+=T1

	or	%r15,%r14	# Maj(a,b,c)=((a|c)&b)|(a&c)
	lea	1(%rdi),%rdi	# round++

	add	%r14,%rcx	# h+=Maj(a,b,c)
	mov	8*14(%rsi),%r12
	bswap	%r12
	mov	%r10,%r13
	mov	%r10,%r14
	mov	%r11,%r15

	ror	$14,%r13
	ror	$18,%r14
	xor	%rax,%r15	# f^g

	xor	%r14,%r13
	ror	$23,%r14
	and	%r10,%r15	# (f^g)&e
	mov	%r12,112(%rsp)

	xor	%r14,%r13	# Sigma1(e)
	xor	%rax,%r15	# Ch(e,f,g)=((f^g)&e)^g
	add	%rbx,%r12	# T1+=h

	mov	%rcx,%rbx
	add	%r13,%r12	# T1+=Sigma1(e)

	add	%r15,%r12	# T1+=Ch(e,f,g)
	mov	%rcx,%r13
	mov	%rcx,%r14

	ror	$28,%rbx
	ror	$34,%r13
	mov	%rcx,%r15
	add	(%rbp,%rdi,8),%r12	# T1+=K[round]

	xor	%r13,%rbx
	ror	$5,%r13
	or	%r8,%r14	# a|c

	xor	%r13,%rbx	# h=Sigma0(a)
	and	%r8,%r15	# a&c
	add	%r12,%r9	# d+=T1

	and	%rdx,%r14	# (a|c)&b
	add	%r12,%rbx	# h+=T1

	or	%r15,%r14	# Maj(a,b,c)=((a|c)&b)|(a&c)
	lea	1(%rdi),%rdi	# round++

	add	%r14,%rbx	# h+=Maj(a,b,c)
	mov	8*15(%rsi),%r12
	bswap	%r12
	mov	%r9,%r13
	mov	%r9,%r14
	mov	%r10,%r15

	ror	$14,%r13
	ror	$18,%r14
	xor	%r11,%r15	# f^g

	xor	%r14,%r13
	ror	$23,%r14
	and	%r9,%r15	# (f^g)&e
	mov	%r12,120(%rsp)

	xor	%r14,%r13	# Sigma1(e)
	xor	%r11,%r15	# Ch(e,f,g)=((f^g)&e)^g
	add	%rax,%r12	# T1+=h

	mov	%rbx,%rax
	add	%r13,%r12	# T1+=Sigma1(e)

	add	%r15,%r12	# T1+=Ch(e,f,g)
	mov	%rbx,%r13
	mov	%rbx,%r14

	ror	$28,%rax
	ror	$34,%r13
	mov	%rbx,%r15
	add	(%rbp,%rdi,8),%r12	# T1+=K[round]

	xor	%r13,%rax
	ror	$5,%r13
	or	%rdx,%r14	# a|c

	xor	%r13,%rax	# h=Sigma0(a)
	and	%rdx,%r15	# a&c
	add	%r12,%r8	# d+=T1

	and	%rcx,%r14	# (a|c)&b
	add	%r12,%rax	# h+=T1

	or	%r15,%r14	# Maj(a,b,c)=((a|c)&b)|(a&c)
	lea	1(%rdi),%rdi	# round++

	add	%r14,%rax	# h+=Maj(a,b,c)
	jmp	.Lrounds_16_xx
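
/*
 * Rounds 16..79 also expand the message schedule in place; X[0..15]
 * occupies the 16 quadwords at 0(%rsp)..120(%rsp):
 *
 *	sigma0(x) = (x ror 1) ^ (x ror 8) ^ (x >> 7)
 *	sigma1(x) = (x ror 19) ^ (x ror 61) ^ (x >> 6)
 *	X[i&15] += sigma0(X[(i+1)&15]) + sigma1(X[(i+14)&15]) + X[(i+9)&15]
 *
 * (ror $1 then ror $7 composes to the 8-bit rotate, and ror $19 then
 * ror $42 to the 61-bit rotate.)
 */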
	.align	16
.Lrounds_16_xx:
	mov	8(%rsp),%r13
	mov	112(%rsp),%r12

	mov	%r13,%r15

	shr	$7,%r13
	ror	$1,%r15

	xor	%r15,%r13
	ror	$7,%r15

	xor	%r15,%r13	# sigma0(X[(i+1)&0xf])
	mov	%r12,%r14

	shr	$6,%r12
	ror	$19,%r14

	xor	%r14,%r12
	ror	$42,%r14

	xor	%r14,%r12	# sigma1(X[(i+14)&0xf])

	add	%r13,%r12

	add	72(%rsp),%r12

	add	0(%rsp),%r12
	mov	%r8,%r13
	mov	%r8,%r14
	mov	%r9,%r15

	ror	$14,%r13
	ror	$18,%r14
	xor	%r10,%r15	# f^g

	xor	%r14,%r13
	ror	$23,%r14
	and	%r8,%r15	# (f^g)&e
	mov	%r12,0(%rsp)

	xor	%r14,%r13	# Sigma1(e)
	xor	%r10,%r15	# Ch(e,f,g)=((f^g)&e)^g
	add	%r11,%r12	# T1+=h

	mov	%rax,%r11
	add	%r13,%r12	# T1+=Sigma1(e)

	add	%r15,%r12	# T1+=Ch(e,f,g)
	mov	%rax,%r13
	mov	%rax,%r14

	ror	$28,%r11
	ror	$34,%r13
	mov	%rax,%r15
	add	(%rbp,%rdi,8),%r12	# T1+=K[round]

	xor	%r13,%r11
	ror	$5,%r13
	or	%rcx,%r14	# a|c

	xor	%r13,%r11	# h=Sigma0(a)
	and	%rcx,%r15	# a&c
	add	%r12,%rdx	# d+=T1

	and	%rbx,%r14	# (a|c)&b
	add	%r12,%r11	# h+=T1

	or	%r15,%r14	# Maj(a,b,c)=((a|c)&b)|(a&c)
	lea	1(%rdi),%rdi	# round++

	add	%r14,%r11	# h+=Maj(a,b,c)
	mov	16(%rsp),%r13
	mov	120(%rsp),%r12

	mov	%r13,%r15

	shr	$7,%r13
	ror	$1,%r15

	xor	%r15,%r13
	ror	$7,%r15

	xor	%r15,%r13	# sigma0(X[(i+1)&0xf])
	mov	%r12,%r14

	shr	$6,%r12
	ror	$19,%r14

	xor	%r14,%r12
	ror	$42,%r14

	xor	%r14,%r12	# sigma1(X[(i+14)&0xf])

	add	%r13,%r12

	add	80(%rsp),%r12

	add	8(%rsp),%r12
	mov	%rdx,%r13
	mov	%rdx,%r14
	mov	%r8,%r15

	ror	$14,%r13
	ror	$18,%r14
	xor	%r9,%r15	# f^g

	xor	%r14,%r13
	ror	$23,%r14
	and	%rdx,%r15	# (f^g)&e
	mov	%r12,8(%rsp)

	xor	%r14,%r13	# Sigma1(e)
	xor	%r9,%r15	# Ch(e,f,g)=((f^g)&e)^g
	add	%r10,%r12	# T1+=h

	mov	%r11,%r10
	add	%r13,%r12	# T1+=Sigma1(e)

	add	%r15,%r12	# T1+=Ch(e,f,g)
	mov	%r11,%r13
	mov	%r11,%r14

	ror	$28,%r10
	ror	$34,%r13
	mov	%r11,%r15
	add	(%rbp,%rdi,8),%r12	# T1+=K[round]

	xor	%r13,%r10
	ror	$5,%r13
	or	%rbx,%r14	# a|c

	xor	%r13,%r10	# h=Sigma0(a)
	and	%rbx,%r15	# a&c
	add	%r12,%rcx	# d+=T1

	and	%rax,%r14	# (a|c)&b
	add	%r12,%r10	# h+=T1

	or	%r15,%r14	# Maj(a,b,c)=((a|c)&b)|(a&c)
	lea	1(%rdi),%rdi	# round++

	add	%r14,%r10	# h+=Maj(a,b,c)
	mov	24(%rsp),%r13
	mov	0(%rsp),%r12

	mov	%r13,%r15

	shr	$7,%r13
	ror	$1,%r15

	xor	%r15,%r13
	ror	$7,%r15

	xor	%r15,%r13	# sigma0(X[(i+1)&0xf])
	mov	%r12,%r14

	shr	$6,%r12
	ror	$19,%r14

	xor	%r14,%r12
	ror	$42,%r14

	xor	%r14,%r12	# sigma1(X[(i+14)&0xf])

	add	%r13,%r12

	add	88(%rsp),%r12

	add	16(%rsp),%r12
	mov	%rcx,%r13
	mov	%rcx,%r14
	mov	%rdx,%r15

	ror	$14,%r13
	ror	$18,%r14
	xor	%r8,%r15	# f^g

	xor	%r14,%r13
	ror	$23,%r14
	and	%rcx,%r15	# (f^g)&e
	mov	%r12,16(%rsp)

	xor	%r14,%r13	# Sigma1(e)
	xor	%r8,%r15	# Ch(e,f,g)=((f^g)&e)^g
	add	%r9,%r12	# T1+=h

	mov	%r10,%r9
	add	%r13,%r12	# T1+=Sigma1(e)

	add	%r15,%r12	# T1+=Ch(e,f,g)
	mov	%r10,%r13
	mov	%r10,%r14

	ror	$28,%r9
	ror	$34,%r13
	mov	%r10,%r15
	add	(%rbp,%rdi,8),%r12	# T1+=K[round]

	xor	%r13,%r9
	ror	$5,%r13
	or	%rax,%r14	# a|c

	xor	%r13,%r9	# h=Sigma0(a)
	and	%rax,%r15	# a&c
	add	%r12,%rbx	# d+=T1

	and	%r11,%r14	# (a|c)&b
	add	%r12,%r9	# h+=T1

	or	%r15,%r14	# Maj(a,b,c)=((a|c)&b)|(a&c)
	lea	1(%rdi),%rdi	# round++

	add	%r14,%r9	# h+=Maj(a,b,c)
	mov	32(%rsp),%r13
	mov	8(%rsp),%r12

	mov	%r13,%r15

	shr	$7,%r13
	ror	$1,%r15

	xor	%r15,%r13
	ror	$7,%r15

	xor	%r15,%r13	# sigma0(X[(i+1)&0xf])
	mov	%r12,%r14

	shr	$6,%r12
	ror	$19,%r14

	xor	%r14,%r12
	ror	$42,%r14

	xor	%r14,%r12	# sigma1(X[(i+14)&0xf])

	add	%r13,%r12

	add	96(%rsp),%r12

	add	24(%rsp),%r12
	mov	%rbx,%r13
	mov	%rbx,%r14
	mov	%rcx,%r15

	ror	$14,%r13
	ror	$18,%r14
	xor	%rdx,%r15	# f^g

	xor	%r14,%r13
	ror	$23,%r14
	and	%rbx,%r15	# (f^g)&e
	mov	%r12,24(%rsp)

	xor	%r14,%r13	# Sigma1(e)
	xor	%rdx,%r15	# Ch(e,f,g)=((f^g)&e)^g
	add	%r8,%r12	# T1+=h

	mov	%r9,%r8
	add	%r13,%r12	# T1+=Sigma1(e)

	add	%r15,%r12	# T1+=Ch(e,f,g)
	mov	%r9,%r13
	mov	%r9,%r14

	ror	$28,%r8
	ror	$34,%r13
	mov	%r9,%r15
	add	(%rbp,%rdi,8),%r12	# T1+=K[round]

	xor	%r13,%r8
	ror	$5,%r13
	or	%r11,%r14	# a|c

	xor	%r13,%r8	# h=Sigma0(a)
	and	%r11,%r15	# a&c
	add	%r12,%rax	# d+=T1

	and	%r10,%r14	# (a|c)&b
	add	%r12,%r8	# h+=T1

	or	%r15,%r14	# Maj(a,b,c)=((a|c)&b)|(a&c)
	lea	1(%rdi),%rdi	# round++

	add	%r14,%r8	# h+=Maj(a,b,c)
	mov	40(%rsp),%r13
	mov	16(%rsp),%r12

	mov	%r13,%r15

	shr	$7,%r13
	ror	$1,%r15

	xor	%r15,%r13
	ror	$7,%r15

	xor	%r15,%r13	# sigma0(X[(i+1)&0xf])
	mov	%r12,%r14

	shr	$6,%r12
	ror	$19,%r14

	xor	%r14,%r12
	ror	$42,%r14

	xor	%r14,%r12	# sigma1(X[(i+14)&0xf])

	add	%r13,%r12

	add	104(%rsp),%r12

	add	32(%rsp),%r12
	mov	%rax,%r13
	mov	%rax,%r14
	mov	%rbx,%r15

	ror	$14,%r13
	ror	$18,%r14
	xor	%rcx,%r15	# f^g

	xor	%r14,%r13
	ror	$23,%r14
	and	%rax,%r15	# (f^g)&e
	mov	%r12,32(%rsp)

	xor	%r14,%r13	# Sigma1(e)
	xor	%rcx,%r15	# Ch(e,f,g)=((f^g)&e)^g
	add	%rdx,%r12	# T1+=h

	mov	%r8,%rdx
	add	%r13,%r12	# T1+=Sigma1(e)

	add	%r15,%r12	# T1+=Ch(e,f,g)
	mov	%r8,%r13
	mov	%r8,%r14

	ror	$28,%rdx
	ror	$34,%r13
	mov	%r8,%r15
	add	(%rbp,%rdi,8),%r12	# T1+=K[round]

	xor	%r13,%rdx
	ror	$5,%r13
	or	%r10,%r14	# a|c

	xor	%r13,%rdx	# h=Sigma0(a)
	and	%r10,%r15	# a&c
	add	%r12,%r11	# d+=T1

	and	%r9,%r14	# (a|c)&b
	add	%r12,%rdx	# h+=T1

	or	%r15,%r14	# Maj(a,b,c)=((a|c)&b)|(a&c)
	lea	1(%rdi),%rdi	# round++

	add	%r14,%rdx	# h+=Maj(a,b,c)
	mov	48(%rsp),%r13
	mov	24(%rsp),%r12

	mov	%r13,%r15

	shr	$7,%r13
	ror	$1,%r15

	xor	%r15,%r13
	ror	$7,%r15

	xor	%r15,%r13	# sigma0(X[(i+1)&0xf])
	mov	%r12,%r14

	shr	$6,%r12
	ror	$19,%r14

	xor	%r14,%r12
	ror	$42,%r14

	xor	%r14,%r12	# sigma1(X[(i+14)&0xf])

	add	%r13,%r12

	add	112(%rsp),%r12

	add	40(%rsp),%r12
	mov	%r11,%r13
	mov	%r11,%r14
	mov	%rax,%r15

	ror	$14,%r13
	ror	$18,%r14
	xor	%rbx,%r15	# f^g

	xor	%r14,%r13
	ror	$23,%r14
	and	%r11,%r15	# (f^g)&e
	mov	%r12,40(%rsp)

	xor	%r14,%r13	# Sigma1(e)
	xor	%rbx,%r15	# Ch(e,f,g)=((f^g)&e)^g
	add	%rcx,%r12	# T1+=h

	mov	%rdx,%rcx
	add	%r13,%r12	# T1+=Sigma1(e)

	add	%r15,%r12	# T1+=Ch(e,f,g)
	mov	%rdx,%r13
	mov	%rdx,%r14

	ror	$28,%rcx
	ror	$34,%r13
	mov	%rdx,%r15
	add	(%rbp,%rdi,8),%r12	# T1+=K[round]

	xor	%r13,%rcx
	ror	$5,%r13
	or	%r9,%r14	# a|c

	xor	%r13,%rcx	# h=Sigma0(a)
	and	%r9,%r15	# a&c
	add	%r12,%r10	# d+=T1

	and	%r8,%r14	# (a|c)&b
	add	%r12,%rcx	# h+=T1

	or	%r15,%r14	# Maj(a,b,c)=((a|c)&b)|(a&c)
	lea	1(%rdi),%rdi	# round++

	add	%r14,%rcx	# h+=Maj(a,b,c)
	mov	56(%rsp),%r13
	mov	32(%rsp),%r12

	mov	%r13,%r15

	shr	$7,%r13
	ror	$1,%r15

	xor	%r15,%r13
	ror	$7,%r15

	xor	%r15,%r13	# sigma0(X[(i+1)&0xf])
	mov	%r12,%r14

	shr	$6,%r12
	ror	$19,%r14

	xor	%r14,%r12
	ror	$42,%r14

	xor	%r14,%r12	# sigma1(X[(i+14)&0xf])

	add	%r13,%r12

	add	120(%rsp),%r12

	add	48(%rsp),%r12
	mov	%r10,%r13
	mov	%r10,%r14
	mov	%r11,%r15

	ror	$14,%r13
	ror	$18,%r14
	xor	%rax,%r15	# f^g

	xor	%r14,%r13
	ror	$23,%r14
	and	%r10,%r15	# (f^g)&e
	mov	%r12,48(%rsp)

	xor	%r14,%r13	# Sigma1(e)
	xor	%rax,%r15	# Ch(e,f,g)=((f^g)&e)^g
	add	%rbx,%r12	# T1+=h

	mov	%rcx,%rbx
	add	%r13,%r12	# T1+=Sigma1(e)

	add	%r15,%r12	# T1+=Ch(e,f,g)
	mov	%rcx,%r13
	mov	%rcx,%r14

	ror	$28,%rbx
	ror	$34,%r13
	mov	%rcx,%r15
	add	(%rbp,%rdi,8),%r12	# T1+=K[round]

	xor	%r13,%rbx
	ror	$5,%r13
	or	%r8,%r14	# a|c

	xor	%r13,%rbx	# h=Sigma0(a)
	and	%r8,%r15	# a&c
	add	%r12,%r9	# d+=T1

	and	%rdx,%r14	# (a|c)&b
	add	%r12,%rbx	# h+=T1

	or	%r15,%r14	# Maj(a,b,c)=((a|c)&b)|(a&c)
	lea	1(%rdi),%rdi	# round++

	add	%r14,%rbx	# h+=Maj(a,b,c)
	mov	64(%rsp),%r13
	mov	40(%rsp),%r12

	mov	%r13,%r15

	shr	$7,%r13
	ror	$1,%r15

	xor	%r15,%r13
	ror	$7,%r15

	xor	%r15,%r13	# sigma0(X[(i+1)&0xf])
	mov	%r12,%r14

	shr	$6,%r12
	ror	$19,%r14

	xor	%r14,%r12
	ror	$42,%r14

	xor	%r14,%r12	# sigma1(X[(i+14)&0xf])

	add	%r13,%r12

	add	0(%rsp),%r12

	add	56(%rsp),%r12
	mov	%r9,%r13
	mov	%r9,%r14
	mov	%r10,%r15

	ror	$14,%r13
	ror	$18,%r14
	xor	%r11,%r15	# f^g

	xor	%r14,%r13
	ror	$23,%r14
	and	%r9,%r15	# (f^g)&e
	mov	%r12,56(%rsp)

	xor	%r14,%r13	# Sigma1(e)
	xor	%r11,%r15	# Ch(e,f,g)=((f^g)&e)^g
	add	%rax,%r12	# T1+=h

	mov	%rbx,%rax
	add	%r13,%r12	# T1+=Sigma1(e)

	add	%r15,%r12	# T1+=Ch(e,f,g)
	mov	%rbx,%r13
	mov	%rbx,%r14

	ror	$28,%rax
	ror	$34,%r13
	mov	%rbx,%r15
	add	(%rbp,%rdi,8),%r12	# T1+=K[round]

	xor	%r13,%rax
	ror	$5,%r13
	or	%rdx,%r14	# a|c

	xor	%r13,%rax	# h=Sigma0(a)
	and	%rdx,%r15	# a&c
	add	%r12,%r8	# d+=T1

	and	%rcx,%r14	# (a|c)&b
	add	%r12,%rax	# h+=T1

	or	%r15,%r14	# Maj(a,b,c)=((a|c)&b)|(a&c)
	lea	1(%rdi),%rdi	# round++

	add	%r14,%rax	# h+=Maj(a,b,c)
	mov	72(%rsp),%r13
	mov	48(%rsp),%r12

	mov	%r13,%r15

	shr	$7,%r13
	ror	$1,%r15

	xor	%r15,%r13
	ror	$7,%r15

	xor	%r15,%r13	# sigma0(X[(i+1)&0xf])
	mov	%r12,%r14

	shr	$6,%r12
	ror	$19,%r14

	xor	%r14,%r12
	ror	$42,%r14

	xor	%r14,%r12	# sigma1(X[(i+14)&0xf])

	add	%r13,%r12

	add	8(%rsp),%r12

	add	64(%rsp),%r12
	mov	%r8,%r13
	mov	%r8,%r14
	mov	%r9,%r15

	ror	$14,%r13
	ror	$18,%r14
	xor	%r10,%r15	# f^g

	xor	%r14,%r13
	ror	$23,%r14
	and	%r8,%r15	# (f^g)&e
	mov	%r12,64(%rsp)

	xor	%r14,%r13	# Sigma1(e)
	xor	%r10,%r15	# Ch(e,f,g)=((f^g)&e)^g
	add	%r11,%r12	# T1+=h

	mov	%rax,%r11
	add	%r13,%r12	# T1+=Sigma1(e)

	add	%r15,%r12	# T1+=Ch(e,f,g)
	mov	%rax,%r13
	mov	%rax,%r14

	ror	$28,%r11
	ror	$34,%r13
	mov	%rax,%r15
	add	(%rbp,%rdi,8),%r12	# T1+=K[round]

	xor	%r13,%r11
	ror	$5,%r13
	or	%rcx,%r14	# a|c

	xor	%r13,%r11	# h=Sigma0(a)
	and	%rcx,%r15	# a&c
	add	%r12,%rdx	# d+=T1

	and	%rbx,%r14	# (a|c)&b
	add	%r12,%r11	# h+=T1

	or	%r15,%r14	# Maj(a,b,c)=((a|c)&b)|(a&c)
	lea	1(%rdi),%rdi	# round++

	add	%r14,%r11	# h+=Maj(a,b,c)
	mov	80(%rsp),%r13
	mov	56(%rsp),%r12

	mov	%r13,%r15

	shr	$7,%r13
	ror	$1,%r15

	xor	%r15,%r13
	ror	$7,%r15

	xor	%r15,%r13	# sigma0(X[(i+1)&0xf])
	mov	%r12,%r14

	shr	$6,%r12
	ror	$19,%r14

	xor	%r14,%r12
	ror	$42,%r14

	xor	%r14,%r12	# sigma1(X[(i+14)&0xf])

	add	%r13,%r12

	add	16(%rsp),%r12

	add	72(%rsp),%r12
	mov	%rdx,%r13
	mov	%rdx,%r14
	mov	%r8,%r15

	ror	$14,%r13
	ror	$18,%r14
	xor	%r9,%r15	# f^g

	xor	%r14,%r13
	ror	$23,%r14
	and	%rdx,%r15	# (f^g)&e
	mov	%r12,72(%rsp)

	xor	%r14,%r13	# Sigma1(e)
	xor	%r9,%r15	# Ch(e,f,g)=((f^g)&e)^g
	add	%r10,%r12	# T1+=h

	mov	%r11,%r10
	add	%r13,%r12	# T1+=Sigma1(e)

	add	%r15,%r12	# T1+=Ch(e,f,g)
	mov	%r11,%r13
	mov	%r11,%r14

	ror	$28,%r10
	ror	$34,%r13
	mov	%r11,%r15
	add	(%rbp,%rdi,8),%r12	# T1+=K[round]

	xor	%r13,%r10
	ror	$5,%r13
	or	%rbx,%r14	# a|c

	xor	%r13,%r10	# h=Sigma0(a)
	and	%rbx,%r15	# a&c
	add	%r12,%rcx	# d+=T1

	and	%rax,%r14	# (a|c)&b
	add	%r12,%r10	# h+=T1

	or	%r15,%r14	# Maj(a,b,c)=((a|c)&b)|(a&c)
	lea	1(%rdi),%rdi	# round++

	add	%r14,%r10	# h+=Maj(a,b,c)
	mov	88(%rsp),%r13
	mov	64(%rsp),%r12

	mov	%r13,%r15

	shr	$7,%r13
	ror	$1,%r15

	xor	%r15,%r13
	ror	$7,%r15

	xor	%r15,%r13	# sigma0(X[(i+1)&0xf])
	mov	%r12,%r14

	shr	$6,%r12
	ror	$19,%r14

	xor	%r14,%r12
	ror	$42,%r14

	xor	%r14,%r12	# sigma1(X[(i+14)&0xf])

	add	%r13,%r12

	add	24(%rsp),%r12

	add	80(%rsp),%r12
	mov	%rcx,%r13
	mov	%rcx,%r14
	mov	%rdx,%r15

	ror	$14,%r13
	ror	$18,%r14
	xor	%r8,%r15	# f^g

	xor	%r14,%r13
	ror	$23,%r14
	and	%rcx,%r15	# (f^g)&e
	mov	%r12,80(%rsp)

	xor	%r14,%r13	# Sigma1(e)
	xor	%r8,%r15	# Ch(e,f,g)=((f^g)&e)^g
	add	%r9,%r12	# T1+=h

	mov	%r10,%r9
	add	%r13,%r12	# T1+=Sigma1(e)

	add	%r15,%r12	# T1+=Ch(e,f,g)
	mov	%r10,%r13
	mov	%r10,%r14

	ror	$28,%r9
	ror	$34,%r13
	mov	%r10,%r15
	add	(%rbp,%rdi,8),%r12	# T1+=K[round]

	xor	%r13,%r9
	ror	$5,%r13
	or	%rax,%r14	# a|c

	xor	%r13,%r9	# h=Sigma0(a)
	and	%rax,%r15	# a&c
	add	%r12,%rbx	# d+=T1

	and	%r11,%r14	# (a|c)&b
	add	%r12,%r9	# h+=T1

	or	%r15,%r14	# Maj(a,b,c)=((a|c)&b)|(a&c)
	lea	1(%rdi),%rdi	# round++

	add	%r14,%r9	# h+=Maj(a,b,c)
	mov	96(%rsp),%r13
	mov	72(%rsp),%r12

	mov	%r13,%r15

	shr	$7,%r13
	ror	$1,%r15

	xor	%r15,%r13
	ror	$7,%r15

	xor	%r15,%r13	# sigma0(X[(i+1)&0xf])
	mov	%r12,%r14

	shr	$6,%r12
	ror	$19,%r14

	xor	%r14,%r12
	ror	$42,%r14

	xor	%r14,%r12	# sigma1(X[(i+14)&0xf])

	add	%r13,%r12

	add	32(%rsp),%r12

	add	88(%rsp),%r12
	mov	%rbx,%r13
	mov	%rbx,%r14
	mov	%rcx,%r15

	ror	$14,%r13
	ror	$18,%r14
	xor	%rdx,%r15	# f^g

	xor	%r14,%r13
	ror	$23,%r14
	and	%rbx,%r15	# (f^g)&e
	mov	%r12,88(%rsp)

	xor	%r14,%r13	# Sigma1(e)
	xor	%rdx,%r15	# Ch(e,f,g)=((f^g)&e)^g
	add	%r8,%r12	# T1+=h

	mov	%r9,%r8
	add	%r13,%r12	# T1+=Sigma1(e)

	add	%r15,%r12	# T1+=Ch(e,f,g)
	mov	%r9,%r13
	mov	%r9,%r14

	ror	$28,%r8
	ror	$34,%r13
	mov	%r9,%r15
	add	(%rbp,%rdi,8),%r12	# T1+=K[round]

	xor	%r13,%r8
	ror	$5,%r13
	or	%r11,%r14	# a|c

	xor	%r13,%r8	# h=Sigma0(a)
	and	%r11,%r15	# a&c
	add	%r12,%rax	# d+=T1

	and	%r10,%r14	# (a|c)&b
	add	%r12,%r8	# h+=T1

	or	%r15,%r14	# Maj(a,b,c)=((a|c)&b)|(a&c)
	lea	1(%rdi),%rdi	# round++

	add	%r14,%r8	# h+=Maj(a,b,c)
	mov	104(%rsp),%r13
	mov	80(%rsp),%r12

	mov	%r13,%r15

	shr	$7,%r13
	ror	$1,%r15

	xor	%r15,%r13
	ror	$7,%r15

	xor	%r15,%r13	# sigma0(X[(i+1)&0xf])
	mov	%r12,%r14

	shr	$6,%r12
	ror	$19,%r14

	xor	%r14,%r12
	ror	$42,%r14

	xor	%r14,%r12	# sigma1(X[(i+14)&0xf])

	add	%r13,%r12

	add	40(%rsp),%r12

	add	96(%rsp),%r12
	mov	%rax,%r13
	mov	%rax,%r14
	mov	%rbx,%r15

	ror	$14,%r13
	ror	$18,%r14
	xor	%rcx,%r15	# f^g

	xor	%r14,%r13
	ror	$23,%r14
	and	%rax,%r15	# (f^g)&e
	mov	%r12,96(%rsp)

	xor	%r14,%r13	# Sigma1(e)
	xor	%rcx,%r15	# Ch(e,f,g)=((f^g)&e)^g
	add	%rdx,%r12	# T1+=h

	mov	%r8,%rdx
	add	%r13,%r12	# T1+=Sigma1(e)

	add	%r15,%r12	# T1+=Ch(e,f,g)
	mov	%r8,%r13
	mov	%r8,%r14

	ror	$28,%rdx
	ror	$34,%r13
	mov	%r8,%r15
	add	(%rbp,%rdi,8),%r12	# T1+=K[round]

	xor	%r13,%rdx
	ror	$5,%r13
	or	%r10,%r14	# a|c

	xor	%r13,%rdx	# h=Sigma0(a)
	and	%r10,%r15	# a&c
	add	%r12,%r11	# d+=T1

	and	%r9,%r14	# (a|c)&b
	add	%r12,%rdx	# h+=T1

	or	%r15,%r14	# Maj(a,b,c)=((a|c)&b)|(a&c)
	lea	1(%rdi),%rdi	# round++

	add	%r14,%rdx	# h+=Maj(a,b,c)
	mov	112(%rsp),%r13
	mov	88(%rsp),%r12

	mov	%r13,%r15

	shr	$7,%r13
	ror	$1,%r15

	xor	%r15,%r13
	ror	$7,%r15

	xor	%r15,%r13	# sigma0(X[(i+1)&0xf])
	mov	%r12,%r14

	shr	$6,%r12
	ror	$19,%r14

	xor	%r14,%r12
	ror	$42,%r14

	xor	%r14,%r12	# sigma1(X[(i+14)&0xf])

	add	%r13,%r12

	add	48(%rsp),%r12

	add	104(%rsp),%r12
	mov	%r11,%r13
	mov	%r11,%r14
	mov	%rax,%r15

	ror	$14,%r13
	ror	$18,%r14
	xor	%rbx,%r15	# f^g

	xor	%r14,%r13
	ror	$23,%r14
	and	%r11,%r15	# (f^g)&e
	mov	%r12,104(%rsp)

	xor	%r14,%r13	# Sigma1(e)
	xor	%rbx,%r15	# Ch(e,f,g)=((f^g)&e)^g
	add	%rcx,%r12	# T1+=h

	mov	%rdx,%rcx
	add	%r13,%r12	# T1+=Sigma1(e)

	add	%r15,%r12	# T1+=Ch(e,f,g)
	mov	%rdx,%r13
	mov	%rdx,%r14

	ror	$28,%rcx
	ror	$34,%r13
	mov	%rdx,%r15
	add	(%rbp,%rdi,8),%r12	# T1+=K[round]

	xor	%r13,%rcx
	ror	$5,%r13
	or	%r9,%r14	# a|c

	xor	%r13,%rcx	# h=Sigma0(a)
	and	%r9,%r15	# a&c
	add	%r12,%r10	# d+=T1

	and	%r8,%r14	# (a|c)&b
	add	%r12,%rcx	# h+=T1

	or	%r15,%r14	# Maj(a,b,c)=((a|c)&b)|(a&c)
	lea	1(%rdi),%rdi	# round++

	add	%r14,%rcx	# h+=Maj(a,b,c)
	mov	120(%rsp),%r13
	mov	96(%rsp),%r12

	mov	%r13,%r15

	shr	$7,%r13
	ror	$1,%r15

	xor	%r15,%r13
	ror	$7,%r15

	xor	%r15,%r13	# sigma0(X[(i+1)&0xf])
	mov	%r12,%r14

	shr	$6,%r12
	ror	$19,%r14

	xor	%r14,%r12
	ror	$42,%r14

	xor	%r14,%r12	# sigma1(X[(i+14)&0xf])

	add	%r13,%r12

	add	56(%rsp),%r12

	add	112(%rsp),%r12
	mov	%r10,%r13
	mov	%r10,%r14
	mov	%r11,%r15

	ror	$14,%r13
	ror	$18,%r14
	xor	%rax,%r15	# f^g

	xor	%r14,%r13
	ror	$23,%r14
	and	%r10,%r15	# (f^g)&e
	mov	%r12,112(%rsp)

	xor	%r14,%r13	# Sigma1(e)
	xor	%rax,%r15	# Ch(e,f,g)=((f^g)&e)^g
	add	%rbx,%r12	# T1+=h

	mov	%rcx,%rbx
	add	%r13,%r12	# T1+=Sigma1(e)

	add	%r15,%r12	# T1+=Ch(e,f,g)
	mov	%rcx,%r13
	mov	%rcx,%r14

	ror	$28,%rbx
	ror	$34,%r13
	mov	%rcx,%r15
	add	(%rbp,%rdi,8),%r12	# T1+=K[round]

	xor	%r13,%rbx
	ror	$5,%r13
	or	%r8,%r14	# a|c

	xor	%r13,%rbx	# h=Sigma0(a)
	and	%r8,%r15	# a&c
	add	%r12,%r9	# d+=T1

	and	%rdx,%r14	# (a|c)&b
	add	%r12,%rbx	# h+=T1

	or	%r15,%r14	# Maj(a,b,c)=((a|c)&b)|(a&c)
	lea	1(%rdi),%rdi	# round++

	add	%r14,%rbx	# h+=Maj(a,b,c)
	mov	0(%rsp),%r13
	mov	104(%rsp),%r12

	mov	%r13,%r15

	shr	$7,%r13
	ror	$1,%r15

	xor	%r15,%r13
	ror	$7,%r15

	xor	%r15,%r13	# sigma0(X[(i+1)&0xf])
	mov	%r12,%r14

	shr	$6,%r12
	ror	$19,%r14

	xor	%r14,%r12
	ror	$42,%r14

	xor	%r14,%r12	# sigma1(X[(i+14)&0xf])

	add	%r13,%r12

	add	64(%rsp),%r12

	add	120(%rsp),%r12
	mov	%r9,%r13
	mov	%r9,%r14
	mov	%r10,%r15

	ror	$14,%r13
	ror	$18,%r14
	xor	%r11,%r15	# f^g

	xor	%r14,%r13
	ror	$23,%r14
	and	%r9,%r15	# (f^g)&e
	mov	%r12,120(%rsp)

	xor	%r14,%r13	# Sigma1(e)
	xor	%r11,%r15	# Ch(e,f,g)=((f^g)&e)^g
	add	%rax,%r12	# T1+=h

	mov	%rbx,%rax
	add	%r13,%r12	# T1+=Sigma1(e)

	add	%r15,%r12	# T1+=Ch(e,f,g)
	mov	%rbx,%r13
	mov	%rbx,%r14

	ror	$28,%rax
	ror	$34,%r13
	mov	%rbx,%r15
	add	(%rbp,%rdi,8),%r12	# T1+=K[round]

	xor	%r13,%rax
	ror	$5,%r13
	or	%rdx,%r14	# a|c

	xor	%r13,%rax	# h=Sigma0(a)
	and	%rdx,%r15	# a&c
	add	%r12,%r8	# d+=T1

	and	%rcx,%r14	# (a|c)&b
	add	%r12,%rax	# h+=T1

	or	%r15,%r14	# Maj(a,b,c)=((a|c)&b)|(a&c)
	lea	1(%rdi),%rdi	# round++

	add	%r14,%rax	# h+=Maj(a,b,c)
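
	# %rdi is the round counter: SHA-512 performs 80 rounds per block.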
	cmp	$80,%rdi
	jb	.Lrounds_16_xx

	mov	16*8+0*8(%rsp),%rdi
	lea	16*8(%rsi),%rsi

	add	8*0(%rdi),%rax
	add	8*1(%rdi),%rbx
	add	8*2(%rdi),%rcx
	add	8*3(%rdi),%rdx
	add	8*4(%rdi),%r8
	add	8*5(%rdi),%r9
	add	8*6(%rdi),%r10
	add	8*7(%rdi),%r11

	cmp	16*8+2*8(%rsp),%rsi

	mov	%rax,8*0(%rdi)
	mov	%rbx,8*1(%rdi)
	mov	%rcx,8*2(%rdi)
	mov	%rdx,8*3(%rdi)
	mov	%r8,8*4(%rdi)
	mov	%r9,8*5(%rdi)
	mov	%r10,8*6(%rdi)
	mov	%r11,8*7(%rdi)
	jb	.Lloop

	mov	16*8+3*8(%rsp),%rsp
	.cfi_def_cfa	%rsp,56
	pop	%r15
	.cfi_adjust_cfa_offset	-8
	.cfi_restore	%r15
	pop	%r14
	.cfi_adjust_cfa_offset	-8
	.cfi_restore	%r14
	pop	%r13
	.cfi_adjust_cfa_offset	-8
	.cfi_restore	%r13
	pop	%r12
	.cfi_adjust_cfa_offset	-8
	.cfi_restore	%r12
	pop	%rbp
	.cfi_adjust_cfa_offset	-8
	.cfi_restore	%rbp
	pop	%rbx
	.cfi_adjust_cfa_offset	-8
	.cfi_restore	%rbx

	RET
	.cfi_endproc
SET_SIZE(SHA512TransformBlocks)
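
/*
 * Typical use from C, as a sketch only (the real callers live in the
 * ZFS SHA-2 code, and the SHA2_CTX layout is assumed as above):
 *
 *	SHA2_CTX ctx;	// algotype set to the SHA-512 variant,
 *			// state seeded with the SHA-512 IV
 *	SHA512TransformBlocks(&ctx, in, num);	// num 128-byte blocks
 */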

.section .rodata
	.align	64
	.type	K512,@object
K512:
	.quad	0x428a2f98d728ae22,0x7137449123ef65cd
	.quad	0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
	.quad	0x3956c25bf348b538,0x59f111f1b605d019
	.quad	0x923f82a4af194f9b,0xab1c5ed5da6d8118
	.quad	0xd807aa98a3030242,0x12835b0145706fbe
	.quad	0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
	.quad	0x72be5d74f27b896f,0x80deb1fe3b1696b1
	.quad	0x9bdc06a725c71235,0xc19bf174cf692694
	.quad	0xe49b69c19ef14ad2,0xefbe4786384f25e3
	.quad	0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
	.quad	0x2de92c6f592b0275,0x4a7484aa6ea6e483
	.quad	0x5cb0a9dcbd41fbd4,0x76f988da831153b5
	.quad	0x983e5152ee66dfab,0xa831c66d2db43210
	.quad	0xb00327c898fb213f,0xbf597fc7beef0ee4
	.quad	0xc6e00bf33da88fc2,0xd5a79147930aa725
	.quad	0x06ca6351e003826f,0x142929670a0e6e70
	.quad	0x27b70a8546d22ffc,0x2e1b21385c26c926
	.quad	0x4d2c6dfc5ac42aed,0x53380d139d95b3df
	.quad	0x650a73548baf63de,0x766a0abb3c77b2a8
	.quad	0x81c2c92e47edaee6,0x92722c851482353b
	.quad	0xa2bfe8a14cf10364,0xa81a664bbc423001
	.quad	0xc24b8b70d0f89791,0xc76c51a30654be30
	.quad	0xd192e819d6ef5218,0xd69906245565a910
	.quad	0xf40e35855771202a,0x106aa07032bbd1b8
	.quad	0x19a4c116b8d2d0c8,0x1e376c085141ab53
	.quad	0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
	.quad	0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
	.quad	0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
	.quad	0x748f82ee5defb2fc,0x78a5636f43172f60
	.quad	0x84c87814a1f0ab72,0x8cc702081a6439ec
	.quad	0x90befffa23631e28,0xa4506cebde82bde9
	.quad	0xbef9a3f7b2c67915,0xc67178f2e372532b
	.quad	0xca273eceea26619c,0xd186b8c721c0c207
	.quad	0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
	.quad	0x06f067aa72176fba,0x0a637dc5a2c898a6
	.quad	0x113f9804bef90dae,0x1b710b35131c471b
	.quad	0x28db77f523047d84,0x32caab7b40c72493
	.quad	0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
	.quad	0x4cc5d4becb3e42b6,0x597f299cfc657e2a
	.quad	0x5fcb6fab3ad6faec,0x6c44198c4a475817
#endif	/* !lint && !__lint */

#ifdef __ELF__
.section .note.GNU-stack,"",%progbits
#endif