Refactor checksum code, and add object-name hashing for uploaded objects
Add placeholder sha256 and sha512 implementations from Intel, using the AVX, AVX2, and SSE4.1 extensions, together with some boilerplate Go code.
This commit is contained in:
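The Go boilerplate mentioned in the commit message is not part of this hunk. As a rough, illustrative sketch of per-object hashing on upload (package, function, and object names below are hypothetical, not the code added by this commit), it usually amounts to teeing the request body through a hash:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"strings"
)

// putObject streams an uploaded object (the storage write is elided) while
// computing its SHA-256, so the digest can be recorded next to the object
// name for later integrity checks. Sketch only; not this commit's code.
func putObject(name string, body io.Reader) (string, error) {
	h := sha256.New()
	// TeeReader lets the storage path and the hasher see the same bytes.
	if _, err := io.Copy(io.Discard, io.TeeReader(body, h)); err != nil {
		return "", err
	}
	return hex.EncodeToString(h.Sum(nil)), nil
}

func main() {
	sum, _ := putObject("bucket/photo.png", strings.NewReader("object payload"))
	fmt.Println("sha256:", sum)
}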
261  pkgs/checksum/crc32c/asm.S  Normal file
@@ -0,0 +1,261 @@
|
||||
#ifndef ENTRY
|
||||
#define ENTRY(name) \
|
||||
.globl name ; \
|
||||
.align 4,0x90 ; \
|
||||
name:
|
||||
#endif
|
||||
|
||||
#ifndef END
|
||||
#define END(name) \
|
||||
.size name, .-name
|
||||
#endif
|
||||
|
||||
#ifndef ENDPROC
|
||||
#define ENDPROC(name) \
|
||||
.type name, @function ; \
|
||||
END(name)
|
||||
#endif
|
||||
|
||||
#define NUM_INVALID 100
|
||||
|
||||
#define TYPE_R32 0
|
||||
#define TYPE_R64 1
|
||||
#define TYPE_XMM 2
|
||||
#define TYPE_INVALID 100
|
||||
|
||||
.macro R32_NUM opd r32
|
||||
\opd = NUM_INVALID
|
||||
.ifc \r32,%eax
|
||||
\opd = 0
|
||||
.endif
|
||||
.ifc \r32,%ecx
|
||||
\opd = 1
|
||||
.endif
|
||||
.ifc \r32,%edx
|
||||
\opd = 2
|
||||
.endif
|
||||
.ifc \r32,%ebx
|
||||
\opd = 3
|
||||
.endif
|
||||
.ifc \r32,%esp
|
||||
\opd = 4
|
||||
.endif
|
||||
.ifc \r32,%ebp
|
||||
\opd = 5
|
||||
.endif
|
||||
.ifc \r32,%esi
|
||||
\opd = 6
|
||||
.endif
|
||||
.ifc \r32,%edi
|
||||
\opd = 7
|
||||
.endif
|
||||
#ifdef X86_64
|
||||
.ifc \r32,%r8d
|
||||
\opd = 8
|
||||
.endif
|
||||
.ifc \r32,%r9d
|
||||
\opd = 9
|
||||
.endif
|
||||
.ifc \r32,%r10d
|
||||
\opd = 10
|
||||
.endif
|
||||
.ifc \r32,%r11d
|
||||
\opd = 11
|
||||
.endif
|
||||
.ifc \r32,%r12d
|
||||
\opd = 12
|
||||
.endif
|
||||
.ifc \r32,%r13d
|
||||
\opd = 13
|
||||
.endif
|
||||
.ifc \r32,%r14d
|
||||
\opd = 14
|
||||
.endif
|
||||
.ifc \r32,%r15d
|
||||
\opd = 15
|
||||
.endif
|
||||
#endif
|
||||
.endm
|
||||
|
||||
.macro R64_NUM opd r64
|
||||
\opd = NUM_INVALID
|
||||
#ifdef X86_64
|
||||
.ifc \r64,%rax
|
||||
\opd = 0
|
||||
.endif
|
||||
.ifc \r64,%rcx
|
||||
\opd = 1
|
||||
.endif
|
||||
.ifc \r64,%rdx
|
||||
\opd = 2
|
||||
.endif
|
||||
.ifc \r64,%rbx
|
||||
\opd = 3
|
||||
.endif
|
||||
.ifc \r64,%rsp
|
||||
\opd = 4
|
||||
.endif
|
||||
.ifc \r64,%rbp
|
||||
\opd = 5
|
||||
.endif
|
||||
.ifc \r64,%rsi
|
||||
\opd = 6
|
||||
.endif
|
||||
.ifc \r64,%rdi
|
||||
\opd = 7
|
||||
.endif
|
||||
.ifc \r64,%r8
|
||||
\opd = 8
|
||||
.endif
|
||||
.ifc \r64,%r9
|
||||
\opd = 9
|
||||
.endif
|
||||
.ifc \r64,%r10
|
||||
\opd = 10
|
||||
.endif
|
||||
.ifc \r64,%r11
|
||||
\opd = 11
|
||||
.endif
|
||||
.ifc \r64,%r12
|
||||
\opd = 12
|
||||
.endif
|
||||
.ifc \r64,%r13
|
||||
\opd = 13
|
||||
.endif
|
||||
.ifc \r64,%r14
|
||||
\opd = 14
|
||||
.endif
|
||||
.ifc \r64,%r15
|
||||
\opd = 15
|
||||
.endif
|
||||
#endif
|
||||
.endm
|
||||
|
||||
.macro XMM_NUM opd xmm
|
||||
\opd = NUM_INVALID
|
||||
.ifc \xmm,%xmm0
|
||||
\opd = 0
|
||||
.endif
|
||||
.ifc \xmm,%xmm1
|
||||
\opd = 1
|
||||
.endif
|
||||
.ifc \xmm,%xmm2
|
||||
\opd = 2
|
||||
.endif
|
||||
.ifc \xmm,%xmm3
|
||||
\opd = 3
|
||||
.endif
|
||||
.ifc \xmm,%xmm4
|
||||
\opd = 4
|
||||
.endif
|
||||
.ifc \xmm,%xmm5
|
||||
\opd = 5
|
||||
.endif
|
||||
.ifc \xmm,%xmm6
|
||||
\opd = 6
|
||||
.endif
|
||||
.ifc \xmm,%xmm7
|
||||
\opd = 7
|
||||
.endif
|
||||
.ifc \xmm,%xmm8
|
||||
\opd = 8
|
||||
.endif
|
||||
.ifc \xmm,%xmm9
|
||||
\opd = 9
|
||||
.endif
|
||||
.ifc \xmm,%xmm10
|
||||
\opd = 10
|
||||
.endif
|
||||
.ifc \xmm,%xmm11
|
||||
\opd = 11
|
||||
.endif
|
||||
.ifc \xmm,%xmm12
|
||||
\opd = 12
|
||||
.endif
|
||||
.ifc \xmm,%xmm13
|
||||
\opd = 13
|
||||
.endif
|
||||
.ifc \xmm,%xmm14
|
||||
\opd = 14
|
||||
.endif
|
||||
.ifc \xmm,%xmm15
|
||||
\opd = 15
|
||||
.endif
|
||||
.endm
|
||||
|
||||
.macro TYPE type reg
|
||||
R32_NUM reg_type_r32 \reg
|
||||
R64_NUM reg_type_r64 \reg
|
||||
XMM_NUM reg_type_xmm \reg
|
||||
.if reg_type_r64 <> NUM_INVALID
|
||||
\type = TYPE_R64
|
||||
.elseif reg_type_r32 <> NUM_INVALID
|
||||
\type = TYPE_R32
|
||||
.elseif reg_type_xmm <> NUM_INVALID
|
||||
\type = TYPE_XMM
|
||||
.else
|
||||
\type = TYPE_INVALID
|
||||
.endif
|
||||
.endm
|
||||
|
||||
.macro PFX_OPD_SIZE
|
||||
.byte 0x66
|
||||
.endm
|
||||
|
||||
.macro PFX_REX opd1 opd2 W=0
|
||||
.if ((\opd1 | \opd2) & 8) || \W
|
||||
.byte 0x40 | ((\opd1 & 8) >> 3) | ((\opd2 & 8) >> 1) | (\W << 3)
|
||||
.endif
|
||||
.endm
|
||||
|
||||
.macro MODRM mod opd1 opd2
|
||||
.byte \mod | (\opd1 & 7) | ((\opd2 & 7) << 3)
|
||||
.endm
|
||||
|
||||
.macro PSHUFB_XMM xmm1 xmm2
|
||||
XMM_NUM pshufb_opd1 \xmm1
|
||||
XMM_NUM pshufb_opd2 \xmm2
|
||||
PFX_OPD_SIZE
|
||||
PFX_REX pshufb_opd1 pshufb_opd2
|
||||
.byte 0x0f, 0x38, 0x00
|
||||
MODRM 0xc0 pshufb_opd1 pshufb_opd2
|
||||
.endm
|
||||
|
||||
.macro PCLMULQDQ imm8 xmm1 xmm2
|
||||
XMM_NUM clmul_opd1 \xmm1
|
||||
XMM_NUM clmul_opd2 \xmm2
|
||||
PFX_OPD_SIZE
|
||||
PFX_REX clmul_opd1 clmul_opd2
|
||||
.byte 0x0f, 0x3a, 0x44
|
||||
MODRM 0xc0 clmul_opd1 clmul_opd2
|
||||
.byte \imm8
|
||||
.endm
|
||||
|
||||
.macro PEXTRD imm8 xmm gpr
|
||||
R32_NUM extrd_opd1 \gpr
|
||||
XMM_NUM extrd_opd2 \xmm
|
||||
PFX_OPD_SIZE
|
||||
PFX_REX extrd_opd1 extrd_opd2
|
||||
.byte 0x0f, 0x3a, 0x16
|
||||
MODRM 0xc0 extrd_opd1 extrd_opd2
|
||||
.byte \imm8
|
||||
.endm
|
||||
|
||||
.macro MOVQ_R64_XMM opd1 opd2
|
||||
TYPE movq_r64_xmm_opd1_type \opd1
|
||||
.if movq_r64_xmm_opd1_type == TYPE_XMM
|
||||
XMM_NUM movq_r64_xmm_opd1 \opd1
|
||||
R64_NUM movq_r64_xmm_opd2 \opd2
|
||||
.else
|
||||
R64_NUM movq_r64_xmm_opd1 \opd1
|
||||
XMM_NUM movq_r64_xmm_opd2 \opd2
|
||||
.endif
|
||||
PFX_OPD_SIZE
|
||||
PFX_REX movq_r64_xmm_opd1 movq_r64_xmm_opd2 1
|
||||
.if movq_r64_xmm_opd1_type == TYPE_XMM
|
||||
.byte 0x0f, 0x7e
|
||||
.else
|
||||
.byte 0x0f, 0x6e
|
||||
.endif
|
||||
MODRM 0xc0 movq_r64_xmm_opd1 movq_r64_xmm_opd2
|
||||
.endm
|
||||
@@ -43,277 +43,7 @@
|
||||
* SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef ASM_NL
|
||||
#define ASM_NL ;
|
||||
#endif
|
||||
|
||||
#ifndef __ALIGN
|
||||
#define __ALIGN .align 4,0x90
|
||||
#endif
|
||||
|
||||
#define ALIGN __ALIGN
|
||||
|
||||
#ifndef ENTRY
|
||||
#define ENTRY(name) \
|
||||
.globl name ASM_NL \
|
||||
ALIGN ASM_NL \
|
||||
name:
|
||||
#endif
|
||||
|
||||
#ifndef END
|
||||
#define END(name) \
|
||||
.size name, .-name
|
||||
#endif
|
||||
|
||||
#ifndef ENDPROC
|
||||
#define ENDPROC(name) \
|
||||
.type name, @function ASM_NL \
|
||||
END(name)
|
||||
#endif
|
||||
|
||||
#define NUM_INVALID 100
|
||||
|
||||
#define TYPE_R32 0
|
||||
#define TYPE_R64 1
|
||||
#define TYPE_XMM 2
|
||||
#define TYPE_INVALID 100
|
||||
|
||||
.macro R32_NUM opd r32
|
||||
\opd = NUM_INVALID
|
||||
.ifc \r32,%eax
|
||||
\opd = 0
|
||||
.endif
|
||||
.ifc \r32,%ecx
|
||||
\opd = 1
|
||||
.endif
|
||||
.ifc \r32,%edx
|
||||
\opd = 2
|
||||
.endif
|
||||
.ifc \r32,%ebx
|
||||
\opd = 3
|
||||
.endif
|
||||
.ifc \r32,%esp
|
||||
\opd = 4
|
||||
.endif
|
||||
.ifc \r32,%ebp
|
||||
\opd = 5
|
||||
.endif
|
||||
.ifc \r32,%esi
|
||||
\opd = 6
|
||||
.endif
|
||||
.ifc \r32,%edi
|
||||
\opd = 7
|
||||
.endif
|
||||
#ifdef X86_64
|
||||
.ifc \r32,%r8d
|
||||
\opd = 8
|
||||
.endif
|
||||
.ifc \r32,%r9d
|
||||
\opd = 9
|
||||
.endif
|
||||
.ifc \r32,%r10d
|
||||
\opd = 10
|
||||
.endif
|
||||
.ifc \r32,%r11d
|
||||
\opd = 11
|
||||
.endif
|
||||
.ifc \r32,%r12d
|
||||
\opd = 12
|
||||
.endif
|
||||
.ifc \r32,%r13d
|
||||
\opd = 13
|
||||
.endif
|
||||
.ifc \r32,%r14d
|
||||
\opd = 14
|
||||
.endif
|
||||
.ifc \r32,%r15d
|
||||
\opd = 15
|
||||
.endif
|
||||
#endif
|
||||
.endm
|
||||
|
||||
.macro R64_NUM opd r64
|
||||
\opd = NUM_INVALID
|
||||
#ifdef X86_64
|
||||
.ifc \r64,%rax
|
||||
\opd = 0
|
||||
.endif
|
||||
.ifc \r64,%rcx
|
||||
\opd = 1
|
||||
.endif
|
||||
.ifc \r64,%rdx
|
||||
\opd = 2
|
||||
.endif
|
||||
.ifc \r64,%rbx
|
||||
\opd = 3
|
||||
.endif
|
||||
.ifc \r64,%rsp
|
||||
\opd = 4
|
||||
.endif
|
||||
.ifc \r64,%rbp
|
||||
\opd = 5
|
||||
.endif
|
||||
.ifc \r64,%rsi
|
||||
\opd = 6
|
||||
.endif
|
||||
.ifc \r64,%rdi
|
||||
\opd = 7
|
||||
.endif
|
||||
.ifc \r64,%r8
|
||||
\opd = 8
|
||||
.endif
|
||||
.ifc \r64,%r9
|
||||
\opd = 9
|
||||
.endif
|
||||
.ifc \r64,%r10
|
||||
\opd = 10
|
||||
.endif
|
||||
.ifc \r64,%r11
|
||||
\opd = 11
|
||||
.endif
|
||||
.ifc \r64,%r12
|
||||
\opd = 12
|
||||
.endif
|
||||
.ifc \r64,%r13
|
||||
\opd = 13
|
||||
.endif
|
||||
.ifc \r64,%r14
|
||||
\opd = 14
|
||||
.endif
|
||||
.ifc \r64,%r15
|
||||
\opd = 15
|
||||
.endif
|
||||
#endif
|
||||
.endm
|
||||
|
||||
.macro XMM_NUM opd xmm
|
||||
\opd = NUM_INVALID
|
||||
.ifc \xmm,%xmm0
|
||||
\opd = 0
|
||||
.endif
|
||||
.ifc \xmm,%xmm1
|
||||
\opd = 1
|
||||
.endif
|
||||
.ifc \xmm,%xmm2
|
||||
\opd = 2
|
||||
.endif
|
||||
.ifc \xmm,%xmm3
|
||||
\opd = 3
|
||||
.endif
|
||||
.ifc \xmm,%xmm4
|
||||
\opd = 4
|
||||
.endif
|
||||
.ifc \xmm,%xmm5
|
||||
\opd = 5
|
||||
.endif
|
||||
.ifc \xmm,%xmm6
|
||||
\opd = 6
|
||||
.endif
|
||||
.ifc \xmm,%xmm7
|
||||
\opd = 7
|
||||
.endif
|
||||
.ifc \xmm,%xmm8
|
||||
\opd = 8
|
||||
.endif
|
||||
.ifc \xmm,%xmm9
|
||||
\opd = 9
|
||||
.endif
|
||||
.ifc \xmm,%xmm10
|
||||
\opd = 10
|
||||
.endif
|
||||
.ifc \xmm,%xmm11
|
||||
\opd = 11
|
||||
.endif
|
||||
.ifc \xmm,%xmm12
|
||||
\opd = 12
|
||||
.endif
|
||||
.ifc \xmm,%xmm13
|
||||
\opd = 13
|
||||
.endif
|
||||
.ifc \xmm,%xmm14
|
||||
\opd = 14
|
||||
.endif
|
||||
.ifc \xmm,%xmm15
|
||||
\opd = 15
|
||||
.endif
|
||||
.endm
|
||||
|
||||
.macro TYPE type reg
|
||||
R32_NUM reg_type_r32 \reg
|
||||
R64_NUM reg_type_r64 \reg
|
||||
XMM_NUM reg_type_xmm \reg
|
||||
.if reg_type_r64 <> NUM_INVALID
|
||||
\type = TYPE_R64
|
||||
.elseif reg_type_r32 <> NUM_INVALID
|
||||
\type = TYPE_R32
|
||||
.elseif reg_type_xmm <> NUM_INVALID
|
||||
\type = TYPE_XMM
|
||||
.else
|
||||
\type = TYPE_INVALID
|
||||
.endif
|
||||
.endm
|
||||
|
||||
.macro PFX_OPD_SIZE
|
||||
.byte 0x66
|
||||
.endm
|
||||
|
||||
.macro PFX_REX opd1 opd2 W=0
|
||||
.if ((\opd1 | \opd2) & 8) || \W
|
||||
.byte 0x40 | ((\opd1 & 8) >> 3) | ((\opd2 & 8) >> 1) | (\W << 3)
|
||||
.endif
|
||||
.endm
|
||||
|
||||
.macro MODRM mod opd1 opd2
|
||||
.byte \mod | (\opd1 & 7) | ((\opd2 & 7) << 3)
|
||||
.endm
|
||||
|
||||
.macro PSHUFB_XMM xmm1 xmm2
|
||||
XMM_NUM pshufb_opd1 \xmm1
|
||||
XMM_NUM pshufb_opd2 \xmm2
|
||||
PFX_OPD_SIZE
|
||||
PFX_REX pshufb_opd1 pshufb_opd2
|
||||
.byte 0x0f, 0x38, 0x00
|
||||
MODRM 0xc0 pshufb_opd1 pshufb_opd2
|
||||
.endm
|
||||
|
||||
.macro PCLMULQDQ imm8 xmm1 xmm2
|
||||
XMM_NUM clmul_opd1 \xmm1
|
||||
XMM_NUM clmul_opd2 \xmm2
|
||||
PFX_OPD_SIZE
|
||||
PFX_REX clmul_opd1 clmul_opd2
|
||||
.byte 0x0f, 0x3a, 0x44
|
||||
MODRM 0xc0 clmul_opd1 clmul_opd2
|
||||
.byte \imm8
|
||||
.endm
|
||||
|
||||
.macro PEXTRD imm8 xmm gpr
|
||||
R32_NUM extrd_opd1 \gpr
|
||||
XMM_NUM extrd_opd2 \xmm
|
||||
PFX_OPD_SIZE
|
||||
PFX_REX extrd_opd1 extrd_opd2
|
||||
.byte 0x0f, 0x3a, 0x16
|
||||
MODRM 0xc0 extrd_opd1 extrd_opd2
|
||||
.byte \imm8
|
||||
.endm
|
||||
|
||||
.macro MOVQ_R64_XMM opd1 opd2
|
||||
TYPE movq_r64_xmm_opd1_type \opd1
|
||||
.if movq_r64_xmm_opd1_type == TYPE_XMM
|
||||
XMM_NUM movq_r64_xmm_opd1 \opd1
|
||||
R64_NUM movq_r64_xmm_opd2 \opd2
|
||||
.else
|
||||
R64_NUM movq_r64_xmm_opd1 \opd1
|
||||
XMM_NUM movq_r64_xmm_opd2 \opd2
|
||||
.endif
|
||||
PFX_OPD_SIZE
|
||||
PFX_REX movq_r64_xmm_opd1 movq_r64_xmm_opd2 1
|
||||
.if movq_r64_xmm_opd1_type == TYPE_XMM
|
||||
.byte 0x0f, 0x7e
|
||||
.else
|
||||
.byte 0x0f, 0x6e
|
||||
.endif
|
||||
MODRM 0xc0 movq_r64_xmm_opd1 movq_r64_xmm_opd2
|
||||
.endm
#include "asm.S"

## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
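The file above computes CRC-32C (the Castagnoli polynomial used by iSCSI), which is exactly what the crc32 and pclmulqdq instructions accelerate. As a correctness reference for the assembly, the Go standard library already exposes the same checksum (and uses SSE4.2 on amd64 when available); a minimal sketch:

package main

import (
	"fmt"
	"hash/crc32"
)

func main() {
	// crc32.Castagnoli selects the CRC-32C polynomial used by iSCSI.
	table := crc32.MakeTable(crc32.Castagnoli)
	sum := crc32.Checksum([]byte("hello, checksum"), table)
	fmt.Printf("crc32c: %08x\n", sum)
}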
261  pkgs/crypto/sha1/asm.S  Normal file
@@ -0,0 +1,261 @@
|
||||
#ifndef ENTRY
|
||||
#define ENTRY(name) \
|
||||
.globl name ; \
|
||||
.align 4,0x90 ; \
|
||||
name:
|
||||
#endif
|
||||
|
||||
#ifndef END
|
||||
#define END(name) \
|
||||
.size name, .-name
|
||||
#endif
|
||||
|
||||
#ifndef ENDPROC
|
||||
#define ENDPROC(name) \
|
||||
.type name, @function ; \
|
||||
END(name)
|
||||
#endif
|
||||
|
||||
#define NUM_INVALID 100
|
||||
|
||||
#define TYPE_R32 0
|
||||
#define TYPE_R64 1
|
||||
#define TYPE_XMM 2
|
||||
#define TYPE_INVALID 100
|
||||
|
||||
.macro R32_NUM opd r32
|
||||
\opd = NUM_INVALID
|
||||
.ifc \r32,%eax
|
||||
\opd = 0
|
||||
.endif
|
||||
.ifc \r32,%ecx
|
||||
\opd = 1
|
||||
.endif
|
||||
.ifc \r32,%edx
|
||||
\opd = 2
|
||||
.endif
|
||||
.ifc \r32,%ebx
|
||||
\opd = 3
|
||||
.endif
|
||||
.ifc \r32,%esp
|
||||
\opd = 4
|
||||
.endif
|
||||
.ifc \r32,%ebp
|
||||
\opd = 5
|
||||
.endif
|
||||
.ifc \r32,%esi
|
||||
\opd = 6
|
||||
.endif
|
||||
.ifc \r32,%edi
|
||||
\opd = 7
|
||||
.endif
|
||||
#ifdef X86_64
|
||||
.ifc \r32,%r8d
|
||||
\opd = 8
|
||||
.endif
|
||||
.ifc \r32,%r9d
|
||||
\opd = 9
|
||||
.endif
|
||||
.ifc \r32,%r10d
|
||||
\opd = 10
|
||||
.endif
|
||||
.ifc \r32,%r11d
|
||||
\opd = 11
|
||||
.endif
|
||||
.ifc \r32,%r12d
|
||||
\opd = 12
|
||||
.endif
|
||||
.ifc \r32,%r13d
|
||||
\opd = 13
|
||||
.endif
|
||||
.ifc \r32,%r14d
|
||||
\opd = 14
|
||||
.endif
|
||||
.ifc \r32,%r15d
|
||||
\opd = 15
|
||||
.endif
|
||||
#endif
|
||||
.endm
|
||||
|
||||
.macro R64_NUM opd r64
|
||||
\opd = NUM_INVALID
|
||||
#ifdef X86_64
|
||||
.ifc \r64,%rax
|
||||
\opd = 0
|
||||
.endif
|
||||
.ifc \r64,%rcx
|
||||
\opd = 1
|
||||
.endif
|
||||
.ifc \r64,%rdx
|
||||
\opd = 2
|
||||
.endif
|
||||
.ifc \r64,%rbx
|
||||
\opd = 3
|
||||
.endif
|
||||
.ifc \r64,%rsp
|
||||
\opd = 4
|
||||
.endif
|
||||
.ifc \r64,%rbp
|
||||
\opd = 5
|
||||
.endif
|
||||
.ifc \r64,%rsi
|
||||
\opd = 6
|
||||
.endif
|
||||
.ifc \r64,%rdi
|
||||
\opd = 7
|
||||
.endif
|
||||
.ifc \r64,%r8
|
||||
\opd = 8
|
||||
.endif
|
||||
.ifc \r64,%r9
|
||||
\opd = 9
|
||||
.endif
|
||||
.ifc \r64,%r10
|
||||
\opd = 10
|
||||
.endif
|
||||
.ifc \r64,%r11
|
||||
\opd = 11
|
||||
.endif
|
||||
.ifc \r64,%r12
|
||||
\opd = 12
|
||||
.endif
|
||||
.ifc \r64,%r13
|
||||
\opd = 13
|
||||
.endif
|
||||
.ifc \r64,%r14
|
||||
\opd = 14
|
||||
.endif
|
||||
.ifc \r64,%r15
|
||||
\opd = 15
|
||||
.endif
|
||||
#endif
|
||||
.endm
|
||||
|
||||
.macro XMM_NUM opd xmm
|
||||
\opd = NUM_INVALID
|
||||
.ifc \xmm,%xmm0
|
||||
\opd = 0
|
||||
.endif
|
||||
.ifc \xmm,%xmm1
|
||||
\opd = 1
|
||||
.endif
|
||||
.ifc \xmm,%xmm2
|
||||
\opd = 2
|
||||
.endif
|
||||
.ifc \xmm,%xmm3
|
||||
\opd = 3
|
||||
.endif
|
||||
.ifc \xmm,%xmm4
|
||||
\opd = 4
|
||||
.endif
|
||||
.ifc \xmm,%xmm5
|
||||
\opd = 5
|
||||
.endif
|
||||
.ifc \xmm,%xmm6
|
||||
\opd = 6
|
||||
.endif
|
||||
.ifc \xmm,%xmm7
|
||||
\opd = 7
|
||||
.endif
|
||||
.ifc \xmm,%xmm8
|
||||
\opd = 8
|
||||
.endif
|
||||
.ifc \xmm,%xmm9
|
||||
\opd = 9
|
||||
.endif
|
||||
.ifc \xmm,%xmm10
|
||||
\opd = 10
|
||||
.endif
|
||||
.ifc \xmm,%xmm11
|
||||
\opd = 11
|
||||
.endif
|
||||
.ifc \xmm,%xmm12
|
||||
\opd = 12
|
||||
.endif
|
||||
.ifc \xmm,%xmm13
|
||||
\opd = 13
|
||||
.endif
|
||||
.ifc \xmm,%xmm14
|
||||
\opd = 14
|
||||
.endif
|
||||
.ifc \xmm,%xmm15
|
||||
\opd = 15
|
||||
.endif
|
||||
.endm
|
||||
|
||||
.macro TYPE type reg
|
||||
R32_NUM reg_type_r32 \reg
|
||||
R64_NUM reg_type_r64 \reg
|
||||
XMM_NUM reg_type_xmm \reg
|
||||
.if reg_type_r64 <> NUM_INVALID
|
||||
\type = TYPE_R64
|
||||
.elseif reg_type_r32 <> NUM_INVALID
|
||||
\type = TYPE_R32
|
||||
.elseif reg_type_xmm <> NUM_INVALID
|
||||
\type = TYPE_XMM
|
||||
.else
|
||||
\type = TYPE_INVALID
|
||||
.endif
|
||||
.endm
|
||||
|
||||
.macro PFX_OPD_SIZE
|
||||
.byte 0x66
|
||||
.endm
|
||||
|
||||
.macro PFX_REX opd1 opd2 W=0
|
||||
.if ((\opd1 | \opd2) & 8) || \W
|
||||
.byte 0x40 | ((\opd1 & 8) >> 3) | ((\opd2 & 8) >> 1) | (\W << 3)
|
||||
.endif
|
||||
.endm
|
||||
|
||||
.macro MODRM mod opd1 opd2
|
||||
.byte \mod | (\opd1 & 7) | ((\opd2 & 7) << 3)
|
||||
.endm
|
||||
|
||||
.macro PSHUFB_XMM xmm1 xmm2
|
||||
XMM_NUM pshufb_opd1 \xmm1
|
||||
XMM_NUM pshufb_opd2 \xmm2
|
||||
PFX_OPD_SIZE
|
||||
PFX_REX pshufb_opd1 pshufb_opd2
|
||||
.byte 0x0f, 0x38, 0x00
|
||||
MODRM 0xc0 pshufb_opd1 pshufb_opd2
|
||||
.endm
|
||||
|
||||
.macro PCLMULQDQ imm8 xmm1 xmm2
|
||||
XMM_NUM clmul_opd1 \xmm1
|
||||
XMM_NUM clmul_opd2 \xmm2
|
||||
PFX_OPD_SIZE
|
||||
PFX_REX clmul_opd1 clmul_opd2
|
||||
.byte 0x0f, 0x3a, 0x44
|
||||
MODRM 0xc0 clmul_opd1 clmul_opd2
|
||||
.byte \imm8
|
||||
.endm
|
||||
|
||||
.macro PEXTRD imm8 xmm gpr
|
||||
R32_NUM extrd_opd1 \gpr
|
||||
XMM_NUM extrd_opd2 \xmm
|
||||
PFX_OPD_SIZE
|
||||
PFX_REX extrd_opd1 extrd_opd2
|
||||
.byte 0x0f, 0x3a, 0x16
|
||||
MODRM 0xc0 extrd_opd1 extrd_opd2
|
||||
.byte \imm8
|
||||
.endm
|
||||
|
||||
.macro MOVQ_R64_XMM opd1 opd2
|
||||
TYPE movq_r64_xmm_opd1_type \opd1
|
||||
.if movq_r64_xmm_opd1_type == TYPE_XMM
|
||||
XMM_NUM movq_r64_xmm_opd1 \opd1
|
||||
R64_NUM movq_r64_xmm_opd2 \opd2
|
||||
.else
|
||||
R64_NUM movq_r64_xmm_opd1 \opd1
|
||||
XMM_NUM movq_r64_xmm_opd2 \opd2
|
||||
.endif
|
||||
PFX_OPD_SIZE
|
||||
PFX_REX movq_r64_xmm_opd1 movq_r64_xmm_opd2 1
|
||||
.if movq_r64_xmm_opd1_type == TYPE_XMM
|
||||
.byte 0x0f, 0x7e
|
||||
.else
|
||||
.byte 0x0f, 0x6e
|
||||
.endif
|
||||
MODRM 0xc0 movq_r64_xmm_opd1 movq_r64_xmm_opd2
|
||||
.endm
|
||||
@@ -67,278 +67,7 @@
|
||||
*
|
||||
*/
|
||||
|
||||
|
||||
#ifndef ASM_NL
|
||||
#define ASM_NL ;
|
||||
#endif
|
||||
|
||||
#ifndef __ALIGN
|
||||
#define __ALIGN .align 4,0x90
|
||||
#endif
|
||||
|
||||
#define ALIGN __ALIGN
|
||||
|
||||
#ifndef ENTRY
|
||||
#define ENTRY(name) \
|
||||
.globl name ASM_NL \
|
||||
ALIGN ASM_NL \
|
||||
name:
|
||||
#endif
|
||||
|
||||
#ifndef END
|
||||
#define END(name) \
|
||||
.size name, .-name
|
||||
#endif
|
||||
|
||||
#ifndef ENDPROC
|
||||
#define ENDPROC(name) \
|
||||
.type name, @function ASM_NL \
|
||||
END(name)
|
||||
#endif
|
||||
|
||||
#define NUM_INVALID 100
|
||||
|
||||
#define TYPE_R32 0
|
||||
#define TYPE_R64 1
|
||||
#define TYPE_XMM 2
|
||||
#define TYPE_INVALID 100
|
||||
|
||||
.macro R32_NUM opd r32
|
||||
\opd = NUM_INVALID
|
||||
.ifc \r32,%eax
|
||||
\opd = 0
|
||||
.endif
|
||||
.ifc \r32,%ecx
|
||||
\opd = 1
|
||||
.endif
|
||||
.ifc \r32,%edx
|
||||
\opd = 2
|
||||
.endif
|
||||
.ifc \r32,%ebx
|
||||
\opd = 3
|
||||
.endif
|
||||
.ifc \r32,%esp
|
||||
\opd = 4
|
||||
.endif
|
||||
.ifc \r32,%ebp
|
||||
\opd = 5
|
||||
.endif
|
||||
.ifc \r32,%esi
|
||||
\opd = 6
|
||||
.endif
|
||||
.ifc \r32,%edi
|
||||
\opd = 7
|
||||
.endif
|
||||
#ifdef X86_64
|
||||
.ifc \r32,%r8d
|
||||
\opd = 8
|
||||
.endif
|
||||
.ifc \r32,%r9d
|
||||
\opd = 9
|
||||
.endif
|
||||
.ifc \r32,%r10d
|
||||
\opd = 10
|
||||
.endif
|
||||
.ifc \r32,%r11d
|
||||
\opd = 11
|
||||
.endif
|
||||
.ifc \r32,%r12d
|
||||
\opd = 12
|
||||
.endif
|
||||
.ifc \r32,%r13d
|
||||
\opd = 13
|
||||
.endif
|
||||
.ifc \r32,%r14d
|
||||
\opd = 14
|
||||
.endif
|
||||
.ifc \r32,%r15d
|
||||
\opd = 15
|
||||
.endif
|
||||
#endif
|
||||
.endm
|
||||
|
||||
.macro R64_NUM opd r64
|
||||
\opd = NUM_INVALID
|
||||
#ifdef X86_64
|
||||
.ifc \r64,%rax
|
||||
\opd = 0
|
||||
.endif
|
||||
.ifc \r64,%rcx
|
||||
\opd = 1
|
||||
.endif
|
||||
.ifc \r64,%rdx
|
||||
\opd = 2
|
||||
.endif
|
||||
.ifc \r64,%rbx
|
||||
\opd = 3
|
||||
.endif
|
||||
.ifc \r64,%rsp
|
||||
\opd = 4
|
||||
.endif
|
||||
.ifc \r64,%rbp
|
||||
\opd = 5
|
||||
.endif
|
||||
.ifc \r64,%rsi
|
||||
\opd = 6
|
||||
.endif
|
||||
.ifc \r64,%rdi
|
||||
\opd = 7
|
||||
.endif
|
||||
.ifc \r64,%r8
|
||||
\opd = 8
|
||||
.endif
|
||||
.ifc \r64,%r9
|
||||
\opd = 9
|
||||
.endif
|
||||
.ifc \r64,%r10
|
||||
\opd = 10
|
||||
.endif
|
||||
.ifc \r64,%r11
|
||||
\opd = 11
|
||||
.endif
|
||||
.ifc \r64,%r12
|
||||
\opd = 12
|
||||
.endif
|
||||
.ifc \r64,%r13
|
||||
\opd = 13
|
||||
.endif
|
||||
.ifc \r64,%r14
|
||||
\opd = 14
|
||||
.endif
|
||||
.ifc \r64,%r15
|
||||
\opd = 15
|
||||
.endif
|
||||
#endif
|
||||
.endm
|
||||
|
||||
.macro XMM_NUM opd xmm
|
||||
\opd = NUM_INVALID
|
||||
.ifc \xmm,%xmm0
|
||||
\opd = 0
|
||||
.endif
|
||||
.ifc \xmm,%xmm1
|
||||
\opd = 1
|
||||
.endif
|
||||
.ifc \xmm,%xmm2
|
||||
\opd = 2
|
||||
.endif
|
||||
.ifc \xmm,%xmm3
|
||||
\opd = 3
|
||||
.endif
|
||||
.ifc \xmm,%xmm4
|
||||
\opd = 4
|
||||
.endif
|
||||
.ifc \xmm,%xmm5
|
||||
\opd = 5
|
||||
.endif
|
||||
.ifc \xmm,%xmm6
|
||||
\opd = 6
|
||||
.endif
|
||||
.ifc \xmm,%xmm7
|
||||
\opd = 7
|
||||
.endif
|
||||
.ifc \xmm,%xmm8
|
||||
\opd = 8
|
||||
.endif
|
||||
.ifc \xmm,%xmm9
|
||||
\opd = 9
|
||||
.endif
|
||||
.ifc \xmm,%xmm10
|
||||
\opd = 10
|
||||
.endif
|
||||
.ifc \xmm,%xmm11
|
||||
\opd = 11
|
||||
.endif
|
||||
.ifc \xmm,%xmm12
|
||||
\opd = 12
|
||||
.endif
|
||||
.ifc \xmm,%xmm13
|
||||
\opd = 13
|
||||
.endif
|
||||
.ifc \xmm,%xmm14
|
||||
\opd = 14
|
||||
.endif
|
||||
.ifc \xmm,%xmm15
|
||||
\opd = 15
|
||||
.endif
|
||||
.endm
|
||||
|
||||
.macro TYPE type reg
|
||||
R32_NUM reg_type_r32 \reg
|
||||
R64_NUM reg_type_r64 \reg
|
||||
XMM_NUM reg_type_xmm \reg
|
||||
.if reg_type_r64 <> NUM_INVALID
|
||||
\type = TYPE_R64
|
||||
.elseif reg_type_r32 <> NUM_INVALID
|
||||
\type = TYPE_R32
|
||||
.elseif reg_type_xmm <> NUM_INVALID
|
||||
\type = TYPE_XMM
|
||||
.else
|
||||
\type = TYPE_INVALID
|
||||
.endif
|
||||
.endm
|
||||
|
||||
.macro PFX_OPD_SIZE
|
||||
.byte 0x66
|
||||
.endm
|
||||
|
||||
.macro PFX_REX opd1 opd2 W=0
|
||||
.if ((\opd1 | \opd2) & 8) || \W
|
||||
.byte 0x40 | ((\opd1 & 8) >> 3) | ((\opd2 & 8) >> 1) | (\W << 3)
|
||||
.endif
|
||||
.endm
|
||||
|
||||
.macro MODRM mod opd1 opd2
|
||||
.byte \mod | (\opd1 & 7) | ((\opd2 & 7) << 3)
|
||||
.endm
|
||||
|
||||
.macro PSHUFB_XMM xmm1 xmm2
|
||||
XMM_NUM pshufb_opd1 \xmm1
|
||||
XMM_NUM pshufb_opd2 \xmm2
|
||||
PFX_OPD_SIZE
|
||||
PFX_REX pshufb_opd1 pshufb_opd2
|
||||
.byte 0x0f, 0x38, 0x00
|
||||
MODRM 0xc0 pshufb_opd1 pshufb_opd2
|
||||
.endm
|
||||
|
||||
.macro PCLMULQDQ imm8 xmm1 xmm2
|
||||
XMM_NUM clmul_opd1 \xmm1
|
||||
XMM_NUM clmul_opd2 \xmm2
|
||||
PFX_OPD_SIZE
|
||||
PFX_REX clmul_opd1 clmul_opd2
|
||||
.byte 0x0f, 0x3a, 0x44
|
||||
MODRM 0xc0 clmul_opd1 clmul_opd2
|
||||
.byte \imm8
|
||||
.endm
|
||||
|
||||
.macro PEXTRD imm8 xmm gpr
|
||||
R32_NUM extrd_opd1 \gpr
|
||||
XMM_NUM extrd_opd2 \xmm
|
||||
PFX_OPD_SIZE
|
||||
PFX_REX extrd_opd1 extrd_opd2
|
||||
.byte 0x0f, 0x3a, 0x16
|
||||
MODRM 0xc0 extrd_opd1 extrd_opd2
|
||||
.byte \imm8
|
||||
.endm
|
||||
|
||||
.macro MOVQ_R64_XMM opd1 opd2
|
||||
TYPE movq_r64_xmm_opd1_type \opd1
|
||||
.if movq_r64_xmm_opd1_type == TYPE_XMM
|
||||
XMM_NUM movq_r64_xmm_opd1 \opd1
|
||||
R64_NUM movq_r64_xmm_opd2 \opd2
|
||||
.else
|
||||
R64_NUM movq_r64_xmm_opd1 \opd1
|
||||
XMM_NUM movq_r64_xmm_opd2 \opd2
|
||||
.endif
|
||||
PFX_OPD_SIZE
|
||||
PFX_REX movq_r64_xmm_opd1 movq_r64_xmm_opd2 1
|
||||
.if movq_r64_xmm_opd1_type == TYPE_XMM
|
||||
.byte 0x0f, 0x7e
|
||||
.else
|
||||
.byte 0x0f, 0x6e
|
||||
.endif
|
||||
MODRM 0xc0 movq_r64_xmm_opd1 movq_r64_xmm_opd2
|
||||
.endm
#include "asm.S"

#define CTX %rdi /* arg1 */
#define BUF %rsi /* arg2 */
0  pkgs/crypto/sha256/TODO  Normal file
261  pkgs/crypto/sha256/asm.S  Normal file
@@ -0,0 +1,261 @@
|
||||
#ifndef ENTRY
|
||||
#define ENTRY(name) \
|
||||
.globl name ; \
|
||||
.align 4,0x90 ; \
|
||||
name:
|
||||
#endif
|
||||
|
||||
#ifndef END
|
||||
#define END(name) \
|
||||
.size name, .-name
|
||||
#endif
|
||||
|
||||
#ifndef ENDPROC
|
||||
#define ENDPROC(name) \
|
||||
.type name, @function ; \
|
||||
END(name)
|
||||
#endif
|
||||
|
||||
#define NUM_INVALID 100
|
||||
|
||||
#define TYPE_R32 0
|
||||
#define TYPE_R64 1
|
||||
#define TYPE_XMM 2
|
||||
#define TYPE_INVALID 100
|
||||
|
||||
.macro R32_NUM opd r32
|
||||
\opd = NUM_INVALID
|
||||
.ifc \r32,%eax
|
||||
\opd = 0
|
||||
.endif
|
||||
.ifc \r32,%ecx
|
||||
\opd = 1
|
||||
.endif
|
||||
.ifc \r32,%edx
|
||||
\opd = 2
|
||||
.endif
|
||||
.ifc \r32,%ebx
|
||||
\opd = 3
|
||||
.endif
|
||||
.ifc \r32,%esp
|
||||
\opd = 4
|
||||
.endif
|
||||
.ifc \r32,%ebp
|
||||
\opd = 5
|
||||
.endif
|
||||
.ifc \r32,%esi
|
||||
\opd = 6
|
||||
.endif
|
||||
.ifc \r32,%edi
|
||||
\opd = 7
|
||||
.endif
|
||||
#ifdef X86_64
|
||||
.ifc \r32,%r8d
|
||||
\opd = 8
|
||||
.endif
|
||||
.ifc \r32,%r9d
|
||||
\opd = 9
|
||||
.endif
|
||||
.ifc \r32,%r10d
|
||||
\opd = 10
|
||||
.endif
|
||||
.ifc \r32,%r11d
|
||||
\opd = 11
|
||||
.endif
|
||||
.ifc \r32,%r12d
|
||||
\opd = 12
|
||||
.endif
|
||||
.ifc \r32,%r13d
|
||||
\opd = 13
|
||||
.endif
|
||||
.ifc \r32,%r14d
|
||||
\opd = 14
|
||||
.endif
|
||||
.ifc \r32,%r15d
|
||||
\opd = 15
|
||||
.endif
|
||||
#endif
|
||||
.endm
|
||||
|
||||
.macro R64_NUM opd r64
|
||||
\opd = NUM_INVALID
|
||||
#ifdef X86_64
|
||||
.ifc \r64,%rax
|
||||
\opd = 0
|
||||
.endif
|
||||
.ifc \r64,%rcx
|
||||
\opd = 1
|
||||
.endif
|
||||
.ifc \r64,%rdx
|
||||
\opd = 2
|
||||
.endif
|
||||
.ifc \r64,%rbx
|
||||
\opd = 3
|
||||
.endif
|
||||
.ifc \r64,%rsp
|
||||
\opd = 4
|
||||
.endif
|
||||
.ifc \r64,%rbp
|
||||
\opd = 5
|
||||
.endif
|
||||
.ifc \r64,%rsi
|
||||
\opd = 6
|
||||
.endif
|
||||
.ifc \r64,%rdi
|
||||
\opd = 7
|
||||
.endif
|
||||
.ifc \r64,%r8
|
||||
\opd = 8
|
||||
.endif
|
||||
.ifc \r64,%r9
|
||||
\opd = 9
|
||||
.endif
|
||||
.ifc \r64,%r10
|
||||
\opd = 10
|
||||
.endif
|
||||
.ifc \r64,%r11
|
||||
\opd = 11
|
||||
.endif
|
||||
.ifc \r64,%r12
|
||||
\opd = 12
|
||||
.endif
|
||||
.ifc \r64,%r13
|
||||
\opd = 13
|
||||
.endif
|
||||
.ifc \r64,%r14
|
||||
\opd = 14
|
||||
.endif
|
||||
.ifc \r64,%r15
|
||||
\opd = 15
|
||||
.endif
|
||||
#endif
|
||||
.endm
|
||||
|
||||
.macro XMM_NUM opd xmm
|
||||
\opd = NUM_INVALID
|
||||
.ifc \xmm,%xmm0
|
||||
\opd = 0
|
||||
.endif
|
||||
.ifc \xmm,%xmm1
|
||||
\opd = 1
|
||||
.endif
|
||||
.ifc \xmm,%xmm2
|
||||
\opd = 2
|
||||
.endif
|
||||
.ifc \xmm,%xmm3
|
||||
\opd = 3
|
||||
.endif
|
||||
.ifc \xmm,%xmm4
|
||||
\opd = 4
|
||||
.endif
|
||||
.ifc \xmm,%xmm5
|
||||
\opd = 5
|
||||
.endif
|
||||
.ifc \xmm,%xmm6
|
||||
\opd = 6
|
||||
.endif
|
||||
.ifc \xmm,%xmm7
|
||||
\opd = 7
|
||||
.endif
|
||||
.ifc \xmm,%xmm8
|
||||
\opd = 8
|
||||
.endif
|
||||
.ifc \xmm,%xmm9
|
||||
\opd = 9
|
||||
.endif
|
||||
.ifc \xmm,%xmm10
|
||||
\opd = 10
|
||||
.endif
|
||||
.ifc \xmm,%xmm11
|
||||
\opd = 11
|
||||
.endif
|
||||
.ifc \xmm,%xmm12
|
||||
\opd = 12
|
||||
.endif
|
||||
.ifc \xmm,%xmm13
|
||||
\opd = 13
|
||||
.endif
|
||||
.ifc \xmm,%xmm14
|
||||
\opd = 14
|
||||
.endif
|
||||
.ifc \xmm,%xmm15
|
||||
\opd = 15
|
||||
.endif
|
||||
.endm
|
||||
|
||||
.macro TYPE type reg
|
||||
R32_NUM reg_type_r32 \reg
|
||||
R64_NUM reg_type_r64 \reg
|
||||
XMM_NUM reg_type_xmm \reg
|
||||
.if reg_type_r64 <> NUM_INVALID
|
||||
\type = TYPE_R64
|
||||
.elseif reg_type_r32 <> NUM_INVALID
|
||||
\type = TYPE_R32
|
||||
.elseif reg_type_xmm <> NUM_INVALID
|
||||
\type = TYPE_XMM
|
||||
.else
|
||||
\type = TYPE_INVALID
|
||||
.endif
|
||||
.endm
|
||||
|
||||
.macro PFX_OPD_SIZE
|
||||
.byte 0x66
|
||||
.endm
|
||||
|
||||
.macro PFX_REX opd1 opd2 W=0
|
||||
.if ((\opd1 | \opd2) & 8) || \W
|
||||
.byte 0x40 | ((\opd1 & 8) >> 3) | ((\opd2 & 8) >> 1) | (\W << 3)
|
||||
.endif
|
||||
.endm
|
||||
|
||||
.macro MODRM mod opd1 opd2
|
||||
.byte \mod | (\opd1 & 7) | ((\opd2 & 7) << 3)
|
||||
.endm
|
||||
|
||||
.macro PSHUFB_XMM xmm1 xmm2
|
||||
XMM_NUM pshufb_opd1 \xmm1
|
||||
XMM_NUM pshufb_opd2 \xmm2
|
||||
PFX_OPD_SIZE
|
||||
PFX_REX pshufb_opd1 pshufb_opd2
|
||||
.byte 0x0f, 0x38, 0x00
|
||||
MODRM 0xc0 pshufb_opd1 pshufb_opd2
|
||||
.endm
|
||||
|
||||
.macro PCLMULQDQ imm8 xmm1 xmm2
|
||||
XMM_NUM clmul_opd1 \xmm1
|
||||
XMM_NUM clmul_opd2 \xmm2
|
||||
PFX_OPD_SIZE
|
||||
PFX_REX clmul_opd1 clmul_opd2
|
||||
.byte 0x0f, 0x3a, 0x44
|
||||
MODRM 0xc0 clmul_opd1 clmul_opd2
|
||||
.byte \imm8
|
||||
.endm
|
||||
|
||||
.macro PEXTRD imm8 xmm gpr
|
||||
R32_NUM extrd_opd1 \gpr
|
||||
XMM_NUM extrd_opd2 \xmm
|
||||
PFX_OPD_SIZE
|
||||
PFX_REX extrd_opd1 extrd_opd2
|
||||
.byte 0x0f, 0x3a, 0x16
|
||||
MODRM 0xc0 extrd_opd1 extrd_opd2
|
||||
.byte \imm8
|
||||
.endm
|
||||
|
||||
.macro MOVQ_R64_XMM opd1 opd2
|
||||
TYPE movq_r64_xmm_opd1_type \opd1
|
||||
.if movq_r64_xmm_opd1_type == TYPE_XMM
|
||||
XMM_NUM movq_r64_xmm_opd1 \opd1
|
||||
R64_NUM movq_r64_xmm_opd2 \opd2
|
||||
.else
|
||||
R64_NUM movq_r64_xmm_opd1 \opd1
|
||||
XMM_NUM movq_r64_xmm_opd2 \opd2
|
||||
.endif
|
||||
PFX_OPD_SIZE
|
||||
PFX_REX movq_r64_xmm_opd1 movq_r64_xmm_opd2 1
|
||||
.if movq_r64_xmm_opd1_type == TYPE_XMM
|
||||
.byte 0x0f, 0x7e
|
||||
.else
|
||||
.byte 0x0f, 0x6e
|
||||
.endif
|
||||
MODRM 0xc0 movq_r64_xmm_opd1 movq_r64_xmm_opd2
|
||||
.endm
493  pkgs/crypto/sha256/sha256-avx-asm.S  Normal file
@@ -0,0 +1,493 @@
|
||||
########################################################################
|
||||
# Implement fast SHA-256 with AVX1 instructions. (x86_64)
|
||||
#
|
||||
# Copyright (C) 2013 Intel Corporation.
|
||||
#
|
||||
# Authors:
|
||||
# James Guilford <james.guilford@intel.com>
|
||||
# Kirk Yap <kirk.s.yap@intel.com>
|
||||
# Tim Chen <tim.c.chen@linux.intel.com>
|
||||
#
|
||||
# This software is available to you under a choice of one of two
|
||||
# licenses. You may choose to be licensed under the terms of the GNU
|
||||
# General Public License (GPL) Version 2, available from the file
|
||||
# COPYING in the main directory of this source tree, or the
|
||||
# OpenIB.org BSD license below:
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or
|
||||
# without modification, are permitted provided that the following
|
||||
# conditions are met:
|
||||
#
|
||||
# - Redistributions of source code must retain the above
|
||||
# copyright notice, this list of conditions and the following
|
||||
# disclaimer.
|
||||
#
|
||||
# - Redistributions in binary form must reproduce the above
|
||||
# copyright notice, this list of conditions and the following
|
||||
# disclaimer in the documentation and/or other materials
|
||||
# provided with the distribution.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
# SOFTWARE.
|
||||
########################################################################
|
||||
#
|
||||
# This code is described in an Intel White-Paper:
|
||||
# "Fast SHA-256 Implementations on Intel Architecture Processors"
|
||||
#
|
||||
# To find it, surf to http://www.intel.com/p/en_US/embedded
|
||||
# and search for that title.
|
||||
#
|
||||
########################################################################
|
||||
# This code schedules 1 block at a time, with 4 lanes per block
|
||||
########################################################################
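For orientation, the macros below interleave the FIPS 180-4 SHA-256 message schedule with the compression rounds. The quantities the inline comments call s0, s1, S0, S1, CH, and MAJ are:

    s0(x) = ROTR7(x)  XOR ROTR18(x) XOR SHR3(x)
    s1(x) = ROTR17(x) XOR ROTR19(x) XOR SHR10(x)
    W[t]  = s1(W[t-2]) + W[t-7] + s0(W[t-15]) + W[t-16]     (mod 2^32)
    S1(e) = ROTR6(e)  XOR ROTR11(e) XOR ROTR25(e)
    S0(a) = ROTR2(a)  XOR ROTR13(a) XOR ROTR22(a)
    CH    = (e AND f) XOR ((NOT e) AND g)                   # written ((f^g)&e)^g below
    MAJ   = (a AND b) XOR (a AND c) XOR (b AND c)           # written ((a|c)&b)|(a&c) below

FOUR_ROUNDS_AND_SCHED produces four new W[t] dwords per invocation (the four lanes of one XMM register) while performing four rounds; DO_ROUND runs a single round using an already-scheduled word from the _XFER slot on the stack.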
#include "asm.S"
|
||||
|
||||
## assume buffers not aligned
|
||||
#define VMOVDQ vmovdqu
|
||||
|
||||
################################ Define Macros
|
||||
|
||||
# addm [mem], reg
|
||||
# Add reg to mem using reg-mem add and store
|
||||
.macro addm p1 p2
|
||||
add \p1, \p2
|
||||
mov \p2, \p1
|
||||
.endm
|
||||
|
||||
|
||||
.macro MY_ROR p1 p2
|
||||
shld $(32-(\p1)), \p2, \p2
|
||||
.endm
|
||||
|
||||
################################
|
||||
|
||||
# COPY_XMM_AND_BSWAP xmm, [mem], byte_flip_mask
|
||||
# Load xmm with mem and byte swap each dword
|
||||
.macro COPY_XMM_AND_BSWAP p1 p2 p3
|
||||
VMOVDQ \p2, \p1
|
||||
vpshufb \p3, \p1, \p1
|
||||
.endm
|
||||
|
||||
################################
|
||||
|
||||
X0 = %xmm4
|
||||
X1 = %xmm5
|
||||
X2 = %xmm6
|
||||
X3 = %xmm7
|
||||
|
||||
XTMP0 = %xmm0
|
||||
XTMP1 = %xmm1
|
||||
XTMP2 = %xmm2
|
||||
XTMP3 = %xmm3
|
||||
XTMP4 = %xmm8
|
||||
XFER = %xmm9
|
||||
XTMP5 = %xmm11
|
||||
|
||||
SHUF_00BA = %xmm10 # shuffle xBxA -> 00BA
|
||||
SHUF_DC00 = %xmm12 # shuffle xDxC -> DC00
|
||||
BYTE_FLIP_MASK = %xmm13
|
||||
|
||||
NUM_BLKS = %rdx # 3rd arg
|
||||
CTX = %rsi # 2nd arg
|
||||
INP = %rdi # 1st arg
|
||||
|
||||
SRND = %rdi # clobbers INP
|
||||
c = %ecx
|
||||
d = %r8d
|
||||
e = %edx
|
||||
TBL = %rbp
|
||||
a = %eax
|
||||
b = %ebx
|
||||
|
||||
f = %r9d
|
||||
g = %r10d
|
||||
h = %r11d
|
||||
|
||||
y0 = %r13d
|
||||
y1 = %r14d
|
||||
y2 = %r15d
|
||||
|
||||
|
||||
_INP_END_SIZE = 8
|
||||
_INP_SIZE = 8
|
||||
_XFER_SIZE = 16
|
||||
_XMM_SAVE_SIZE = 0
|
||||
|
||||
_INP_END = 0
|
||||
_INP = _INP_END + _INP_END_SIZE
|
||||
_XFER = _INP + _INP_SIZE
|
||||
_XMM_SAVE = _XFER + _XFER_SIZE
|
||||
STACK_SIZE = _XMM_SAVE + _XMM_SAVE_SIZE
|
||||
|
||||
# rotate_Xs
|
||||
# Rotate values of symbols X0...X3
|
||||
.macro rotate_Xs
|
||||
X_ = X0
|
||||
X0 = X1
|
||||
X1 = X2
|
||||
X2 = X3
|
||||
X3 = X_
|
||||
.endm
|
||||
|
||||
# ROTATE_ARGS
|
||||
# Rotate values of symbols a...h
|
||||
.macro ROTATE_ARGS
|
||||
TMP_ = h
|
||||
h = g
|
||||
g = f
|
||||
f = e
|
||||
e = d
|
||||
d = c
|
||||
c = b
|
||||
b = a
|
||||
a = TMP_
|
||||
.endm
|
||||
|
||||
.macro FOUR_ROUNDS_AND_SCHED
|
||||
## compute s0 four at a time and s1 two at a time
|
||||
## compute W[-16] + W[-7] 4 at a time
|
||||
|
||||
mov e, y0 # y0 = e
|
||||
MY_ROR (25-11), y0 # y0 = e >> (25-11)
|
||||
mov a, y1 # y1 = a
|
||||
vpalignr $4, X2, X3, XTMP0 # XTMP0 = W[-7]
|
||||
MY_ROR (22-13), y1 # y1 = a >> (22-13)
|
||||
xor e, y0 # y0 = e ^ (e >> (25-11))
|
||||
mov f, y2 # y2 = f
|
||||
MY_ROR (11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6))
|
||||
xor a, y1 # y1 = a ^ (a >> (22-13)
|
||||
xor g, y2 # y2 = f^g
|
||||
vpaddd X0, XTMP0, XTMP0 # XTMP0 = W[-7] + W[-16]
|
||||
xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
|
||||
and e, y2 # y2 = (f^g)&e
|
||||
MY_ROR (13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2))
|
||||
## compute s0
|
||||
vpalignr $4, X0, X1, XTMP1 # XTMP1 = W[-15]
|
||||
xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
|
||||
MY_ROR 6, y0 # y0 = S1 = (e>>6) & (e>>11) ^ (e>>25)
|
||||
xor g, y2 # y2 = CH = ((f^g)&e)^g
|
||||
MY_ROR 2, y1 # y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22)
|
||||
add y0, y2 # y2 = S1 + CH
|
||||
add _XFER(%rsp), y2 # y2 = k + w + S1 + CH
|
||||
mov a, y0 # y0 = a
|
||||
add y2, h # h = h + S1 + CH + k + w
|
||||
mov a, y2 # y2 = a
|
||||
vpsrld $7, XTMP1, XTMP2
|
||||
or c, y0 # y0 = a|c
|
||||
add h, d # d = d + h + S1 + CH + k + w
|
||||
and c, y2 # y2 = a&c
|
||||
vpslld $(32-7), XTMP1, XTMP3
|
||||
and b, y0 # y0 = (a|c)&b
|
||||
add y1, h # h = h + S1 + CH + k + w + S0
|
||||
vpor XTMP2, XTMP3, XTMP3 # XTMP1 = W[-15] MY_ROR 7
|
||||
or y2, y0 # y0 = MAJ = (a|c)&b)|(a&c)
|
||||
add y0, h # h = h + S1 + CH + k + w + S0 + MAJ
|
||||
ROTATE_ARGS
|
||||
mov a, y1 # y1 = a
|
||||
MY_ROR (25-11), y0 # y0 = e >> (25-11)
|
||||
xor e, y0 # y0 = e ^ (e >> (25-11))
|
||||
mov f, y2 # y2 = f
|
||||
MY_ROR (22-13), y1 # y1 = a >> (22-13)
|
||||
vpsrld $18, XTMP1, XTMP2 #
|
||||
xor a, y1 # y1 = a ^ (a >> (22-13)
|
||||
MY_ROR (11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6))
|
||||
xor g, y2 # y2 = f^g
|
||||
vpsrld $3, XTMP1, XTMP4 # XTMP4 = W[-15] >> 3
|
||||
MY_ROR (13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2))
|
||||
xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
|
||||
and e, y2 # y2 = (f^g)&e
|
||||
MY_ROR 6, y0 # y0 = S1 = (e>>6) & (e>>11) ^ (e>>25)
|
||||
vpslld $(32-18), XTMP1, XTMP1
|
||||
xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
|
||||
xor g, y2 # y2 = CH = ((f^g)&e)^g
|
||||
vpxor XTMP1, XTMP3, XTMP3 #
|
||||
add y0, y2 # y2 = S1 + CH
|
||||
add (1*4 + _XFER)(%rsp), y2 # y2 = k + w + S1 + CH
|
||||
MY_ROR 2, y1 # y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22)
|
||||
vpxor XTMP2, XTMP3, XTMP3 # XTMP1 = W[-15] MY_ROR 7 ^ W[-15] MY_ROR
|
||||
mov a, y0 # y0 = a
|
||||
add y2, h # h = h + S1 + CH + k + w
|
||||
mov a, y2 # y2 = a
|
||||
vpxor XTMP4, XTMP3, XTMP1 # XTMP1 = s0
|
||||
or c, y0 # y0 = a|c
|
||||
add h, d # d = d + h + S1 + CH + k + w
|
||||
and c, y2 # y2 = a&c
|
||||
## compute low s1
|
||||
vpshufd $0b11111010, X3, XTMP2 # XTMP2 = W[-2] {BBAA}
|
||||
and b, y0 # y0 = (a|c)&b
|
||||
add y1, h # h = h + S1 + CH + k + w + S0
|
||||
vpaddd XTMP1, XTMP0, XTMP0 # XTMP0 = W[-16] + W[-7] + s0
|
||||
or y2, y0 # y0 = MAJ = (a|c)&b)|(a&c)
|
||||
add y0, h # h = h + S1 + CH + k + w + S0 + MAJ
|
||||
ROTATE_ARGS
|
||||
mov e, y0 # y0 = e
|
||||
mov a, y1 # y1 = a
|
||||
MY_ROR (25-11), y0 # y0 = e >> (25-11)
|
||||
xor e, y0 # y0 = e ^ (e >> (25-11))
|
||||
MY_ROR (22-13), y1 # y1 = a >> (22-13)
|
||||
mov f, y2 # y2 = f
|
||||
xor a, y1 # y1 = a ^ (a >> (22-13)
|
||||
MY_ROR (11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6))
|
||||
vpsrld $10, XTMP2, XTMP4 # XTMP4 = W[-2] >> 10 {BBAA}
|
||||
xor g, y2 # y2 = f^g
|
||||
vpsrlq $19, XTMP2, XTMP3 # XTMP3 = W[-2] MY_ROR 19 {xBxA}
|
||||
xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
|
||||
and e, y2 # y2 = (f^g)&e
|
||||
vpsrlq $17, XTMP2, XTMP2 # XTMP2 = W[-2] MY_ROR 17 {xBxA}
|
||||
MY_ROR (13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2))
|
||||
xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
|
||||
xor g, y2 # y2 = CH = ((f^g)&e)^g
|
||||
MY_ROR 6, y0 # y0 = S1 = (e>>6) & (e>>11) ^ (e>>25)
|
||||
vpxor XTMP3, XTMP2, XTMP2 #
|
||||
add y0, y2 # y2 = S1 + CH
|
||||
MY_ROR 2, y1 # y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22)
|
||||
add (2*4 + _XFER)(%rsp), y2 # y2 = k + w + S1 + CH
|
||||
vpxor XTMP2, XTMP4, XTMP4 # XTMP4 = s1 {xBxA}
|
||||
mov a, y0 # y0 = a
|
||||
add y2, h # h = h + S1 + CH + k + w
|
||||
mov a, y2 # y2 = a
|
||||
vpshufb SHUF_00BA, XTMP4, XTMP4 # XTMP4 = s1 {00BA}
|
||||
or c, y0 # y0 = a|c
|
||||
add h, d # d = d + h + S1 + CH + k + w
|
||||
and c, y2 # y2 = a&c
|
||||
vpaddd XTMP4, XTMP0, XTMP0 # XTMP0 = {..., ..., W[1], W[0]}
|
||||
and b, y0 # y0 = (a|c)&b
|
||||
add y1, h # h = h + S1 + CH + k + w + S0
|
||||
## compute high s1
|
||||
vpshufd $0b01010000, XTMP0, XTMP2 # XTMP2 = W[-2] {DDCC}
|
||||
or y2, y0 # y0 = MAJ = (a|c)&b)|(a&c)
|
||||
add y0, h # h = h + S1 + CH + k + w + S0 + MAJ
|
||||
ROTATE_ARGS
|
||||
mov e, y0 # y0 = e
|
||||
MY_ROR (25-11), y0 # y0 = e >> (25-11)
|
||||
mov a, y1 # y1 = a
|
||||
MY_ROR (22-13), y1 # y1 = a >> (22-13)
|
||||
xor e, y0 # y0 = e ^ (e >> (25-11))
|
||||
mov f, y2 # y2 = f
|
||||
MY_ROR (11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6))
|
||||
vpsrld $10, XTMP2, XTMP5 # XTMP5 = W[-2] >> 10 {DDCC}
|
||||
xor a, y1 # y1 = a ^ (a >> (22-13)
|
||||
xor g, y2 # y2 = f^g
|
||||
vpsrlq $19, XTMP2, XTMP3 # XTMP3 = W[-2] MY_ROR 19 {xDxC}
|
||||
xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
|
||||
and e, y2 # y2 = (f^g)&e
|
||||
MY_ROR (13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2))
|
||||
vpsrlq $17, XTMP2, XTMP2 # XTMP2 = W[-2] MY_ROR 17 {xDxC}
|
||||
xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
|
||||
MY_ROR 6, y0 # y0 = S1 = (e>>6) & (e>>11) ^ (e>>25)
|
||||
xor g, y2 # y2 = CH = ((f^g)&e)^g
|
||||
vpxor XTMP3, XTMP2, XTMP2
|
||||
MY_ROR 2, y1 # y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22)
|
||||
add y0, y2 # y2 = S1 + CH
|
||||
add (3*4 + _XFER)(%rsp), y2 # y2 = k + w + S1 + CH
|
||||
vpxor XTMP2, XTMP5, XTMP5 # XTMP5 = s1 {xDxC}
|
||||
mov a, y0 # y0 = a
|
||||
add y2, h # h = h + S1 + CH + k + w
|
||||
mov a, y2 # y2 = a
|
||||
vpshufb SHUF_DC00, XTMP5, XTMP5 # XTMP5 = s1 {DC00}
|
||||
or c, y0 # y0 = a|c
|
||||
add h, d # d = d + h + S1 + CH + k + w
|
||||
and c, y2 # y2 = a&c
|
||||
vpaddd XTMP0, XTMP5, X0 # X0 = {W[3], W[2], W[1], W[0]}
|
||||
and b, y0 # y0 = (a|c)&b
|
||||
add y1, h # h = h + S1 + CH + k + w + S0
|
||||
or y2, y0 # y0 = MAJ = (a|c)&b)|(a&c)
|
||||
add y0, h # h = h + S1 + CH + k + w + S0 + MAJ
|
||||
ROTATE_ARGS
|
||||
rotate_Xs
|
||||
.endm
|
||||
|
||||
## input is [rsp + _XFER + %1 * 4]
|
||||
.macro DO_ROUND round
|
||||
mov e, y0 # y0 = e
|
||||
MY_ROR (25-11), y0 # y0 = e >> (25-11)
|
||||
mov a, y1 # y1 = a
|
||||
xor e, y0 # y0 = e ^ (e >> (25-11))
|
||||
MY_ROR (22-13), y1 # y1 = a >> (22-13)
|
||||
mov f, y2 # y2 = f
|
||||
xor a, y1 # y1 = a ^ (a >> (22-13)
|
||||
MY_ROR (11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6))
|
||||
xor g, y2 # y2 = f^g
|
||||
xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
|
||||
MY_ROR (13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2))
|
||||
and e, y2 # y2 = (f^g)&e
|
||||
xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
|
||||
MY_ROR 6, y0 # y0 = S1 = (e>>6) & (e>>11) ^ (e>>25)
|
||||
xor g, y2 # y2 = CH = ((f^g)&e)^g
|
||||
add y0, y2 # y2 = S1 + CH
|
||||
MY_ROR 2, y1 # y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22)
|
||||
offset = \round * 4 + _XFER #
|
||||
add offset(%rsp), y2 # y2 = k + w + S1 + CH
|
||||
mov a, y0 # y0 = a
|
||||
add y2, h # h = h + S1 + CH + k + w
|
||||
mov a, y2 # y2 = a
|
||||
or c, y0 # y0 = a|c
|
||||
add h, d # d = d + h + S1 + CH + k + w
|
||||
and c, y2 # y2 = a&c
|
||||
and b, y0 # y0 = (a|c)&b
|
||||
add y1, h # h = h + S1 + CH + k + w + S0
|
||||
or y2, y0 # y0 = MAJ = (a|c)&b)|(a&c)
|
||||
add y0, h # h = h + S1 + CH + k + w + S0 + MAJ
|
||||
ROTATE_ARGS
|
||||
.endm
|
||||
|
||||
########################################################################
|
||||
## void sha256_transform_avx(void *input_data, UINT32 digest[8], UINT64 num_blks)
|
||||
## arg 1 : pointer to input data
|
||||
## arg 2 : pointer to digest
|
||||
## arg 3 : Num blocks
|
||||
########################################################################
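A note on the Go side (an assumption about typical wiring, not code present in this commit): the three-argument transform above is what a Go wrapper would call per 64-byte block, and the choice between the SSE4.1, AVX, and AVX2 variants is normally made once at start-up from CPUID feature bits. A hedged sketch using golang.org/x/sys/cpu, with placeholder constructor names and the standard library as the only path actually taken until the assembly is hooked up:

package main

import (
	"crypto/sha256"
	"fmt"
	"hash"

	"golang.org/x/sys/cpu"
)

// newObjectHasher picks the best SHA-256 routine the CPU supports. The
// commented constructors are hypothetical placeholders for wrappers around
// the assembly entry points (e.g. sha256_transform_avx below); every path
// currently falls back to the pure-Go standard library.
func newObjectHasher() hash.Hash {
	switch {
	case cpu.X86.HasAVX2:
		// return newAVX2()
	case cpu.X86.HasAVX:
		// return newAVX() // would drive sha256_transform_avx
	case cpu.X86.HasSSE41:
		// return newSSE41()
	}
	return sha256.New()
}

func main() {
	h := newObjectHasher()
	h.Write([]byte("uploaded object bytes"))
	fmt.Printf("%x\n", h.Sum(nil))
}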
.text
|
||||
ENTRY(sha256_transform_avx)
|
||||
.align 32
|
||||
pushq %rbx
|
||||
pushq %rbp
|
||||
pushq %r13
|
||||
pushq %r14
|
||||
pushq %r15
|
||||
pushq %r12
|
||||
|
||||
mov %rsp, %r12
|
||||
subq $STACK_SIZE, %rsp # allocate stack space
|
||||
and $~15, %rsp # align stack pointer
|
||||
|
||||
shl $6, NUM_BLKS # convert to bytes
|
||||
jz done_hash
|
||||
add INP, NUM_BLKS # pointer to end of data
|
||||
mov NUM_BLKS, _INP_END(%rsp)
|
||||
|
||||
## load initial digest
|
||||
mov 4*0(CTX), a
|
||||
mov 4*1(CTX), b
|
||||
mov 4*2(CTX), c
|
||||
mov 4*3(CTX), d
|
||||
mov 4*4(CTX), e
|
||||
mov 4*5(CTX), f
|
||||
mov 4*6(CTX), g
|
||||
mov 4*7(CTX), h
|
||||
|
||||
vmovdqa PSHUFFLE_BYTE_FLIP_MASK(%rip), BYTE_FLIP_MASK
|
||||
vmovdqa _SHUF_00BA(%rip), SHUF_00BA
|
||||
vmovdqa _SHUF_DC00(%rip), SHUF_DC00
|
||||
loop0:
|
||||
lea K256(%rip), TBL
|
||||
|
||||
## byte swap first 16 dwords
|
||||
COPY_XMM_AND_BSWAP X0, 0*16(INP), BYTE_FLIP_MASK
|
||||
COPY_XMM_AND_BSWAP X1, 1*16(INP), BYTE_FLIP_MASK
|
||||
COPY_XMM_AND_BSWAP X2, 2*16(INP), BYTE_FLIP_MASK
|
||||
COPY_XMM_AND_BSWAP X3, 3*16(INP), BYTE_FLIP_MASK
|
||||
|
||||
mov INP, _INP(%rsp)
|
||||
|
||||
## schedule 48 input dwords, by doing 3 rounds of 16 each
|
||||
mov $3, SRND
|
||||
.align 16
|
||||
loop1:
|
||||
vpaddd (TBL), X0, XFER
|
||||
vmovdqa XFER, _XFER(%rsp)
|
||||
FOUR_ROUNDS_AND_SCHED
|
||||
|
||||
vpaddd 1*16(TBL), X0, XFER
|
||||
vmovdqa XFER, _XFER(%rsp)
|
||||
FOUR_ROUNDS_AND_SCHED
|
||||
|
||||
vpaddd 2*16(TBL), X0, XFER
|
||||
vmovdqa XFER, _XFER(%rsp)
|
||||
FOUR_ROUNDS_AND_SCHED
|
||||
|
||||
vpaddd 3*16(TBL), X0, XFER
|
||||
vmovdqa XFER, _XFER(%rsp)
|
||||
add $4*16, TBL
|
||||
FOUR_ROUNDS_AND_SCHED
|
||||
|
||||
sub $1, SRND
|
||||
jne loop1
|
||||
|
||||
mov $2, SRND
|
||||
loop2:
|
||||
vpaddd (TBL), X0, XFER
|
||||
vmovdqa XFER, _XFER(%rsp)
|
||||
DO_ROUND 0
|
||||
DO_ROUND 1
|
||||
DO_ROUND 2
|
||||
DO_ROUND 3
|
||||
|
||||
vpaddd 1*16(TBL), X1, XFER
|
||||
vmovdqa XFER, _XFER(%rsp)
|
||||
add $2*16, TBL
|
||||
DO_ROUND 0
|
||||
DO_ROUND 1
|
||||
DO_ROUND 2
|
||||
DO_ROUND 3
|
||||
|
||||
vmovdqa X2, X0
|
||||
vmovdqa X3, X1
|
||||
|
||||
sub $1, SRND
|
||||
jne loop2
|
||||
|
||||
addm (4*0)(CTX),a
|
||||
addm (4*1)(CTX),b
|
||||
addm (4*2)(CTX),c
|
||||
addm (4*3)(CTX),d
|
||||
addm (4*4)(CTX),e
|
||||
addm (4*5)(CTX),f
|
||||
addm (4*6)(CTX),g
|
||||
addm (4*7)(CTX),h
|
||||
|
||||
mov _INP(%rsp), INP
|
||||
add $64, INP
|
||||
cmp _INP_END(%rsp), INP
|
||||
jne loop0
|
||||
|
||||
done_hash:
|
||||
|
||||
mov %r12, %rsp
|
||||
|
||||
popq %r12
|
||||
popq %r15
|
||||
popq %r14
|
||||
popq %r13
|
||||
popq %rbp
|
||||
popq %rbx
|
||||
ret
|
||||
ENDPROC(sha256_transform_avx)
|
||||
|
||||
.data
|
||||
.align 64
|
||||
K256:
|
||||
.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
|
||||
.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
|
||||
.long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
|
||||
.long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
|
||||
.long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
|
||||
.long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
|
||||
.long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
|
||||
.long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
|
||||
.long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
|
||||
.long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
|
||||
.long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
|
||||
.long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
|
||||
.long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
|
||||
.long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
|
||||
.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
|
||||
.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
|
||||
|
||||
PSHUFFLE_BYTE_FLIP_MASK:
|
||||
.octa 0x0c0d0e0f08090a0b0405060700010203
|
||||
|
||||
# shuffle xBxA -> 00BA
|
||||
_SHUF_00BA:
|
||||
.octa 0xFFFFFFFFFFFFFFFF0b0a090803020100
|
||||
|
||||
# shuffle xDxC -> DC00
|
||||
_SHUF_DC00:
|
||||
.octa 0x0b0a090803020100FFFFFFFFFFFFFFFF
770  pkgs/crypto/sha256/sha256-avx2-asm.S  Normal file
@@ -0,0 +1,770 @@
|
||||
########################################################################
|
||||
# Implement fast SHA-256 with AVX2 instructions. (x86_64)
|
||||
#
|
||||
# Copyright (C) 2013 Intel Corporation.
|
||||
#
|
||||
# Authors:
|
||||
# James Guilford <james.guilford@intel.com>
|
||||
# Kirk Yap <kirk.s.yap@intel.com>
|
||||
# Tim Chen <tim.c.chen@linux.intel.com>
|
||||
#
|
||||
# This software is available to you under a choice of one of two
|
||||
# licenses. You may choose to be licensed under the terms of the GNU
|
||||
# General Public License (GPL) Version 2, available from the file
|
||||
# COPYING in the main directory of this source tree, or the
|
||||
# OpenIB.org BSD license below:
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or
|
||||
# without modification, are permitted provided that the following
|
||||
# conditions are met:
|
||||
#
|
||||
# - Redistributions of source code must retain the above
|
||||
# copyright notice, this list of conditions and the following
|
||||
# disclaimer.
|
||||
#
|
||||
# - Redistributions in binary form must reproduce the above
|
||||
# copyright notice, this list of conditions and the following
|
||||
# disclaimer in the documentation and/or other materials
|
||||
# provided with the distribution.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
# SOFTWARE.
|
||||
#
|
||||
########################################################################
|
||||
#
|
||||
# This code is described in an Intel White-Paper:
|
||||
# "Fast SHA-256 Implementations on Intel Architecture Processors"
|
||||
#
|
||||
# To find it, surf to http://www.intel.com/p/en_US/embedded
|
||||
# and search for that title.
|
||||
#
|
||||
########################################################################
|
||||
# This code schedules 2 blocks at a time, with 4 lanes per block
|
||||
########################################################################
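Once this AVX2 path (which schedules two blocks per iteration and leans on the BMI2 rorx instruction visible below) is wired into Go, a throughput comparison against the generic path is the obvious sanity check. A minimal benchmark sketch (hypothetical package name; it exercises only the standard library today):

package sha256bench

import (
	"crypto/sha256"
	"testing"
)

// BenchmarkSHA256_8K measures single-goroutine throughput on 8 KiB buffers,
// large enough for multi-block scheduling to show up in the numbers.
func BenchmarkSHA256_8K(b *testing.B) {
	buf := make([]byte, 8<<10)
	b.SetBytes(int64(len(buf)))
	for i := 0; i < b.N; i++ {
		sha256.Sum256(buf)
	}
}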
#include "asm.S"
|
||||
|
||||
## assume buffers not aligned
|
||||
#define VMOVDQ vmovdqu
|
||||
|
||||
################################ Define Macros
|
||||
|
||||
# addm [mem], reg
|
||||
# Add reg to mem using reg-mem add and store
|
||||
.macro addm p1 p2
|
||||
add \p1, \p2
|
||||
mov \p2, \p1
|
||||
.endm
|
||||
|
||||
################################
|
||||
|
||||
X0 = %ymm4
|
||||
X1 = %ymm5
|
||||
X2 = %ymm6
|
||||
X3 = %ymm7
|
||||
|
||||
# XMM versions of above
|
||||
XWORD0 = %xmm4
|
||||
XWORD1 = %xmm5
|
||||
XWORD2 = %xmm6
|
||||
XWORD3 = %xmm7
|
||||
|
||||
XTMP0 = %ymm0
|
||||
XTMP1 = %ymm1
|
||||
XTMP2 = %ymm2
|
||||
XTMP3 = %ymm3
|
||||
XTMP4 = %ymm8
|
||||
XFER = %ymm9
|
||||
XTMP5 = %ymm11
|
||||
|
||||
SHUF_00BA = %ymm10 # shuffle xBxA -> 00BA
|
||||
SHUF_DC00 = %ymm12 # shuffle xDxC -> DC00
|
||||
BYTE_FLIP_MASK = %ymm13
|
||||
|
||||
X_BYTE_FLIP_MASK = %xmm13 # XMM version of BYTE_FLIP_MASK
|
||||
|
||||
NUM_BLKS = %rdx # 3rd arg
|
||||
CTX = %rsi # 2nd arg
|
||||
INP = %rdi # 1st arg
|
||||
c = %ecx
|
||||
d = %r8d
|
||||
e = %edx # clobbers NUM_BLKS
|
||||
y3 = %edi # clobbers INP
|
||||
|
||||
|
||||
TBL = %rbp
|
||||
SRND = CTX # SRND is same register as CTX
|
||||
|
||||
a = %eax
|
||||
b = %ebx
|
||||
f = %r9d
|
||||
g = %r10d
|
||||
h = %r11d
|
||||
old_h = %r11d
|
||||
|
||||
T1 = %r12d
|
||||
y0 = %r13d
|
||||
y1 = %r14d
|
||||
y2 = %r15d
|
||||
|
||||
|
||||
_XFER_SIZE = 2*64*4 # 2 blocks, 64 rounds, 4 bytes/round
|
||||
_XMM_SAVE_SIZE = 0
|
||||
_INP_END_SIZE = 8
|
||||
_INP_SIZE = 8
|
||||
_CTX_SIZE = 8
|
||||
_RSP_SIZE = 8
|
||||
|
||||
_XFER = 0
|
||||
_XMM_SAVE = _XFER + _XFER_SIZE
|
||||
_INP_END = _XMM_SAVE + _XMM_SAVE_SIZE
|
||||
_INP = _INP_END + _INP_END_SIZE
|
||||
_CTX = _INP + _INP_SIZE
|
||||
_RSP = _CTX + _CTX_SIZE
|
||||
STACK_SIZE = _RSP + _RSP_SIZE
|
||||
|
||||
# rotate_Xs
|
||||
# Rotate values of symbols X0...X3
|
||||
.macro rotate_Xs
|
||||
X_ = X0
|
||||
X0 = X1
|
||||
X1 = X2
|
||||
X2 = X3
|
||||
X3 = X_
|
||||
.endm
|
||||
|
||||
# ROTATE_ARGS
|
||||
# Rotate values of symbols a...h
|
||||
.macro ROTATE_ARGS
|
||||
old_h = h
|
||||
TMP_ = h
|
||||
h = g
|
||||
g = f
|
||||
f = e
|
||||
e = d
|
||||
d = c
|
||||
c = b
|
||||
b = a
|
||||
a = TMP_
|
||||
.endm
|
||||
|
||||
.macro FOUR_ROUNDS_AND_SCHED disp
|
||||
################################### RND N + 0 ############################
|
||||
|
||||
mov a, y3 # y3 = a # MAJA
|
||||
rorx $25, e, y0 # y0 = e >> 25 # S1A
|
||||
rorx $11, e, y1 # y1 = e >> 11 # S1B
|
||||
|
||||
addl \disp(%rsp, SRND), h # h = k + w + h # --
|
||||
or c, y3 # y3 = a|c # MAJA
|
||||
vpalignr $4, X2, X3, XTMP0 # XTMP0 = W[-7]
|
||||
mov f, y2 # y2 = f # CH
|
||||
rorx $13, a, T1 # T1 = a >> 13 # S0B
|
||||
|
||||
xor y1, y0 # y0 = (e>>25) ^ (e>>11) # S1
|
||||
xor g, y2 # y2 = f^g # CH
|
||||
vpaddd X0, XTMP0, XTMP0 # XTMP0 = W[-7] + W[-16]# y1 = (e >> 6)# S1
|
||||
rorx $6, e, y1 # y1 = (e >> 6) # S1
|
||||
|
||||
and e, y2 # y2 = (f^g)&e # CH
|
||||
xor y1, y0 # y0 = (e>>25) ^ (e>>11) ^ (e>>6) # S1
|
||||
rorx $22, a, y1 # y1 = a >> 22 # S0A
|
||||
add h, d # d = k + w + h + d # --
|
||||
|
||||
and b, y3 # y3 = (a|c)&b # MAJA
|
||||
vpalignr $4, X0, X1, XTMP1 # XTMP1 = W[-15]
|
||||
xor T1, y1 # y1 = (a>>22) ^ (a>>13) # S0
|
||||
rorx $2, a, T1 # T1 = (a >> 2) # S0
|
||||
|
||||
xor g, y2 # y2 = CH = ((f^g)&e)^g # CH
|
||||
vpsrld $7, XTMP1, XTMP2
|
||||
xor T1, y1 # y1 = (a>>22) ^ (a>>13) ^ (a>>2) # S0
|
||||
mov a, T1 # T1 = a # MAJB
|
||||
and c, T1 # T1 = a&c # MAJB
|
||||
|
||||
add y0, y2 # y2 = S1 + CH # --
|
||||
vpslld $(32-7), XTMP1, XTMP3
|
||||
or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ
|
||||
add y1, h # h = k + w + h + S0 # --
|
||||
|
||||
add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
|
||||
vpor XTMP2, XTMP3, XTMP3 # XTMP3 = W[-15] ror 7
|
||||
|
||||
vpsrld $18, XTMP1, XTMP2
|
||||
add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0# --
|
||||
add y3, h # h = t1 + S0 + MAJ # --
|
||||
|
||||
|
||||
ROTATE_ARGS
|
||||
|
||||
################################### RND N + 1 ############################
|
||||
|
||||
mov a, y3 # y3 = a # MAJA
|
||||
rorx $25, e, y0 # y0 = e >> 25 # S1A
|
||||
rorx $11, e, y1 # y1 = e >> 11 # S1B
|
||||
offset = \disp + 1*4
|
||||
addl offset(%rsp, SRND), h # h = k + w + h # --
|
||||
or c, y3 # y3 = a|c # MAJA
|
||||
|
||||
|
||||
vpsrld $3, XTMP1, XTMP4 # XTMP4 = W[-15] >> 3
|
||||
mov f, y2 # y2 = f # CH
|
||||
rorx $13, a, T1 # T1 = a >> 13 # S0B
|
||||
xor y1, y0 # y0 = (e>>25) ^ (e>>11) # S1
|
||||
xor g, y2 # y2 = f^g # CH
|
||||
|
||||
|
||||
rorx $6, e, y1 # y1 = (e >> 6) # S1
|
||||
xor y1, y0 # y0 = (e>>25) ^ (e>>11) ^ (e>>6) # S1
|
||||
rorx $22, a, y1 # y1 = a >> 22 # S0A
|
||||
and e, y2 # y2 = (f^g)&e # CH
|
||||
add h, d # d = k + w + h + d # --
|
||||
|
||||
vpslld $(32-18), XTMP1, XTMP1
|
||||
and b, y3 # y3 = (a|c)&b # MAJA
|
||||
xor T1, y1 # y1 = (a>>22) ^ (a>>13) # S0
|
||||
|
||||
vpxor XTMP1, XTMP3, XTMP3
|
||||
rorx $2, a, T1 # T1 = (a >> 2) # S0
|
||||
xor g, y2 # y2 = CH = ((f^g)&e)^g # CH
|
||||
|
||||
vpxor XTMP2, XTMP3, XTMP3 # XTMP3 = W[-15] ror 7 ^ W[-15] ror 18
|
||||
xor T1, y1 # y1 = (a>>22) ^ (a>>13) ^ (a>>2) # S0
|
||||
mov a, T1 # T1 = a # MAJB
|
||||
and c, T1 # T1 = a&c # MAJB
|
||||
add y0, y2 # y2 = S1 + CH # --
|
||||
|
||||
vpxor XTMP4, XTMP3, XTMP1 # XTMP1 = s0
|
||||
vpshufd $0b11111010, X3, XTMP2 # XTMP2 = W[-2] {BBAA}
|
||||
or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ
|
||||
add y1, h # h = k + w + h + S0 # --
|
||||
|
||||
vpaddd XTMP1, XTMP0, XTMP0 # XTMP0 = W[-16] + W[-7] + s0
|
||||
add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
|
||||
add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0# --
|
||||
add y3, h # h = t1 + S0 + MAJ # --
|
||||
|
||||
vpsrld $10, XTMP2, XTMP4 # XTMP4 = W[-2] >> 10 {BBAA}
|
||||
|
||||
|
||||
ROTATE_ARGS
|
||||
|
||||
################################### RND N + 2 ############################
|
||||
|
||||
mov a, y3 # y3 = a # MAJA
|
||||
rorx $25, e, y0 # y0 = e >> 25 # S1A
|
||||
offset = \disp + 2*4
|
||||
addl offset(%rsp, SRND), h # h = k + w + h # --
|
||||
|
||||
vpsrlq $19, XTMP2, XTMP3 # XTMP3 = W[-2] ror 19 {xBxA}
|
||||
rorx $11, e, y1 # y1 = e >> 11 # S1B
|
||||
or c, y3 # y3 = a|c # MAJA
|
||||
mov f, y2 # y2 = f # CH
|
||||
xor g, y2 # y2 = f^g # CH
|
||||
|
||||
rorx $13, a, T1 # T1 = a >> 13 # S0B
|
||||
xor y1, y0 # y0 = (e>>25) ^ (e>>11) # S1
|
||||
vpsrlq $17, XTMP2, XTMP2 # XTMP2 = W[-2] ror 17 {xBxA}
|
||||
and e, y2 # y2 = (f^g)&e # CH
|
||||
|
||||
rorx $6, e, y1 # y1 = (e >> 6) # S1
|
||||
vpxor XTMP3, XTMP2, XTMP2
|
||||
add h, d # d = k + w + h + d # --
|
||||
and b, y3 # y3 = (a|c)&b # MAJA
|
||||
|
||||
xor y1, y0 # y0 = (e>>25) ^ (e>>11) ^ (e>>6) # S1
|
||||
rorx $22, a, y1 # y1 = a >> 22 # S0A
|
||||
vpxor XTMP2, XTMP4, XTMP4 # XTMP4 = s1 {xBxA}
|
||||
xor g, y2 # y2 = CH = ((f^g)&e)^g # CH
|
||||
|
||||
vpshufb SHUF_00BA, XTMP4, XTMP4 # XTMP4 = s1 {00BA}
|
||||
xor T1, y1 # y1 = (a>>22) ^ (a>>13) # S0
|
||||
rorx $2, a ,T1 # T1 = (a >> 2) # S0
|
||||
vpaddd XTMP4, XTMP0, XTMP0 # XTMP0 = {..., ..., W[1], W[0]}
|
||||
|
||||
xor T1, y1 # y1 = (a>>22) ^ (a>>13) ^ (a>>2) # S0
|
||||
mov a, T1 # T1 = a # MAJB
|
||||
and c, T1 # T1 = a&c # MAJB
|
||||
add y0, y2 # y2 = S1 + CH # --
|
||||
vpshufd $0b01010000, XTMP0, XTMP2 # XTMP2 = W[-2] {DDCC}
|
||||
|
||||
or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ
|
||||
add y1,h # h = k + w + h + S0 # --
|
||||
add y2,d # d = k + w + h + d + S1 + CH = d + t1 # --
|
||||
add y2,h # h = k + w + h + S0 + S1 + CH = t1 + S0# --
|
||||
|
||||
add y3,h # h = t1 + S0 + MAJ # --
|
||||
|
||||
|
||||
ROTATE_ARGS
|
||||
|
||||
################################### RND N + 3 ############################
|
||||
|
||||
mov a, y3 # y3 = a # MAJA
|
||||
rorx $25, e, y0 # y0 = e >> 25 # S1A
|
||||
rorx $11, e, y1 # y1 = e >> 11 # S1B
|
||||
offset = \disp + 3*4
|
||||
addl offset(%rsp, SRND), h # h = k + w + h # --
|
||||
or c, y3 # y3 = a|c # MAJA
|
||||
|
||||
|
||||
vpsrld $10, XTMP2, XTMP5 # XTMP5 = W[-2] >> 10 {DDCC}
|
||||
mov f, y2 # y2 = f # CH
|
||||
rorx $13, a, T1 # T1 = a >> 13 # S0B
|
||||
xor y1, y0 # y0 = (e>>25) ^ (e>>11) # S1
|
||||
xor g, y2 # y2 = f^g # CH
|
||||
|
||||
|
||||
vpsrlq $19, XTMP2, XTMP3 # XTMP3 = W[-2] ror 19 {xDxC}
|
||||
rorx $6, e, y1 # y1 = (e >> 6) # S1
|
||||
and e, y2 # y2 = (f^g)&e # CH
|
||||
add h, d # d = k + w + h + d # --
|
||||
and b, y3 # y3 = (a|c)&b # MAJA
|
||||
|
||||
vpsrlq $17, XTMP2, XTMP2 # XTMP2 = W[-2] ror 17 {xDxC}
|
||||
xor y1, y0 # y0 = (e>>25) ^ (e>>11) ^ (e>>6) # S1
|
||||
xor g, y2 # y2 = CH = ((f^g)&e)^g # CH
|
||||
|
||||
vpxor XTMP3, XTMP2, XTMP2
|
||||
rorx $22, a, y1 # y1 = a >> 22 # S0A
|
||||
add y0, y2 # y2 = S1 + CH # --
|
||||
|
||||
vpxor XTMP2, XTMP5, XTMP5 # XTMP5 = s1 {xDxC}
|
||||
xor T1, y1 # y1 = (a>>22) ^ (a>>13) # S0
|
||||
add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
|
||||
|
||||
rorx $2, a, T1 # T1 = (a >> 2) # S0
|
||||
vpshufb SHUF_DC00, XTMP5, XTMP5 # XTMP5 = s1 {DC00}
|
||||
|
||||
vpaddd XTMP0, XTMP5, X0 # X0 = {W[3], W[2], W[1], W[0]}
|
||||
xor T1, y1 # y1 = (a>>22) ^ (a>>13) ^ (a>>2) # S0
|
||||
mov a, T1 # T1 = a # MAJB
|
||||
and c, T1 # T1 = a&c # MAJB
|
||||
or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ
|
||||
|
||||
add y1, h # h = k + w + h + S0 # --
|
||||
add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0# --
|
||||
add y3, h # h = t1 + S0 + MAJ # --
|
||||
|
||||
ROTATE_ARGS
|
||||
rotate_Xs
|
||||
.endm
|
||||
|
||||
.macro DO_4ROUNDS disp
|
||||
################################### RND N + 0 ###########################
|
||||
|
||||
mov f, y2 # y2 = f # CH
|
||||
rorx $25, e, y0 # y0 = e >> 25 # S1A
|
||||
rorx $11, e, y1 # y1 = e >> 11 # S1B
|
||||
xor g, y2 # y2 = f^g # CH
|
||||
|
||||
xor y1, y0 # y0 = (e>>25) ^ (e>>11) # S1
|
||||
rorx $6, e, y1 # y1 = (e >> 6) # S1
|
||||
and e, y2 # y2 = (f^g)&e # CH
|
||||
|
||||
xor y1, y0 # y0 = (e>>25) ^ (e>>11) ^ (e>>6) # S1
|
||||
rorx $13, a, T1 # T1 = a >> 13 # S0B
|
||||
xor g, y2 # y2 = CH = ((f^g)&e)^g # CH
|
||||
rorx $22, a, y1 # y1 = a >> 22 # S0A
|
||||
mov a, y3 # y3 = a # MAJA
|
||||
|
||||
xor T1, y1 # y1 = (a>>22) ^ (a>>13) # S0
|
||||
rorx $2, a, T1 # T1 = (a >> 2) # S0
|
||||
addl \disp(%rsp, SRND), h # h = k + w + h # --
|
||||
or c, y3 # y3 = a|c # MAJA
|
||||
|
||||
xor T1, y1 # y1 = (a>>22) ^ (a>>13) ^ (a>>2) # S0
|
||||
mov a, T1 # T1 = a # MAJB
|
||||
and b, y3 # y3 = (a|c)&b # MAJA
|
||||
and c, T1 # T1 = a&c # MAJB
|
||||
add y0, y2 # y2 = S1 + CH # --
|
||||
|
||||
|
||||
add h, d # d = k + w + h + d # --
|
||||
or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ
|
||||
add y1, h # h = k + w + h + S0 # --
|
||||
add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
|
||||
|
||||
ROTATE_ARGS
|
||||
|
||||
################################### RND N + 1 ###########################
|
||||
|
||||
add y2, old_h # h = k + w + h + S0 + S1 + CH = t1 + S0# --
|
||||
mov f, y2 # y2 = f # CH
|
||||
rorx $25, e, y0 # y0 = e >> 25 # S1A
|
||||
rorx $11, e, y1 # y1 = e >> 11 # S1B
|
||||
xor g, y2 # y2 = f^g # CH
|
||||
|
||||
xor y1, y0 # y0 = (e>>25) ^ (e>>11) # S1
|
||||
rorx $6, e, y1 # y1 = (e >> 6) # S1
|
||||
and e, y2 # y2 = (f^g)&e # CH
|
||||
add y3, old_h # h = t1 + S0 + MAJ # --
|
||||
|
||||
xor y1, y0 # y0 = (e>>25) ^ (e>>11) ^ (e>>6) # S1
|
||||
rorx $13, a, T1 # T1 = a >> 13 # S0B
|
||||
xor g, y2 # y2 = CH = ((f^g)&e)^g # CH
|
||||
rorx $22, a, y1 # y1 = a >> 22 # S0A
|
||||
mov a, y3 # y3 = a # MAJA
|
||||
|
||||
xor T1, y1 # y1 = (a>>22) ^ (a>>13) # S0
|
||||
rorx $2, a, T1 # T1 = (a >> 2) # S0
|
||||
offset = 4*1 + \disp
|
||||
addl offset(%rsp, SRND), h # h = k + w + h # --
|
||||
or c, y3 # y3 = a|c # MAJA
|
||||
|
||||
xor T1, y1 # y1 = (a>>22) ^ (a>>13) ^ (a>>2) # S0
|
||||
mov a, T1 # T1 = a # MAJB
|
||||
and b, y3 # y3 = (a|c)&b # MAJA
|
||||
and c, T1 # T1 = a&c # MAJB
|
||||
add y0, y2 # y2 = S1 + CH # --
|
||||
|
||||
|
||||
add h, d # d = k + w + h + d # --
|
||||
or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ
|
||||
add y1, h # h = k + w + h + S0 # --
|
||||
|
||||
add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
|
||||
|
||||
ROTATE_ARGS
|
||||
|
||||
################################### RND N + 2 ##############################
|
||||
|
||||
add y2, old_h # h = k + w + h + S0 + S1 + CH = t1 + S0# --
|
||||
mov f, y2 # y2 = f # CH
|
||||
rorx $25, e, y0 # y0 = e >> 25 # S1A
|
||||
rorx $11, e, y1 # y1 = e >> 11 # S1B
|
||||
xor g, y2 # y2 = f^g # CH
|
||||
|
||||
xor y1, y0 # y0 = (e>>25) ^ (e>>11) # S1
|
||||
rorx $6, e, y1 # y1 = (e >> 6) # S1
|
||||
and e, y2 # y2 = (f^g)&e # CH
|
||||
add y3, old_h # h = t1 + S0 + MAJ # --
|
||||
|
||||
xor y1, y0 # y0 = (e>>25) ^ (e>>11) ^ (e>>6) # S1
|
||||
rorx $13, a, T1 # T1 = a >> 13 # S0B
|
||||
xor g, y2 # y2 = CH = ((f^g)&e)^g # CH
|
||||
rorx $22, a, y1 # y1 = a >> 22 # S0A
|
||||
mov a, y3 # y3 = a # MAJA
|
||||
|
||||
xor T1, y1 # y1 = (a>>22) ^ (a>>13) # S0
|
||||
rorx $2, a, T1 # T1 = (a >> 2) # S0
|
||||
offset = 4*2 + \disp
|
||||
addl offset(%rsp, SRND), h # h = k + w + h # --
|
||||
or c, y3 # y3 = a|c # MAJA
|
||||
|
||||
xor T1, y1 # y1 = (a>>22) ^ (a>>13) ^ (a>>2) # S0
|
||||
mov a, T1 # T1 = a # MAJB
|
||||
and b, y3 # y3 = (a|c)&b # MAJA
|
||||
and c, T1 # T1 = a&c # MAJB
|
||||
add y0, y2 # y2 = S1 + CH # --
|
||||
|
||||
|
||||
add h, d # d = k + w + h + d # --
|
||||
or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ
|
||||
add y1, h # h = k + w + h + S0 # --
|
||||
|
||||
add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
|
||||
|
||||
ROTATE_ARGS
|
||||
|
||||
################################### RND N + 3 ###########################
|
||||
|
||||
add y2, old_h # h = k + w + h + S0 + S1 + CH = t1 + S0# --
|
||||
mov f, y2 # y2 = f # CH
|
||||
rorx $25, e, y0 # y0 = e >> 25 # S1A
|
||||
rorx $11, e, y1 # y1 = e >> 11 # S1B
|
||||
xor g, y2 # y2 = f^g # CH
|
||||
|
||||
xor y1, y0 # y0 = (e>>25) ^ (e>>11) # S1
|
||||
rorx $6, e, y1 # y1 = (e >> 6) # S1
|
||||
and e, y2 # y2 = (f^g)&e # CH
|
||||
add y3, old_h # h = t1 + S0 + MAJ # --
|
||||
|
||||
xor y1, y0 # y0 = (e>>25) ^ (e>>11) ^ (e>>6) # S1
|
||||
rorx $13, a, T1 # T1 = a >> 13 # S0B
|
||||
xor g, y2 # y2 = CH = ((f^g)&e)^g # CH
|
||||
rorx $22, a, y1 # y1 = a >> 22 # S0A
|
||||
mov a, y3 # y3 = a # MAJA
|
||||
|
||||
xor T1, y1 # y1 = (a>>22) ^ (a>>13) # S0
|
||||
rorx $2, a, T1 # T1 = (a >> 2) # S0
|
||||
offset = 4*3 + \disp
|
||||
addl offset(%rsp, SRND), h # h = k + w + h # --
|
||||
or c, y3 # y3 = a|c # MAJA
|
||||
|
||||
xor T1, y1 # y1 = (a>>22) ^ (a>>13) ^ (a>>2) # S0
|
||||
mov a, T1 # T1 = a # MAJB
|
||||
and b, y3 # y3 = (a|c)&b # MAJA
|
||||
and c, T1 # T1 = a&c # MAJB
|
||||
add y0, y2 # y2 = S1 + CH # --
|
||||
|
||||
|
||||
add h, d # d = k + w + h + d # --
|
||||
or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ
|
||||
add y1, h # h = k + w + h + S0 # --
|
||||
|
||||
add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
|
||||
|
||||
|
||||
add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0# --
|
||||
|
||||
add y3, h # h = t1 + S0 + MAJ # --
|
||||
|
||||
ROTATE_ARGS
|
||||
|
||||
.endm
|
||||
|
||||
########################################################################
|
||||
## void sha256_transform_rorx(void *input_data, UINT32 digest[8], UINT64 num_blks)
|
||||
## arg 1 : pointer to input data
|
||||
## arg 2 : pointer to digest
|
||||
## arg 3 : Num blocks
|
||||
########################################################################
|
||||
.text
|
||||
ENTRY(sha256_transform_rorx)
|
||||
.align 32
|
||||
pushq %rbx
|
||||
pushq %rbp
|
||||
pushq %r12
|
||||
pushq %r13
|
||||
pushq %r14
|
||||
pushq %r15
|
||||
|
||||
mov %rsp, %rax
|
||||
subq $STACK_SIZE, %rsp
|
||||
and $-32, %rsp # align rsp to 32 byte boundary
|
||||
mov %rax, _RSP(%rsp)
|
||||
|
||||
|
||||
shl $6, NUM_BLKS # convert to bytes
|
||||
jz done_hash
|
||||
lea -64(INP, NUM_BLKS), NUM_BLKS # pointer to last block
|
||||
mov NUM_BLKS, _INP_END(%rsp)
|
||||
|
||||
cmp NUM_BLKS, INP
|
||||
je only_one_block
|
||||
|
||||
## load initial digest
|
||||
mov (CTX), a
|
||||
mov 4*1(CTX), b
|
||||
mov 4*2(CTX), c
|
||||
mov 4*3(CTX), d
|
||||
mov 4*4(CTX), e
|
||||
mov 4*5(CTX), f
|
||||
mov 4*6(CTX), g
|
||||
mov 4*7(CTX), h
|
||||
|
||||
vmovdqa PSHUFFLE_BYTE_FLIP_MASK(%rip), BYTE_FLIP_MASK
|
||||
vmovdqa _SHUF_00BA(%rip), SHUF_00BA
|
||||
vmovdqa _SHUF_DC00(%rip), SHUF_DC00
|
||||
|
||||
mov CTX, _CTX(%rsp)
|
||||
|
||||
loop0:
|
||||
lea K256(%rip), TBL
|
||||
|
||||
## Load first 16 dwords from two blocks
|
||||
VMOVDQ 0*32(INP),XTMP0
|
||||
VMOVDQ 1*32(INP),XTMP1
|
||||
VMOVDQ 2*32(INP),XTMP2
|
||||
VMOVDQ 3*32(INP),XTMP3
|
||||
|
||||
## byte swap data
|
||||
vpshufb BYTE_FLIP_MASK, XTMP0, XTMP0
|
||||
vpshufb BYTE_FLIP_MASK, XTMP1, XTMP1
|
||||
vpshufb BYTE_FLIP_MASK, XTMP2, XTMP2
|
||||
vpshufb BYTE_FLIP_MASK, XTMP3, XTMP3
|
||||
|
||||
## transpose data into high/low halves
|
||||
vperm2i128 $0x20, XTMP2, XTMP0, X0
|
||||
vperm2i128 $0x31, XTMP2, XTMP0, X1
|
||||
vperm2i128 $0x20, XTMP3, XTMP1, X2
|
||||
vperm2i128 $0x31, XTMP3, XTMP1, X3
|
||||
|
||||
last_block_enter:
|
||||
add $64, INP
|
||||
mov INP, _INP(%rsp)
|
||||
|
||||
## schedule 48 input dwords, by doing 3 rounds of 12 each
|
||||
xor SRND, SRND
|
||||
|
||||
.align 16
|
||||
loop1:
|
||||
vpaddd 0*32(TBL, SRND), X0, XFER
|
||||
vmovdqa XFER, 0*32+_XFER(%rsp, SRND)
|
||||
FOUR_ROUNDS_AND_SCHED _XFER + 0*32
|
||||
|
||||
vpaddd 1*32(TBL, SRND), X0, XFER
|
||||
vmovdqa XFER, 1*32+_XFER(%rsp, SRND)
|
||||
FOUR_ROUNDS_AND_SCHED _XFER + 1*32
|
||||
|
||||
vpaddd 2*32(TBL, SRND), X0, XFER
|
||||
vmovdqa XFER, 2*32+_XFER(%rsp, SRND)
|
||||
FOUR_ROUNDS_AND_SCHED _XFER + 2*32
|
||||
|
||||
vpaddd 3*32(TBL, SRND), X0, XFER
|
||||
vmovdqa XFER, 3*32+_XFER(%rsp, SRND)
|
||||
FOUR_ROUNDS_AND_SCHED _XFER + 3*32
|
||||
|
||||
add $4*32, SRND
|
||||
cmp $3*4*32, SRND
|
||||
jb loop1
|
||||
|
||||
loop2:
|
||||
## Do last 16 rounds with no scheduling
|
||||
vpaddd 0*32(TBL, SRND), X0, XFER
|
||||
vmovdqa XFER, 0*32+_XFER(%rsp, SRND)
|
||||
DO_4ROUNDS _XFER + 0*32
|
||||
vpaddd 1*32(TBL, SRND), X1, XFER
|
||||
vmovdqa XFER, 1*32+_XFER(%rsp, SRND)
|
||||
DO_4ROUNDS _XFER + 1*32
|
||||
add $2*32, SRND
|
||||
|
||||
vmovdqa X2, X0
|
||||
vmovdqa X3, X1
|
||||
|
||||
cmp $4*4*32, SRND
|
||||
jb loop2
|
||||
|
||||
mov _CTX(%rsp), CTX
|
||||
mov _INP(%rsp), INP
|
||||
|
||||
addm (4*0)(CTX),a
|
||||
addm (4*1)(CTX),b
|
||||
addm (4*2)(CTX),c
|
||||
addm (4*3)(CTX),d
|
||||
addm (4*4)(CTX),e
|
||||
addm (4*5)(CTX),f
|
||||
addm (4*6)(CTX),g
|
||||
addm (4*7)(CTX),h
|
||||
|
||||
cmp _INP_END(%rsp), INP
|
||||
ja done_hash
|
||||
|
||||
#### Do second block using previously scheduled results
|
||||
xor SRND, SRND
|
||||
.align 16
|
||||
loop3:
|
||||
DO_4ROUNDS _XFER + 0*32 + 16
|
||||
DO_4ROUNDS _XFER + 1*32 + 16
|
||||
add $2*32, SRND
|
||||
cmp $4*4*32, SRND
|
||||
jb loop3
|
||||
|
||||
mov _CTX(%rsp), CTX
|
||||
mov _INP(%rsp), INP
|
||||
add $64, INP
|
||||
|
||||
addm (4*0)(CTX),a
|
||||
addm (4*1)(CTX),b
|
||||
addm (4*2)(CTX),c
|
||||
addm (4*3)(CTX),d
|
||||
addm (4*4)(CTX),e
|
||||
addm (4*5)(CTX),f
|
||||
addm (4*6)(CTX),g
|
||||
addm (4*7)(CTX),h
|
||||
|
||||
cmp _INP_END(%rsp), INP
|
||||
jb loop0
|
||||
ja done_hash
|
||||
|
||||
do_last_block:
|
||||
#### do last block
|
||||
lea K256(%rip), TBL
|
||||
|
||||
VMOVDQ 0*16(INP),XWORD0
|
||||
VMOVDQ 1*16(INP),XWORD1
|
||||
VMOVDQ 2*16(INP),XWORD2
|
||||
VMOVDQ 3*16(INP),XWORD3
|
||||
|
||||
vpshufb X_BYTE_FLIP_MASK, XWORD0, XWORD0
|
||||
vpshufb X_BYTE_FLIP_MASK, XWORD1, XWORD1
|
||||
vpshufb X_BYTE_FLIP_MASK, XWORD2, XWORD2
|
||||
vpshufb X_BYTE_FLIP_MASK, XWORD3, XWORD3
|
||||
|
||||
jmp last_block_enter
|
||||
|
||||
only_one_block:
|
||||
|
||||
## load initial digest
|
||||
mov (4*0)(CTX),a
|
||||
mov (4*1)(CTX),b
|
||||
mov (4*2)(CTX),c
|
||||
mov (4*3)(CTX),d
|
||||
mov (4*4)(CTX),e
|
||||
mov (4*5)(CTX),f
|
||||
mov (4*6)(CTX),g
|
||||
mov (4*7)(CTX),h
|
||||
|
||||
vmovdqa PSHUFFLE_BYTE_FLIP_MASK(%rip), BYTE_FLIP_MASK
|
||||
vmovdqa _SHUF_00BA(%rip), SHUF_00BA
|
||||
vmovdqa _SHUF_DC00(%rip), SHUF_DC00
|
||||
|
||||
mov CTX, _CTX(%rsp)
|
||||
jmp do_last_block
|
||||
|
||||
done_hash:
|
||||
|
||||
mov _RSP(%rsp), %rsp
|
||||
|
||||
popq %r15
|
||||
popq %r14
|
||||
popq %r13
|
||||
popq %r12
|
||||
popq %rbp
|
||||
popq %rbx
|
||||
ret
|
||||
ENDPROC(sha256_transform_rorx)
|
||||
|
||||
.data
|
||||
.align 64
|
||||
K256:
|
||||
.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
|
||||
.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
|
||||
.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
|
||||
.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
|
||||
.long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
|
||||
.long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
|
||||
.long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
|
||||
.long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
|
||||
.long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
|
||||
.long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
|
||||
.long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
|
||||
.long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
|
||||
.long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
|
||||
.long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
|
||||
.long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
|
||||
.long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
|
||||
.long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
|
||||
.long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
|
||||
.long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
|
||||
.long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
|
||||
.long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
|
||||
.long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
|
||||
.long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
|
||||
.long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
|
||||
.long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
|
||||
.long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
|
||||
.long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
|
||||
.long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
|
||||
.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
|
||||
.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
|
||||
.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
|
||||
.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
|
||||
|
||||
PSHUFFLE_BYTE_FLIP_MASK:
|
||||
.octa 0x0c0d0e0f08090a0b0405060700010203,0x0c0d0e0f08090a0b0405060700010203
|
||||
|
||||
# shuffle xBxA -> 00BA
|
||||
_SHUF_00BA:
|
||||
.octa 0xFFFFFFFFFFFFFFFF0b0a090803020100,0xFFFFFFFFFFFFFFFF0b0a090803020100
|
||||
|
||||
# shuffle xDxC -> DC00
|
||||
_SHUF_DC00:
|
||||
.octa 0x0b0a090803020100FFFFFFFFFFFFFFFF,0x0b0a090803020100FFFFFFFFFFFFFFFF
|
||||
506
pkgs/crypto/sha256/sha256-ssse3-asm.S
Normal file
@@ -0,0 +1,506 @@
|
||||
########################################################################
|
||||
# Implement fast SHA-256 with SSSE3 instructions. (x86_64)
|
||||
#
|
||||
# Copyright (C) 2013 Intel Corporation.
|
||||
#
|
||||
# Authors:
|
||||
# James Guilford <james.guilford@intel.com>
|
||||
# Kirk Yap <kirk.s.yap@intel.com>
|
||||
# Tim Chen <tim.c.chen@linux.intel.com>
|
||||
#
|
||||
# This software is available to you under a choice of one of two
|
||||
# licenses. You may choose to be licensed under the terms of the GNU
|
||||
# General Public License (GPL) Version 2, available from the file
|
||||
# COPYING in the main directory of this source tree, or the
|
||||
# OpenIB.org BSD license below:
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or
|
||||
# without modification, are permitted provided that the following
|
||||
# conditions are met:
|
||||
#
|
||||
# - Redistributions of source code must retain the above
|
||||
# copyright notice, this list of conditions and the following
|
||||
# disclaimer.
|
||||
#
|
||||
# - Redistributions in binary form must reproduce the above
|
||||
# copyright notice, this list of conditions and the following
|
||||
# disclaimer in the documentation and/or other materials
|
||||
# provided with the distribution.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
# SOFTWARE.
|
||||
#
|
||||
########################################################################
|
||||
#
|
||||
# This code is described in an Intel White-Paper:
|
||||
# "Fast SHA-256 Implementations on Intel Architecture Processors"
|
||||
#
|
||||
# To find it, surf to http://www.intel.com/p/en_US/embedded
|
||||
# and search for that title.
|
||||
#
|
||||
########################################################################
|
||||
|
||||
#include "asm.S"
|
||||
|
||||
## assume buffers not aligned
|
||||
#define MOVDQ movdqu
|
||||
|
||||
################################ Define Macros
|
||||
|
||||
# addm [mem], reg
|
||||
# Add reg to mem using reg-mem add and store
|
||||
.macro addm p1 p2
|
||||
add \p1, \p2
|
||||
mov \p2, \p1
|
||||
.endm
|
||||
|
||||
################################
|
||||
|
||||
# COPY_XMM_AND_BSWAP xmm, [mem], byte_flip_mask
|
||||
# Load xmm with mem and byte swap each dword
|
||||
.macro COPY_XMM_AND_BSWAP p1 p2 p3
|
||||
MOVDQ \p2, \p1
|
||||
pshufb \p3, \p1
|
||||
.endm
|
||||
|
||||
################################
|
||||
|
||||
X0 = %xmm4
|
||||
X1 = %xmm5
|
||||
X2 = %xmm6
|
||||
X3 = %xmm7
|
||||
|
||||
XTMP0 = %xmm0
|
||||
XTMP1 = %xmm1
|
||||
XTMP2 = %xmm2
|
||||
XTMP3 = %xmm3
|
||||
XTMP4 = %xmm8
|
||||
XFER = %xmm9
|
||||
|
||||
SHUF_00BA = %xmm10 # shuffle xBxA -> 00BA
|
||||
SHUF_DC00 = %xmm11 # shuffle xDxC -> DC00
|
||||
BYTE_FLIP_MASK = %xmm12
|
||||
|
||||
NUM_BLKS = %rdx # 3rd arg
|
||||
CTX = %rsi # 2nd arg
|
||||
INP = %rdi # 1st arg
|
||||
|
||||
SRND = %rdi # clobbers INP
|
||||
c = %ecx
|
||||
d = %r8d
|
||||
e = %edx
|
||||
TBL = %rbp
|
||||
a = %eax
|
||||
b = %ebx
|
||||
|
||||
f = %r9d
|
||||
g = %r10d
|
||||
h = %r11d
|
||||
|
||||
y0 = %r13d
|
||||
y1 = %r14d
|
||||
y2 = %r15d
|
||||
|
||||
|
||||
|
||||
_INP_END_SIZE = 8
|
||||
_INP_SIZE = 8
|
||||
_XFER_SIZE = 16
|
||||
_XMM_SAVE_SIZE = 0
|
||||
|
||||
_INP_END = 0
|
||||
_INP = _INP_END + _INP_END_SIZE
|
||||
_XFER = _INP + _INP_SIZE
|
||||
_XMM_SAVE = _XFER + _XFER_SIZE
|
||||
STACK_SIZE = _XMM_SAVE + _XMM_SAVE_SIZE
|
||||
|
||||
# rotate_Xs
|
||||
# Rotate values of symbols X0...X3
|
||||
.macro rotate_Xs
|
||||
X_ = X0
|
||||
X0 = X1
|
||||
X1 = X2
|
||||
X2 = X3
|
||||
X3 = X_
|
||||
.endm
|
||||
|
||||
# ROTATE_ARGS
|
||||
# Rotate values of symbols a...h
|
||||
.macro ROTATE_ARGS
|
||||
TMP_ = h
|
||||
h = g
|
||||
g = f
|
||||
f = e
|
||||
e = d
|
||||
d = c
|
||||
c = b
|
||||
b = a
|
||||
a = TMP_
|
||||
.endm
|
||||
|
||||
.macro FOUR_ROUNDS_AND_SCHED
|
||||
## compute s0 four at a time and s1 two at a time
|
||||
## compute W[-16] + W[-7] 4 at a time
|
||||
movdqa X3, XTMP0
|
||||
mov e, y0 # y0 = e
|
||||
ror $(25-11), y0 # y0 = e >> (25-11)
|
||||
mov a, y1 # y1 = a
|
||||
palignr $4, X2, XTMP0 # XTMP0 = W[-7]
|
||||
ror $(22-13), y1 # y1 = a >> (22-13)
|
||||
xor e, y0 # y0 = e ^ (e >> (25-11))
|
||||
mov f, y2 # y2 = f
|
||||
ror $(11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6))
|
||||
movdqa X1, XTMP1
|
||||
xor a, y1 # y1 = a ^ (a >> (22-13)
|
||||
xor g, y2 # y2 = f^g
|
||||
paddd X0, XTMP0 # XTMP0 = W[-7] + W[-16]
|
||||
xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
|
||||
and e, y2 # y2 = (f^g)&e
|
||||
ror $(13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2))
|
||||
## compute s0
|
||||
palignr $4, X0, XTMP1 # XTMP1 = W[-15]
|
||||
xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
|
||||
ror $6, y0 # y0 = S1 = (e>>6) & (e>>11) ^ (e>>25)
|
||||
xor g, y2 # y2 = CH = ((f^g)&e)^g
|
||||
movdqa XTMP1, XTMP2 # XTMP2 = W[-15]
|
||||
ror $2, y1 # y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22)
|
||||
add y0, y2 # y2 = S1 + CH
|
||||
add _XFER(%rsp) , y2 # y2 = k + w + S1 + CH
|
||||
movdqa XTMP1, XTMP3 # XTMP3 = W[-15]
|
||||
mov a, y0 # y0 = a
|
||||
add y2, h # h = h + S1 + CH + k + w
|
||||
mov a, y2 # y2 = a
|
||||
pslld $(32-7), XTMP1 #
|
||||
or c, y0 # y0 = a|c
|
||||
add h, d # d = d + h + S1 + CH + k + w
|
||||
and c, y2 # y2 = a&c
|
||||
psrld $7, XTMP2 #
|
||||
and b, y0 # y0 = (a|c)&b
|
||||
add y1, h # h = h + S1 + CH + k + w + S0
|
||||
por XTMP2, XTMP1 # XTMP1 = W[-15] ror 7
|
||||
or y2, y0 # y0 = MAJ = (a|c)&b)|(a&c)
|
||||
add y0, h # h = h + S1 + CH + k + w + S0 + MAJ
|
||||
#
|
||||
ROTATE_ARGS #
|
||||
movdqa XTMP3, XTMP2 # XTMP2 = W[-15]
|
||||
mov e, y0 # y0 = e
|
||||
mov a, y1 # y1 = a
|
||||
movdqa XTMP3, XTMP4 # XTMP4 = W[-15]
|
||||
ror $(25-11), y0 # y0 = e >> (25-11)
|
||||
xor e, y0 # y0 = e ^ (e >> (25-11))
|
||||
mov f, y2 # y2 = f
|
||||
ror $(22-13), y1 # y1 = a >> (22-13)
|
||||
pslld $(32-18), XTMP3 #
|
||||
xor a, y1 # y1 = a ^ (a >> (22-13)
|
||||
ror $(11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6))
|
||||
xor g, y2 # y2 = f^g
|
||||
psrld $18, XTMP2 #
|
||||
ror $(13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2))
|
||||
xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
|
||||
and e, y2 # y2 = (f^g)&e
|
||||
ror $6, y0 # y0 = S1 = (e>>6) & (e>>11) ^ (e>>25)
|
||||
pxor XTMP3, XTMP1
|
||||
xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
|
||||
xor g, y2 # y2 = CH = ((f^g)&e)^g
|
||||
psrld $3, XTMP4 # XTMP4 = W[-15] >> 3
|
||||
add y0, y2 # y2 = S1 + CH
|
||||
add (1*4 + _XFER)(%rsp), y2 # y2 = k + w + S1 + CH
|
||||
ror $2, y1 # y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22)
|
||||
pxor XTMP2, XTMP1 # XTMP1 = W[-15] ror 7 ^ W[-15] ror 18
|
||||
mov a, y0 # y0 = a
|
||||
add y2, h # h = h + S1 + CH + k + w
|
||||
mov a, y2 # y2 = a
|
||||
pxor XTMP4, XTMP1 # XTMP1 = s0
|
||||
or c, y0 # y0 = a|c
|
||||
add h, d # d = d + h + S1 + CH + k + w
|
||||
and c, y2 # y2 = a&c
|
||||
## compute low s1
|
||||
pshufd $0b11111010, X3, XTMP2 # XTMP2 = W[-2] {BBAA}
|
||||
and b, y0 # y0 = (a|c)&b
|
||||
add y1, h # h = h + S1 + CH + k + w + S0
|
||||
paddd XTMP1, XTMP0 # XTMP0 = W[-16] + W[-7] + s0
|
||||
or y2, y0 # y0 = MAJ = (a|c)&b)|(a&c)
|
||||
add y0, h # h = h + S1 + CH + k + w + S0 + MAJ
|
||||
|
||||
ROTATE_ARGS
|
||||
movdqa XTMP2, XTMP3 # XTMP3 = W[-2] {BBAA}
|
||||
mov e, y0 # y0 = e
|
||||
mov a, y1 # y1 = a
|
||||
ror $(25-11), y0 # y0 = e >> (25-11)
|
||||
movdqa XTMP2, XTMP4 # XTMP4 = W[-2] {BBAA}
|
||||
xor e, y0 # y0 = e ^ (e >> (25-11))
|
||||
ror $(22-13), y1 # y1 = a >> (22-13)
|
||||
mov f, y2 # y2 = f
|
||||
xor a, y1 # y1 = a ^ (a >> (22-13)
|
||||
ror $(11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6))
|
||||
psrlq $17, XTMP2 # XTMP2 = W[-2] ror 17 {xBxA}
|
||||
xor g, y2 # y2 = f^g
|
||||
psrlq $19, XTMP3 # XTMP3 = W[-2] ror 19 {xBxA}
|
||||
xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
|
||||
and e, y2 # y2 = (f^g)&e
|
||||
psrld $10, XTMP4 # XTMP4 = W[-2] >> 10 {BBAA}
|
||||
ror $(13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2))
|
||||
xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
|
||||
xor g, y2 # y2 = CH = ((f^g)&e)^g
|
||||
ror $6, y0 # y0 = S1 = (e>>6) & (e>>11) ^ (e>>25)
|
||||
pxor XTMP3, XTMP2
|
||||
add y0, y2 # y2 = S1 + CH
|
||||
ror $2, y1 # y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22)
|
||||
add (2*4 + _XFER)(%rsp), y2 # y2 = k + w + S1 + CH
|
||||
pxor XTMP2, XTMP4 # XTMP4 = s1 {xBxA}
|
||||
mov a, y0 # y0 = a
|
||||
add y2, h # h = h + S1 + CH + k + w
|
||||
mov a, y2 # y2 = a
|
||||
pshufb SHUF_00BA, XTMP4 # XTMP4 = s1 {00BA}
|
||||
or c, y0 # y0 = a|c
|
||||
add h, d # d = d + h + S1 + CH + k + w
|
||||
and c, y2 # y2 = a&c
|
||||
paddd XTMP4, XTMP0 # XTMP0 = {..., ..., W[1], W[0]}
|
||||
and b, y0 # y0 = (a|c)&b
|
||||
add y1, h # h = h + S1 + CH + k + w + S0
|
||||
## compute high s1
|
||||
pshufd $0b01010000, XTMP0, XTMP2 # XTMP2 = W[-2] {BBAA}
|
||||
or y2, y0 # y0 = MAJ = (a|c)&b)|(a&c)
|
||||
add y0, h # h = h + S1 + CH + k + w + S0 + MAJ
|
||||
#
|
||||
ROTATE_ARGS #
|
||||
movdqa XTMP2, XTMP3 # XTMP3 = W[-2] {DDCC}
|
||||
mov e, y0 # y0 = e
|
||||
ror $(25-11), y0 # y0 = e >> (25-11)
|
||||
mov a, y1 # y1 = a
|
||||
movdqa XTMP2, X0 # X0 = W[-2] {DDCC}
|
||||
ror $(22-13), y1 # y1 = a >> (22-13)
|
||||
xor e, y0 # y0 = e ^ (e >> (25-11))
|
||||
mov f, y2 # y2 = f
|
||||
ror $(11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6))
|
||||
psrlq $17, XTMP2 # XTMP2 = W[-2] ror 17 {xDxC}
|
||||
xor a, y1 # y1 = a ^ (a >> (22-13)
|
||||
xor g, y2 # y2 = f^g
|
||||
psrlq $19, XTMP3 # XTMP3 = W[-2] ror 19 {xDxC}
|
||||
xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25
|
||||
and e, y2 # y2 = (f^g)&e
|
||||
ror $(13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2))
|
||||
psrld $10, X0 # X0 = W[-2] >> 10 {DDCC}
|
||||
xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22
|
||||
ror $6, y0 # y0 = S1 = (e>>6) & (e>>11) ^ (e>>2
|
||||
xor g, y2 # y2 = CH = ((f^g)&e)^g
|
||||
pxor XTMP3, XTMP2 #
|
||||
ror $2, y1 # y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>2
|
||||
add y0, y2 # y2 = S1 + CH
|
||||
add (3*4 + _XFER)(%rsp), y2 # y2 = k + w + S1 + CH
|
||||
pxor XTMP2, X0 # X0 = s1 {xDxC}
|
||||
mov a, y0 # y0 = a
|
||||
add y2, h # h = h + S1 + CH + k + w
|
||||
mov a, y2 # y2 = a
|
||||
pshufb SHUF_DC00, X0 # X0 = s1 {DC00}
|
||||
or c, y0 # y0 = a|c
|
||||
add h, d # d = d + h + S1 + CH + k + w
|
||||
and c, y2 # y2 = a&c
|
||||
paddd XTMP0, X0 # X0 = {W[3], W[2], W[1], W[0]}
|
||||
and b, y0 # y0 = (a|c)&b
|
||||
add y1, h # h = h + S1 + CH + k + w + S0
|
||||
or y2, y0 # y0 = MAJ = (a|c)&b)|(a&c)
|
||||
add y0, h # h = h + S1 + CH + k + w + S0 + MAJ
|
||||
|
||||
ROTATE_ARGS
|
||||
rotate_Xs
|
||||
.endm
|
||||
|
||||
## input is [rsp + _XFER + %1 * 4]
|
||||
.macro DO_ROUND round
|
||||
mov e, y0 # y0 = e
|
||||
ror $(25-11), y0 # y0 = e >> (25-11)
|
||||
mov a, y1 # y1 = a
|
||||
xor e, y0 # y0 = e ^ (e >> (25-11))
|
||||
ror $(22-13), y1 # y1 = a >> (22-13)
|
||||
mov f, y2 # y2 = f
|
||||
xor a, y1 # y1 = a ^ (a >> (22-13)
|
||||
ror $(11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6))
|
||||
xor g, y2 # y2 = f^g
|
||||
xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
|
||||
ror $(13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2))
|
||||
and e, y2 # y2 = (f^g)&e
|
||||
xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
|
||||
ror $6, y0 # y0 = S1 = (e>>6) & (e>>11) ^ (e>>25)
|
||||
xor g, y2 # y2 = CH = ((f^g)&e)^g
|
||||
add y0, y2 # y2 = S1 + CH
|
||||
ror $2, y1 # y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22)
|
||||
offset = \round * 4 + _XFER
|
||||
add offset(%rsp), y2 # y2 = k + w + S1 + CH
|
||||
mov a, y0 # y0 = a
|
||||
add y2, h # h = h + S1 + CH + k + w
|
||||
mov a, y2 # y2 = a
|
||||
or c, y0 # y0 = a|c
|
||||
add h, d # d = d + h + S1 + CH + k + w
|
||||
and c, y2 # y2 = a&c
|
||||
and b, y0 # y0 = (a|c)&b
|
||||
add y1, h # h = h + S1 + CH + k + w + S0
|
||||
or y2, y0 # y0 = MAJ = (a|c)&b)|(a&c)
|
||||
add y0, h # h = h + S1 + CH + k + w + S0 + MAJ
|
||||
ROTATE_ARGS
|
||||
.endm
|
||||
|
||||
########################################################################
|
||||
## void sha256_transform_ssse3(void *input_data, UINT32 digest[8], UINT64 num_blks)
|
||||
## arg 1 : pointer to input data
|
||||
## arg 2 : pointer to digest
|
||||
## arg 3 : Num blocks
|
||||
########################################################################
|
||||
.text
|
||||
ENTRY(sha256_transform_ssse3)
|
||||
.align 32
|
||||
pushq %rbx
|
||||
pushq %rbp
|
||||
pushq %r13
|
||||
pushq %r14
|
||||
pushq %r15
|
||||
pushq %r12
|
||||
|
||||
mov %rsp, %r12
|
||||
subq $STACK_SIZE, %rsp
|
||||
and $~15, %rsp
|
||||
|
||||
shl $6, NUM_BLKS # convert to bytes
|
||||
jz done_hash
|
||||
add INP, NUM_BLKS
|
||||
mov NUM_BLKS, _INP_END(%rsp) # pointer to end of data
|
||||
|
||||
## load initial digest
|
||||
mov 4*0(CTX), a
|
||||
mov 4*1(CTX), b
|
||||
mov 4*2(CTX), c
|
||||
mov 4*3(CTX), d
|
||||
mov 4*4(CTX), e
|
||||
mov 4*5(CTX), f
|
||||
mov 4*6(CTX), g
|
||||
mov 4*7(CTX), h
|
||||
|
||||
movdqa PSHUFFLE_BYTE_FLIP_MASK(%rip), BYTE_FLIP_MASK
|
||||
movdqa _SHUF_00BA(%rip), SHUF_00BA
|
||||
movdqa _SHUF_DC00(%rip), SHUF_DC00
|
||||
|
||||
loop0:
|
||||
lea K256(%rip), TBL
|
||||
|
||||
## byte swap first 16 dwords
|
||||
COPY_XMM_AND_BSWAP X0, 0*16(INP), BYTE_FLIP_MASK
|
||||
COPY_XMM_AND_BSWAP X1, 1*16(INP), BYTE_FLIP_MASK
|
||||
COPY_XMM_AND_BSWAP X2, 2*16(INP), BYTE_FLIP_MASK
|
||||
COPY_XMM_AND_BSWAP X3, 3*16(INP), BYTE_FLIP_MASK
|
||||
|
||||
mov INP, _INP(%rsp)
|
||||
|
||||
## schedule 48 input dwords, by doing 3 rounds of 16 each
|
||||
mov $3, SRND
|
||||
.align 16
|
||||
loop1:
|
||||
movdqa (TBL), XFER
|
||||
paddd X0, XFER
|
||||
movdqa XFER, _XFER(%rsp)
|
||||
FOUR_ROUNDS_AND_SCHED
|
||||
|
||||
movdqa 1*16(TBL), XFER
|
||||
paddd X0, XFER
|
||||
movdqa XFER, _XFER(%rsp)
|
||||
FOUR_ROUNDS_AND_SCHED
|
||||
|
||||
movdqa 2*16(TBL), XFER
|
||||
paddd X0, XFER
|
||||
movdqa XFER, _XFER(%rsp)
|
||||
FOUR_ROUNDS_AND_SCHED
|
||||
|
||||
movdqa 3*16(TBL), XFER
|
||||
paddd X0, XFER
|
||||
movdqa XFER, _XFER(%rsp)
|
||||
add $4*16, TBL
|
||||
FOUR_ROUNDS_AND_SCHED
|
||||
|
||||
sub $1, SRND
|
||||
jne loop1
|
||||
|
||||
mov $2, SRND
|
||||
loop2:
|
||||
paddd (TBL), X0
|
||||
movdqa X0, _XFER(%rsp)
|
||||
DO_ROUND 0
|
||||
DO_ROUND 1
|
||||
DO_ROUND 2
|
||||
DO_ROUND 3
|
||||
paddd 1*16(TBL), X1
|
||||
movdqa X1, _XFER(%rsp)
|
||||
add $2*16, TBL
|
||||
DO_ROUND 0
|
||||
DO_ROUND 1
|
||||
DO_ROUND 2
|
||||
DO_ROUND 3
|
||||
|
||||
movdqa X2, X0
|
||||
movdqa X3, X1
|
||||
|
||||
sub $1, SRND
|
||||
jne loop2
|
||||
|
||||
addm (4*0)(CTX),a
|
||||
addm (4*1)(CTX),b
|
||||
addm (4*2)(CTX),c
|
||||
addm (4*3)(CTX),d
|
||||
addm (4*4)(CTX),e
|
||||
addm (4*5)(CTX),f
|
||||
addm (4*6)(CTX),g
|
||||
addm (4*7)(CTX),h
|
||||
|
||||
mov _INP(%rsp), INP
|
||||
add $64, INP
|
||||
cmp _INP_END(%rsp), INP
|
||||
jne loop0
|
||||
|
||||
done_hash:
|
||||
|
||||
mov %r12, %rsp
|
||||
|
||||
popq %r12
|
||||
popq %r15
|
||||
popq %r14
|
||||
popq %r13
|
||||
popq %rbp
|
||||
popq %rbx
|
||||
|
||||
ret
|
||||
ENDPROC(sha256_transform_ssse3)
|
||||
|
||||
.data
|
||||
.align 64
|
||||
K256:
|
||||
.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
|
||||
.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
|
||||
.long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
|
||||
.long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
|
||||
.long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
|
||||
.long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
|
||||
.long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
|
||||
.long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
|
||||
.long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
|
||||
.long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
|
||||
.long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
|
||||
.long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
|
||||
.long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
|
||||
.long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
|
||||
.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
|
||||
.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
|
||||
|
||||
PSHUFFLE_BYTE_FLIP_MASK:
|
||||
.octa 0x0c0d0e0f08090a0b0405060700010203
|
||||
|
||||
# shuffle xBxA -> 00BA
|
||||
_SHUF_00BA:
|
||||
.octa 0xFFFFFFFFFFFFFFFF0b0a090803020100
|
||||
|
||||
# shuffle xDxC -> DC00
|
||||
_SHUF_DC00:
|
||||
.octa 0x0b0a090803020100FFFFFFFFFFFFFFFF
|
||||
39
pkgs/crypto/sha256/sha256.go
Normal file
@@ -0,0 +1,39 @@
|
||||
// +build amd64

package sha256

// #include <stdint.h>
// void sha256_transform_avx (uint8_t *input_data, uint32_t digest[8], uint64_t num_blks);
// void sha256_transform_ssse3 (uint8_t *input_data, uint32_t digest[8], uint64_t num_blks);
// void sha256_transform_rorx (uint8_t *input_data, uint32_t digest[8], uint64_t num_blks);
// #define SHA256_DIGEST_SIZE 32
// #define SHA256_BLOCK_SIZE 64
// #define SHA256_H0 0x6a09e667UL
// #define SHA256_H1 0xbb67ae85UL
// #define SHA256_H2 0x3c6ef372UL
// #define SHA256_H3 0xa54ff53aUL
// #define SHA256_H4 0x510e527fUL
// #define SHA256_H5 0x9b05688cUL
// #define SHA256_H6 0x1f83d9abUL
// #define SHA256_H7 0x5be0cd19UL
import "C"

/*
func Sha256(buffer []byte) ([]uint32, error) {

	if cpu.HasSSE41() {
		C.sha256_transform_ssse3() // TODO: pass input, digest and block count
		return nil, nil
	}

	if cpu.HasAVX() {
		C.sha256_transform_avx() // TODO: pass input, digest and block count
		return nil, nil
	}

	if cpu.HasAVX2() {
		C.sha256_transform_rorx() // TODO: pass input, digest and block count
		return nil, nil
	}

	return nil, nil
}
*/
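
A possible follow-up for this placeholder, sketched below for illustration only (none of it is part of this commit): wire the cgo prototypes above into a block-level dispatcher. The file name, the use of golang.org/x/sys/cpu for feature detection, and the transformBlocks helper are assumptions; linking also requires the .S files above to be assembled into the package, and SHA-256 padding/length encoding would still have to be layered on top of this.

// +build amd64

// sha256_dispatch.go (hypothetical): pick the fastest transform for this CPU.
package sha256

// #include <stdint.h>
// void sha256_transform_ssse3(uint8_t *input_data, uint32_t digest[8], uint64_t num_blks);
// void sha256_transform_avx(uint8_t *input_data, uint32_t digest[8], uint64_t num_blks);
// void sha256_transform_rorx(uint8_t *input_data, uint32_t digest[8], uint64_t num_blks);
import "C"

import (
	"unsafe"

	"golang.org/x/sys/cpu" // stand-in for whatever feature detection the package settles on
)

// initState is the SHA-256 IV (SHA256_H0..SHA256_H7 listed above).
var initState = [8]uint32{
	0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
	0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19,
}

// transformBlocks feeds len(p)/64 whole 64-byte blocks through the fastest
// available transform, updating digest in place; trailing partial data is ignored.
func transformBlocks(digest *[8]uint32, p []byte) {
	n := C.uint64_t(len(p) / 64)
	if n == 0 {
		return
	}
	in := (*C.uint8_t)(unsafe.Pointer(&p[0]))
	st := (*C.uint32_t)(unsafe.Pointer(&digest[0]))
	switch {
	case cpu.X86.HasAVX2: // sha256_transform_rorx needs AVX2 + BMI2 (rorx)
		C.sha256_transform_rorx(in, st, n)
	case cpu.X86.HasAVX:
		C.sha256_transform_avx(in, st, n)
	case cpu.X86.HasSSE41: // the SSSE3 path; SSE4.1 implies SSSE3
		C.sha256_transform_ssse3(in, st, n)
	default:
		// fall back to a pure-Go implementation here (not shown)
	}
}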
|
||||
0
pkgs/crypto/sha512/TODO
Normal file
261
pkgs/crypto/sha512/asm.S
Normal file
@@ -0,0 +1,261 @@
|
||||
#ifndef ENTRY
|
||||
#define ENTRY(name) \
|
||||
.globl name ; \
|
||||
.align 4,0x90 ; \
|
||||
name:
|
||||
#endif
|
||||
|
||||
#ifndef END
|
||||
#define END(name) \
|
||||
.size name, .-name
|
||||
#endif
|
||||
|
||||
#ifndef ENDPROC
|
||||
#define ENDPROC(name) \
|
||||
.type name, @function ; \
|
||||
END(name)
|
||||
#endif
|
||||
|
||||
#define NUM_INVALID 100
|
||||
|
||||
#define TYPE_R32 0
|
||||
#define TYPE_R64 1
|
||||
#define TYPE_XMM 2
|
||||
#define TYPE_INVALID 100
|
||||
|
||||
.macro R32_NUM opd r32
|
||||
\opd = NUM_INVALID
|
||||
.ifc \r32,%eax
|
||||
\opd = 0
|
||||
.endif
|
||||
.ifc \r32,%ecx
|
||||
\opd = 1
|
||||
.endif
|
||||
.ifc \r32,%edx
|
||||
\opd = 2
|
||||
.endif
|
||||
.ifc \r32,%ebx
|
||||
\opd = 3
|
||||
.endif
|
||||
.ifc \r32,%esp
|
||||
\opd = 4
|
||||
.endif
|
||||
.ifc \r32,%ebp
|
||||
\opd = 5
|
||||
.endif
|
||||
.ifc \r32,%esi
|
||||
\opd = 6
|
||||
.endif
|
||||
.ifc \r32,%edi
|
||||
\opd = 7
|
||||
.endif
|
||||
#ifdef X86_64
|
||||
.ifc \r32,%r8d
|
||||
\opd = 8
|
||||
.endif
|
||||
.ifc \r32,%r9d
|
||||
\opd = 9
|
||||
.endif
|
||||
.ifc \r32,%r10d
|
||||
\opd = 10
|
||||
.endif
|
||||
.ifc \r32,%r11d
|
||||
\opd = 11
|
||||
.endif
|
||||
.ifc \r32,%r12d
|
||||
\opd = 12
|
||||
.endif
|
||||
.ifc \r32,%r13d
|
||||
\opd = 13
|
||||
.endif
|
||||
.ifc \r32,%r14d
|
||||
\opd = 14
|
||||
.endif
|
||||
.ifc \r32,%r15d
|
||||
\opd = 15
|
||||
.endif
|
||||
#endif
|
||||
.endm
|
||||
|
||||
.macro R64_NUM opd r64
|
||||
\opd = NUM_INVALID
|
||||
#ifdef X86_64
|
||||
.ifc \r64,%rax
|
||||
\opd = 0
|
||||
.endif
|
||||
.ifc \r64,%rcx
|
||||
\opd = 1
|
||||
.endif
|
||||
.ifc \r64,%rdx
|
||||
\opd = 2
|
||||
.endif
|
||||
.ifc \r64,%rbx
|
||||
\opd = 3
|
||||
.endif
|
||||
.ifc \r64,%rsp
|
||||
\opd = 4
|
||||
.endif
|
||||
.ifc \r64,%rbp
|
||||
\opd = 5
|
||||
.endif
|
||||
.ifc \r64,%rsi
|
||||
\opd = 6
|
||||
.endif
|
||||
.ifc \r64,%rdi
|
||||
\opd = 7
|
||||
.endif
|
||||
.ifc \r64,%r8
|
||||
\opd = 8
|
||||
.endif
|
||||
.ifc \r64,%r9
|
||||
\opd = 9
|
||||
.endif
|
||||
.ifc \r64,%r10
|
||||
\opd = 10
|
||||
.endif
|
||||
.ifc \r64,%r11
|
||||
\opd = 11
|
||||
.endif
|
||||
.ifc \r64,%r12
|
||||
\opd = 12
|
||||
.endif
|
||||
.ifc \r64,%r13
|
||||
\opd = 13
|
||||
.endif
|
||||
.ifc \r64,%r14
|
||||
\opd = 14
|
||||
.endif
|
||||
.ifc \r64,%r15
|
||||
\opd = 15
|
||||
.endif
|
||||
#endif
|
||||
.endm
|
||||
|
||||
.macro XMM_NUM opd xmm
|
||||
\opd = NUM_INVALID
|
||||
.ifc \xmm,%xmm0
|
||||
\opd = 0
|
||||
.endif
|
||||
.ifc \xmm,%xmm1
|
||||
\opd = 1
|
||||
.endif
|
||||
.ifc \xmm,%xmm2
|
||||
\opd = 2
|
||||
.endif
|
||||
.ifc \xmm,%xmm3
|
||||
\opd = 3
|
||||
.endif
|
||||
.ifc \xmm,%xmm4
|
||||
\opd = 4
|
||||
.endif
|
||||
.ifc \xmm,%xmm5
|
||||
\opd = 5
|
||||
.endif
|
||||
.ifc \xmm,%xmm6
|
||||
\opd = 6
|
||||
.endif
|
||||
.ifc \xmm,%xmm7
|
||||
\opd = 7
|
||||
.endif
|
||||
.ifc \xmm,%xmm8
|
||||
\opd = 8
|
||||
.endif
|
||||
.ifc \xmm,%xmm9
|
||||
\opd = 9
|
||||
.endif
|
||||
.ifc \xmm,%xmm10
|
||||
\opd = 10
|
||||
.endif
|
||||
.ifc \xmm,%xmm11
|
||||
\opd = 11
|
||||
.endif
|
||||
.ifc \xmm,%xmm12
|
||||
\opd = 12
|
||||
.endif
|
||||
.ifc \xmm,%xmm13
|
||||
\opd = 13
|
||||
.endif
|
||||
.ifc \xmm,%xmm14
|
||||
\opd = 14
|
||||
.endif
|
||||
.ifc \xmm,%xmm15
|
||||
\opd = 15
|
||||
.endif
|
||||
.endm
|
||||
|
||||
.macro TYPE type reg
|
||||
R32_NUM reg_type_r32 \reg
|
||||
R64_NUM reg_type_r64 \reg
|
||||
XMM_NUM reg_type_xmm \reg
|
||||
.if reg_type_r64 <> NUM_INVALID
|
||||
\type = TYPE_R64
|
||||
.elseif reg_type_r32 <> NUM_INVALID
|
||||
\type = TYPE_R32
|
||||
.elseif reg_type_xmm <> NUM_INVALID
|
||||
\type = TYPE_XMM
|
||||
.else
|
||||
\type = TYPE_INVALID
|
||||
.endif
|
||||
.endm
|
||||
|
||||
.macro PFX_OPD_SIZE
|
||||
.byte 0x66
|
||||
.endm
|
||||
|
||||
.macro PFX_REX opd1 opd2 W=0
|
||||
.if ((\opd1 | \opd2) & 8) || \W
|
||||
.byte 0x40 | ((\opd1 & 8) >> 3) | ((\opd2 & 8) >> 1) | (\W << 3)
|
||||
.endif
|
||||
.endm
|
||||
|
||||
.macro MODRM mod opd1 opd2
|
||||
.byte \mod | (\opd1 & 7) | ((\opd2 & 7) << 3)
|
||||
.endm
|
||||
|
||||
.macro PSHUFB_XMM xmm1 xmm2
|
||||
XMM_NUM pshufb_opd1 \xmm1
|
||||
XMM_NUM pshufb_opd2 \xmm2
|
||||
PFX_OPD_SIZE
|
||||
PFX_REX pshufb_opd1 pshufb_opd2
|
||||
.byte 0x0f, 0x38, 0x00
|
||||
MODRM 0xc0 pshufb_opd1 pshufb_opd2
|
||||
.endm
|
||||
|
||||
.macro PCLMULQDQ imm8 xmm1 xmm2
|
||||
XMM_NUM clmul_opd1 \xmm1
|
||||
XMM_NUM clmul_opd2 \xmm2
|
||||
PFX_OPD_SIZE
|
||||
PFX_REX clmul_opd1 clmul_opd2
|
||||
.byte 0x0f, 0x3a, 0x44
|
||||
MODRM 0xc0 clmul_opd1 clmul_opd2
|
||||
.byte \imm8
|
||||
.endm
|
||||
|
||||
.macro PEXTRD imm8 xmm gpr
|
||||
R32_NUM extrd_opd1 \gpr
|
||||
XMM_NUM extrd_opd2 \xmm
|
||||
PFX_OPD_SIZE
|
||||
PFX_REX extrd_opd1 extrd_opd2
|
||||
.byte 0x0f, 0x3a, 0x16
|
||||
MODRM 0xc0 extrd_opd1 extrd_opd2
|
||||
.byte \imm8
|
||||
.endm
|
||||
|
||||
.macro MOVQ_R64_XMM opd1 opd2
|
||||
TYPE movq_r64_xmm_opd1_type \opd1
|
||||
.if movq_r64_xmm_opd1_type == TYPE_XMM
|
||||
XMM_NUM movq_r64_xmm_opd1 \opd1
|
||||
R64_NUM movq_r64_xmm_opd2 \opd2
|
||||
.else
|
||||
R64_NUM movq_r64_xmm_opd1 \opd1
|
||||
XMM_NUM movq_r64_xmm_opd2 \opd2
|
||||
.endif
|
||||
PFX_OPD_SIZE
|
||||
PFX_REX movq_r64_xmm_opd1 movq_r64_xmm_opd2 1
|
||||
.if movq_r64_xmm_opd1_type == TYPE_XMM
|
||||
.byte 0x0f, 0x7e
|
||||
.else
|
||||
.byte 0x0f, 0x6e
|
||||
.endif
|
||||
MODRM 0xc0 movq_r64_xmm_opd1 movq_r64_xmm_opd2
|
||||
.endm
|
||||
421
pkgs/crypto/sha512/sha512-avx-asm.S
Normal file
@@ -0,0 +1,421 @@
|
||||
########################################################################
|
||||
# Implement fast SHA-512 with AVX instructions. (x86_64)
|
||||
#
|
||||
# Copyright (C) 2013 Intel Corporation.
|
||||
#
|
||||
# Authors:
|
||||
# James Guilford <james.guilford@intel.com>
|
||||
# Kirk Yap <kirk.s.yap@intel.com>
|
||||
# David Cote <david.m.cote@intel.com>
|
||||
# Tim Chen <tim.c.chen@linux.intel.com>
|
||||
#
|
||||
# This software is available to you under a choice of one of two
|
||||
# licenses. You may choose to be licensed under the terms of the GNU
|
||||
# General Public License (GPL) Version 2, available from the file
|
||||
# COPYING in the main directory of this source tree, or the
|
||||
# OpenIB.org BSD license below:
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or
|
||||
# without modification, are permitted provided that the following
|
||||
# conditions are met:
|
||||
#
|
||||
# - Redistributions of source code must retain the above
|
||||
# copyright notice, this list of conditions and the following
|
||||
# disclaimer.
|
||||
#
|
||||
# - Redistributions in binary form must reproduce the above
|
||||
# copyright notice, this list of conditions and the following
|
||||
# disclaimer in the documentation and/or other materials
|
||||
# provided with the distribution.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
# SOFTWARE.
|
||||
#
|
||||
########################################################################
|
||||
#
|
||||
# This code is described in an Intel White-Paper:
|
||||
# "Fast SHA-512 Implementations on Intel Architecture Processors"
|
||||
#
|
||||
# To find it, surf to http://www.intel.com/p/en_US/embedded
|
||||
# and search for that title.
|
||||
#
|
||||
########################################################################
|
||||
|
||||
#include "asm.S"
|
||||
|
||||
.text
|
||||
|
||||
# Virtual Registers
|
||||
# ARG1
|
||||
msg = %rdi
|
||||
# ARG2
|
||||
digest = %rsi
|
||||
# ARG3
|
||||
msglen = %rdx
|
||||
T1 = %rcx
|
||||
T2 = %r8
|
||||
a_64 = %r9
|
||||
b_64 = %r10
|
||||
c_64 = %r11
|
||||
d_64 = %r12
|
||||
e_64 = %r13
|
||||
f_64 = %r14
|
||||
g_64 = %r15
|
||||
h_64 = %rbx
|
||||
tmp0 = %rax
|
||||
|
||||
# Local variables (stack frame)
|
||||
|
||||
# Message Schedule
|
||||
W_SIZE = 80*8
|
||||
# W[t] + K[t] | W[t+1] + K[t+1]
|
||||
WK_SIZE = 2*8
|
||||
RSPSAVE_SIZE = 1*8
|
||||
GPRSAVE_SIZE = 5*8
|
||||
|
||||
frame_W = 0
|
||||
frame_WK = frame_W + W_SIZE
|
||||
frame_RSPSAVE = frame_WK + WK_SIZE
|
||||
frame_GPRSAVE = frame_RSPSAVE + RSPSAVE_SIZE
|
||||
frame_size = frame_GPRSAVE + GPRSAVE_SIZE
|
||||
|
||||
# Useful QWORD "arrays" for simpler memory references
|
||||
# MSG, DIGEST, K_t, W_t are arrays
|
||||
# WK_2(t) points to 1 of 2 qwords at frame.WK depending on t being odd/even
|
||||
|
||||
# Input message (arg1)
|
||||
#define MSG(i) 8*i(msg)
|
||||
|
||||
# Output Digest (arg2)
|
||||
#define DIGEST(i) 8*i(digest)
|
||||
|
||||
# SHA Constants (static mem)
|
||||
#define K_t(i) 8*i+K512(%rip)
|
||||
|
||||
# Message Schedule (stack frame)
|
||||
#define W_t(i) 8*i+frame_W(%rsp)
|
||||
|
||||
# W[t]+K[t] (stack frame)
|
||||
#define WK_2(i) 8*((i%2))+frame_WK(%rsp)
|
||||
|
||||
.macro RotateState
|
||||
# Rotate symbols a..h right
|
||||
TMP = h_64
|
||||
h_64 = g_64
|
||||
g_64 = f_64
|
||||
f_64 = e_64
|
||||
e_64 = d_64
|
||||
d_64 = c_64
|
||||
c_64 = b_64
|
||||
b_64 = a_64
|
||||
a_64 = TMP
|
||||
.endm
|
||||
|
||||
.macro RORQ p1 p2
|
||||
# shld is faster than ror on Sandybridge
|
||||
shld $(64-\p2), \p1, \p1
|
||||
.endm
|
||||
|
||||
.macro SHA512_Round rnd
|
||||
# Compute Round %%t
|
||||
mov f_64, T1 # T1 = f
|
||||
mov e_64, tmp0 # tmp = e
|
||||
xor g_64, T1 # T1 = f ^ g
|
||||
RORQ tmp0, 23 # 41 # tmp = e ror 23
|
||||
and e_64, T1 # T1 = (f ^ g) & e
|
||||
xor e_64, tmp0 # tmp = (e ror 23) ^ e
|
||||
xor g_64, T1 # T1 = ((f ^ g) & e) ^ g = CH(e,f,g)
|
||||
idx = \rnd
|
||||
add WK_2(idx), T1 # W[t] + K[t] from message scheduler
|
||||
RORQ tmp0, 4 # 18 # tmp = ((e ror 23) ^ e) ror 4
|
||||
xor e_64, tmp0 # tmp = (((e ror 23) ^ e) ror 4) ^ e
|
||||
mov a_64, T2 # T2 = a
|
||||
add h_64, T1 # T1 = CH(e,f,g) + W[t] + K[t] + h
|
||||
RORQ tmp0, 14 # 14 # tmp = ((((e ror23)^e)ror4)^e)ror14 = S1(e)
|
||||
add tmp0, T1 # T1 = CH(e,f,g) + W[t] + K[t] + S1(e)
|
||||
mov a_64, tmp0 # tmp = a
|
||||
xor c_64, T2 # T2 = a ^ c
|
||||
and c_64, tmp0 # tmp = a & c
|
||||
and b_64, T2 # T2 = (a ^ c) & b
|
||||
xor tmp0, T2 # T2 = ((a ^ c) & b) ^ (a & c) = Maj(a,b,c)
|
||||
mov a_64, tmp0 # tmp = a
|
||||
RORQ tmp0, 5 # 39 # tmp = a ror 5
|
||||
xor a_64, tmp0 # tmp = (a ror 5) ^ a
|
||||
add T1, d_64 # e(next_state) = d + T1
|
||||
RORQ tmp0, 6 # 34 # tmp = ((a ror 5) ^ a) ror 6
|
||||
xor a_64, tmp0 # tmp = (((a ror 5) ^ a) ror 6) ^ a
|
||||
lea (T1, T2), h_64 # a(next_state) = T1 + Maj(a,b,c)
|
||||
RORQ tmp0, 28 # 28 # tmp = ((((a ror5)^a)ror6)^a)ror28 = S0(a)
|
||||
add tmp0, h_64 # a(next_state) = T1 + Maj(a,b,c) S0(a)
|
||||
RotateState
|
||||
.endm
|
||||
|
||||
.macro SHA512_2Sched_2Round_avx rnd
|
||||
# Compute rounds t-2 and t-1
|
||||
# Compute message schedule QWORDS t and t+1
|
||||
|
||||
# Two rounds are computed based on the values for K[t-2]+W[t-2] and
|
||||
# K[t-1]+W[t-1] which were previously stored at WK_2 by the message
|
||||
# scheduler.
|
||||
# The two new schedule QWORDS are stored at [W_t(t)] and [W_t(t+1)].
|
||||
# They are then added to their respective SHA512 constants at
|
||||
# [K_t(t)] and [K_t(t+1)] and stored at dqword [WK_2(t)]
|
||||
# For brevity, the comments following vectored instructions only refer to
|
||||
# the first of a pair of QWORDS.
|
||||
# Eg. XMM4=W[t-2] really means XMM4={W[t-2]|W[t-1]}
|
||||
# The computation of the message schedule and the rounds are tightly
|
||||
# stitched to take advantage of instruction-level parallelism.
|
||||
|
||||
idx = \rnd - 2
|
||||
vmovdqa W_t(idx), %xmm4 # XMM4 = W[t-2]
|
||||
idx = \rnd - 15
|
||||
vmovdqu W_t(idx), %xmm5 # XMM5 = W[t-15]
|
||||
mov f_64, T1
|
||||
vpsrlq $61, %xmm4, %xmm0 # XMM0 = W[t-2]>>61
|
||||
mov e_64, tmp0
|
||||
vpsrlq $1, %xmm5, %xmm6 # XMM6 = W[t-15]>>1
|
||||
xor g_64, T1
|
||||
RORQ tmp0, 23 # 41
|
||||
vpsrlq $19, %xmm4, %xmm1 # XMM1 = W[t-2]>>19
|
||||
and e_64, T1
|
||||
xor e_64, tmp0
|
||||
vpxor %xmm1, %xmm0, %xmm0 # XMM0 = W[t-2]>>61 ^ W[t-2]>>19
|
||||
xor g_64, T1
|
||||
idx = \rnd
|
||||
add WK_2(idx), T1#
|
||||
vpsrlq $8, %xmm5, %xmm7 # XMM7 = W[t-15]>>8
|
||||
RORQ tmp0, 4 # 18
|
||||
vpsrlq $6, %xmm4, %xmm2 # XMM2 = W[t-2]>>6
|
||||
xor e_64, tmp0
|
||||
mov a_64, T2
|
||||
add h_64, T1
|
||||
vpxor %xmm7, %xmm6, %xmm6 # XMM6 = W[t-15]>>1 ^ W[t-15]>>8
|
||||
RORQ tmp0, 14 # 14
|
||||
add tmp0, T1
|
||||
vpsrlq $7, %xmm5, %xmm8 # XMM8 = W[t-15]>>7
|
||||
mov a_64, tmp0
|
||||
xor c_64, T2
|
||||
vpsllq $(64-61), %xmm4, %xmm3 # XMM3 = W[t-2]<<3
|
||||
and c_64, tmp0
|
||||
and b_64, T2
|
||||
vpxor %xmm3, %xmm2, %xmm2 # XMM2 = W[t-2]>>6 ^ W[t-2]<<3
|
||||
xor tmp0, T2
|
||||
mov a_64, tmp0
|
||||
vpsllq $(64-1), %xmm5, %xmm9 # XMM9 = W[t-15]<<63
|
||||
RORQ tmp0, 5 # 39
|
||||
vpxor %xmm9, %xmm8, %xmm8 # XMM8 = W[t-15]>>7 ^ W[t-15]<<63
|
||||
xor a_64, tmp0
|
||||
add T1, d_64
|
||||
RORQ tmp0, 6 # 34
|
||||
xor a_64, tmp0
|
||||
vpxor %xmm8, %xmm6, %xmm6 # XMM6 = W[t-15]>>1 ^ W[t-15]>>8 ^
|
||||
# W[t-15]>>7 ^ W[t-15]<<63
|
||||
lea (T1, T2), h_64
|
||||
RORQ tmp0, 28 # 28
|
||||
vpsllq $(64-19), %xmm4, %xmm4 # XMM4 = W[t-2]<<25
|
||||
add tmp0, h_64
|
||||
RotateState
|
||||
vpxor %xmm4, %xmm0, %xmm0 # XMM0 = W[t-2]>>61 ^ W[t-2]>>19 ^
|
||||
# W[t-2]<<25
|
||||
mov f_64, T1
|
||||
vpxor %xmm2, %xmm0, %xmm0 # XMM0 = s1(W[t-2])
|
||||
mov e_64, tmp0
|
||||
xor g_64, T1
|
||||
idx = \rnd - 16
|
||||
vpaddq W_t(idx), %xmm0, %xmm0 # XMM0 = s1(W[t-2]) + W[t-16]
|
||||
idx = \rnd - 7
|
||||
vmovdqu W_t(idx), %xmm1 # XMM1 = W[t-7]
|
||||
RORQ tmp0, 23 # 41
|
||||
and e_64, T1
|
||||
xor e_64, tmp0
|
||||
xor g_64, T1
|
||||
vpsllq $(64-8), %xmm5, %xmm5 # XMM5 = W[t-15]<<56
|
||||
idx = \rnd + 1
|
||||
add WK_2(idx), T1
|
||||
vpxor %xmm5, %xmm6, %xmm6 # XMM6 = s0(W[t-15])
|
||||
RORQ tmp0, 4 # 18
|
||||
vpaddq %xmm6, %xmm0, %xmm0 # XMM0 = s1(W[t-2]) + W[t-16] + s0(W[t-15])
|
||||
xor e_64, tmp0
|
||||
vpaddq %xmm1, %xmm0, %xmm0 # XMM0 = W[t] = s1(W[t-2]) + W[t-7] +
|
||||
# s0(W[t-15]) + W[t-16]
|
||||
mov a_64, T2
|
||||
add h_64, T1
|
||||
RORQ tmp0, 14 # 14
|
||||
add tmp0, T1
|
||||
idx = \rnd
|
||||
vmovdqa %xmm0, W_t(idx) # Store W[t]
|
||||
vpaddq K_t(idx), %xmm0, %xmm0 # Compute W[t]+K[t]
|
||||
vmovdqa %xmm0, WK_2(idx) # Store W[t]+K[t] for next rounds
|
||||
mov a_64, tmp0
|
||||
xor c_64, T2
|
||||
and c_64, tmp0
|
||||
and b_64, T2
|
||||
xor tmp0, T2
|
||||
mov a_64, tmp0
|
||||
RORQ tmp0, 5 # 39
|
||||
xor a_64, tmp0
|
||||
add T1, d_64
|
||||
RORQ tmp0, 6 # 34
|
||||
xor a_64, tmp0
|
||||
lea (T1, T2), h_64
|
||||
RORQ tmp0, 28 # 28
|
||||
add tmp0, h_64
|
||||
RotateState
|
||||
.endm
|
||||
|
||||
########################################################################
|
||||
# void sha512_transform_avx(const void* M, void* D, u64 L)
|
||||
# Purpose: Updates the SHA512 digest stored at D with the message stored in M.
|
||||
# The size of the message pointed to by M must be an integer multiple of SHA512
|
||||
# message blocks.
|
||||
# L is the message length in SHA512 blocks
|
||||
########################################################################
|
||||
ENTRY(sha512_transform_avx)
|
||||
cmp $0, msglen
|
||||
je nowork
|
||||
|
||||
# Allocate Stack Space
|
||||
mov %rsp, %rax
|
||||
sub $frame_size, %rsp
|
||||
and $~(0x20 - 1), %rsp
|
||||
mov %rax, frame_RSPSAVE(%rsp)
|
||||
|
||||
# Save GPRs
|
||||
mov %rbx, frame_GPRSAVE(%rsp)
|
||||
mov %r12, frame_GPRSAVE +8*1(%rsp)
|
||||
mov %r13, frame_GPRSAVE +8*2(%rsp)
|
||||
mov %r14, frame_GPRSAVE +8*3(%rsp)
|
||||
mov %r15, frame_GPRSAVE +8*4(%rsp)
|
||||
|
||||
updateblock:
|
||||
|
||||
# Load state variables
|
||||
mov DIGEST(0), a_64
|
||||
mov DIGEST(1), b_64
|
||||
mov DIGEST(2), c_64
|
||||
mov DIGEST(3), d_64
|
||||
mov DIGEST(4), e_64
|
||||
mov DIGEST(5), f_64
|
||||
mov DIGEST(6), g_64
|
||||
mov DIGEST(7), h_64
|
||||
|
||||
t = 0
|
||||
.rept 80/2 + 1
|
||||
# (80 rounds) / (2 rounds/iteration) + (1 iteration)
|
||||
# +1 iteration because the scheduler leads hashing by 1 iteration
|
||||
.if t < 2
|
||||
# BSWAP 2 QWORDS
|
||||
vmovdqa XMM_QWORD_BSWAP(%rip), %xmm1
|
||||
vmovdqu MSG(t), %xmm0
|
||||
vpshufb %xmm1, %xmm0, %xmm0 # BSWAP
|
||||
vmovdqa %xmm0, W_t(t) # Store Scheduled Pair
|
||||
vpaddq K_t(t), %xmm0, %xmm0 # Compute W[t]+K[t]
|
||||
vmovdqa %xmm0, WK_2(t) # Store into WK for rounds
|
||||
.elseif t < 16
|
||||
# BSWAP 2 QWORDS# Compute 2 Rounds
|
||||
vmovdqu MSG(t), %xmm0
|
||||
vpshufb %xmm1, %xmm0, %xmm0 # BSWAP
|
||||
SHA512_Round t-2 # Round t-2
|
||||
vmovdqa %xmm0, W_t(t) # Store Scheduled Pair
|
||||
vpaddq K_t(t), %xmm0, %xmm0 # Compute W[t]+K[t]
|
||||
SHA512_Round t-1 # Round t-1
|
||||
vmovdqa %xmm0, WK_2(t)# Store W[t]+K[t] into WK
|
||||
.elseif t < 79
|
||||
# Schedule 2 QWORDS# Compute 2 Rounds
|
||||
SHA512_2Sched_2Round_avx t
|
||||
.else
|
||||
# Compute 2 Rounds
|
||||
SHA512_Round t-2
|
||||
SHA512_Round t-1
|
||||
.endif
|
||||
t = t+2
|
||||
.endr
|
||||
|
||||
# Update digest
|
||||
add a_64, DIGEST(0)
|
||||
add b_64, DIGEST(1)
|
||||
add c_64, DIGEST(2)
|
||||
add d_64, DIGEST(3)
|
||||
add e_64, DIGEST(4)
|
||||
add f_64, DIGEST(5)
|
||||
add g_64, DIGEST(6)
|
||||
add h_64, DIGEST(7)
|
||||
|
||||
# Advance to next message block
|
||||
add $16*8, msg
|
||||
dec msglen
|
||||
jnz updateblock
|
||||
|
||||
# Restore GPRs
|
||||
mov frame_GPRSAVE(%rsp), %rbx
|
||||
mov frame_GPRSAVE +8*1(%rsp), %r12
|
||||
mov frame_GPRSAVE +8*2(%rsp), %r13
|
||||
mov frame_GPRSAVE +8*3(%rsp), %r14
|
||||
mov frame_GPRSAVE +8*4(%rsp), %r15
|
||||
|
||||
# Restore Stack Pointer
|
||||
mov frame_RSPSAVE(%rsp), %rsp
|
||||
|
||||
nowork:
|
||||
ret
|
||||
ENDPROC(sha512_transform_avx)
|
||||
|
||||
########################################################################
|
||||
### Binary Data
|
||||
|
||||
.data
|
||||
|
||||
.align 16
|
||||
|
||||
# Mask for byte-swapping a couple of qwords in an XMM register using (v)pshufb.
|
||||
XMM_QWORD_BSWAP:
|
||||
.octa 0x08090a0b0c0d0e0f0001020304050607
|
||||
|
||||
# K[t] used in SHA512 hashing
|
||||
K512:
|
||||
.quad 0x428a2f98d728ae22,0x7137449123ef65cd
|
||||
.quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
|
||||
.quad 0x3956c25bf348b538,0x59f111f1b605d019
|
||||
.quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118
|
||||
.quad 0xd807aa98a3030242,0x12835b0145706fbe
|
||||
.quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
|
||||
.quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1
|
||||
.quad 0x9bdc06a725c71235,0xc19bf174cf692694
|
||||
.quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3
|
||||
.quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
|
||||
.quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483
|
||||
.quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5
|
||||
.quad 0x983e5152ee66dfab,0xa831c66d2db43210
|
||||
.quad 0xb00327c898fb213f,0xbf597fc7beef0ee4
|
||||
.quad 0xc6e00bf33da88fc2,0xd5a79147930aa725
|
||||
.quad 0x06ca6351e003826f,0x142929670a0e6e70
|
||||
.quad 0x27b70a8546d22ffc,0x2e1b21385c26c926
|
||||
.quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df
|
||||
.quad 0x650a73548baf63de,0x766a0abb3c77b2a8
|
||||
.quad 0x81c2c92e47edaee6,0x92722c851482353b
|
||||
.quad 0xa2bfe8a14cf10364,0xa81a664bbc423001
|
||||
.quad 0xc24b8b70d0f89791,0xc76c51a30654be30
|
||||
.quad 0xd192e819d6ef5218,0xd69906245565a910
|
||||
.quad 0xf40e35855771202a,0x106aa07032bbd1b8
|
||||
.quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53
|
||||
.quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
|
||||
.quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
|
||||
.quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
|
||||
.quad 0x748f82ee5defb2fc,0x78a5636f43172f60
|
||||
.quad 0x84c87814a1f0ab72,0x8cc702081a6439ec
|
||||
.quad 0x90befffa23631e28,0xa4506cebde82bde9
|
||||
.quad 0xbef9a3f7b2c67915,0xc67178f2e372532b
|
||||
.quad 0xca273eceea26619c,0xd186b8c721c0c207
|
||||
.quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
|
||||
.quad 0x06f067aa72176fba,0x0a637dc5a2c898a6
|
||||
.quad 0x113f9804bef90dae,0x1b710b35131c471b
|
||||
.quad 0x28db77f523047d84,0x32caab7b40c72493
|
||||
.quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
|
||||
.quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
|
||||
.quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817
|
||||
741
pkgs/crypto/sha512/sha512-avx2-asm.S
Normal file
@@ -0,0 +1,741 @@
|
||||
########################################################################
|
||||
# Implement fast SHA-512 with AVX2 instructions. (x86_64)
|
||||
#
|
||||
# Copyright (C) 2013 Intel Corporation.
|
||||
#
|
||||
# Authors:
|
||||
# James Guilford <james.guilford@intel.com>
|
||||
# Kirk Yap <kirk.s.yap@intel.com>
|
||||
# David Cote <david.m.cote@intel.com>
|
||||
# Tim Chen <tim.c.chen@linux.intel.com>
|
||||
#
|
||||
# This software is available to you under a choice of one of two
|
||||
# licenses. You may choose to be licensed under the terms of the GNU
|
||||
# General Public License (GPL) Version 2, available from the file
|
||||
# COPYING in the main directory of this source tree, or the
|
||||
# OpenIB.org BSD license below:
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or
|
||||
# without modification, are permitted provided that the following
|
||||
# conditions are met:
|
||||
#
|
||||
# - Redistributions of source code must retain the above
|
||||
# copyright notice, this list of conditions and the following
|
||||
# disclaimer.
|
||||
#
|
||||
# - Redistributions in binary form must reproduce the above
|
||||
# copyright notice, this list of conditions and the following
|
||||
# disclaimer in the documentation and/or other materials
|
||||
# provided with the distribution.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
# SOFTWARE.
|
||||
#
|
||||
########################################################################
|
||||
#
|
||||
# This code is described in an Intel White-Paper:
|
||||
# "Fast SHA-512 Implementations on Intel Architecture Processors"
|
||||
#
|
||||
# To find it, surf to http://www.intel.com/p/en_US/embedded
|
||||
# and search for that title.
|
||||
#
|
||||
########################################################################
|
||||
# This code schedules 1 blocks at a time, with 4 lanes per block
|
||||
########################################################################
|
||||
|
||||
#include "asm.S"
|
||||
|
||||
.text
|
||||
|
||||
# Virtual Registers
|
||||
Y_0 = %ymm4
|
||||
Y_1 = %ymm5
|
||||
Y_2 = %ymm6
|
||||
Y_3 = %ymm7
|
||||
|
||||
YTMP0 = %ymm0
|
||||
YTMP1 = %ymm1
|
||||
YTMP2 = %ymm2
|
||||
YTMP3 = %ymm3
|
||||
YTMP4 = %ymm8
|
||||
XFER = YTMP0
|
||||
|
||||
BYTE_FLIP_MASK = %ymm9
|
||||
|
||||
# 1st arg
|
||||
INP = %rdi
|
||||
# 2nd arg
|
||||
CTX = %rsi
|
||||
# 3rd arg
|
||||
NUM_BLKS = %rdx
|
||||
|
||||
c = %rcx
|
||||
d = %r8
|
||||
e = %rdx
|
||||
y3 = %rdi
|
||||
|
||||
TBL = %rbp
|
||||
|
||||
a = %rax
|
||||
b = %rbx
|
||||
|
||||
f = %r9
|
||||
g = %r10
|
||||
h = %r11
|
||||
old_h = %r11
|
||||
|
||||
T1 = %r12
|
||||
y0 = %r13
|
||||
y1 = %r14
|
||||
y2 = %r15
|
||||
|
||||
y4 = %r12
|
||||
|
||||
# Local variables (stack frame)
|
||||
XFER_SIZE = 4*8
|
||||
SRND_SIZE = 1*8
|
||||
INP_SIZE = 1*8
|
||||
INPEND_SIZE = 1*8
|
||||
RSPSAVE_SIZE = 1*8
|
||||
GPRSAVE_SIZE = 6*8
|
||||
|
||||
frame_XFER = 0
|
||||
frame_SRND = frame_XFER + XFER_SIZE
|
||||
frame_INP = frame_SRND + SRND_SIZE
|
||||
frame_INPEND = frame_INP + INP_SIZE
|
||||
frame_RSPSAVE = frame_INPEND + INPEND_SIZE
|
||||
frame_GPRSAVE = frame_RSPSAVE + RSPSAVE_SIZE
|
||||
frame_size = frame_GPRSAVE + GPRSAVE_SIZE
|
||||
|
||||
## assume buffers not aligned
|
||||
#define VMOVDQ vmovdqu
|
||||
|
||||
# addm [mem], reg
|
||||
# Add reg to mem using reg-mem add and store
|
||||
.macro addm p1 p2
|
||||
add \p1, \p2
|
||||
mov \p2, \p1
|
||||
.endm
|
||||
|
||||
|
||||
# COPY_YMM_AND_BSWAP ymm, [mem], byte_flip_mask
|
||||
# Load ymm with mem and byte swap each dword
|
||||
.macro COPY_YMM_AND_BSWAP p1 p2 p3
|
||||
VMOVDQ \p2, \p1
|
||||
vpshufb \p3, \p1, \p1
|
||||
.endm
|
||||
# rotate_Ys
|
||||
# Rotate values of symbols Y0...Y3
|
||||
.macro rotate_Ys
|
||||
Y_ = Y_0
|
||||
Y_0 = Y_1
|
||||
Y_1 = Y_2
|
||||
Y_2 = Y_3
|
||||
Y_3 = Y_
|
||||
.endm
|
||||
|
||||
# RotateState
|
||||
.macro RotateState
|
||||
# Rotate symbols a..h right
|
||||
old_h = h
|
||||
TMP_ = h
|
||||
h = g
|
||||
g = f
|
||||
f = e
|
||||
e = d
|
||||
d = c
|
||||
c = b
|
||||
b = a
|
||||
a = TMP_
|
||||
.endm
|
||||
|
||||
# macro MY_VPALIGNR YDST, YSRC1, YSRC2, RVAL
|
||||
# YDST = {YSRC1, YSRC2} >> RVAL*8
|
||||
.macro MY_VPALIGNR YDST YSRC1 YSRC2 RVAL
|
||||
vperm2f128 $0x3, \YSRC2, \YSRC1, \YDST # YDST = {YS1_LO, YS2_HI}
|
||||
vpalignr $\RVAL, \YSRC2, \YDST, \YDST # YDST = {YDS1, YS2} >> RVAL*8
|
||||
.endm
|
||||
|
||||
.macro FOUR_ROUNDS_AND_SCHED
|
||||
################################### RND N + 0 #########################################
|
||||
|
||||
# Extract w[t-7]
|
||||
MY_VPALIGNR YTMP0, Y_3, Y_2, 8 # YTMP0 = W[-7]
|
||||
# Calculate w[t-16] + w[t-7]
|
||||
vpaddq Y_0, YTMP0, YTMP0 # YTMP0 = W[-7] + W[-16]
|
||||
# Extract w[t-15]
|
||||
MY_VPALIGNR YTMP1, Y_1, Y_0, 8 # YTMP1 = W[-15]
|
||||
|
||||
# Calculate sigma0
|
||||
|
||||
# Calculate w[t-15] ror 1
|
||||
vpsrlq $1, YTMP1, YTMP2
|
||||
vpsllq $(64-1), YTMP1, YTMP3
|
||||
vpor YTMP2, YTMP3, YTMP3 # YTMP3 = W[-15] ror 1
|
||||
# Calculate w[t-15] shr 7
|
||||
vpsrlq $7, YTMP1, YTMP4 # YTMP4 = W[-15] >> 7
|
||||
|
||||
mov a, y3 # y3 = a # MAJA
|
||||
rorx $41, e, y0 # y0 = e >> 41 # S1A
|
||||
rorx $18, e, y1 # y1 = e >> 18 # S1B
|
||||
add frame_XFER(%rsp),h # h = k + w + h # --
|
||||
or c, y3 # y3 = a|c # MAJA
|
||||
mov f, y2 # y2 = f # CH
|
||||
rorx $34, a, T1 # T1 = a >> 34 # S0B
|
||||
|
||||
xor y1, y0 # y0 = (e>>41) ^ (e>>18) # S1
|
||||
xor g, y2 # y2 = f^g # CH
|
||||
rorx $14, e, y1 # y1 = (e >> 14) # S1
|
||||
|
||||
and e, y2 # y2 = (f^g)&e # CH
|
||||
xor y1, y0 # y0 = (e>>41) ^ (e>>18) ^ (e>>14) # S1
|
||||
rorx $39, a, y1 # y1 = a >> 39 # S0A
|
||||
add h, d # d = k + w + h + d # --
|
||||
|
||||
and b, y3 # y3 = (a|c)&b # MAJA
|
||||
xor T1, y1 # y1 = (a>>39) ^ (a>>34) # S0
|
||||
rorx $28, a, T1 # T1 = (a >> 28) # S0
|
||||
|
||||
xor g, y2 # y2 = CH = ((f^g)&e)^g # CH
|
||||
xor T1, y1 # y1 = (a>>39) ^ (a>>34) ^ (a>>28) # S0
|
||||
mov a, T1 # T1 = a # MAJB
|
||||
and c, T1 # T1 = a&c # MAJB
|
||||
|
||||
add y0, y2 # y2 = S1 + CH # --
|
||||
or T1, y3 # y3 = MAJ = ((a|c)&b)|(a&c) # MAJ
|
||||
add y1, h # h = k + w + h + S0 # --
|
||||
|
||||
add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
|
||||
|
||||
add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0# --
|
||||
add y3, h # h = t1 + S0 + MAJ # --
|
||||
|
||||
RotateState
|
||||
|
||||
################################### RND N + 1 #########################################
|
||||
|
||||
# Calculate w[t-15] ror 8
|
||||
vpsrlq $8, YTMP1, YTMP2
|
||||
vpsllq $(64-8), YTMP1, YTMP1
|
||||
vpor YTMP2, YTMP1, YTMP1 # YTMP1 = W[-15] ror 8
|
||||
# XOR the three components
|
||||
vpxor YTMP4, YTMP3, YTMP3 # YTMP3 = W[-15] ror 1 ^ W[-15] >> 7
|
||||
vpxor YTMP1, YTMP3, YTMP1 # YTMP1 = s0
|
||||
|
||||
|
||||
# Add three components, w[t-16], w[t-7] and sigma0
|
||||
vpaddq YTMP1, YTMP0, YTMP0 # YTMP0 = W[-16] + W[-7] + s0
|
||||
# Move to appropriate lanes for calculating w[16] and w[17]
|
||||
vperm2f128 $0x0, YTMP0, YTMP0, Y_0 # Y_0 = W[-16] + W[-7] + s0 {BABA}
|
||||
# Move to appropriate lanes for calculating w[18] and w[19]
|
||||
vpand MASK_YMM_LO(%rip), YTMP0, YTMP0 # YTMP0 = W[-16] + W[-7] + s0 {DC00}
|
||||
|
||||
# Calculate w[16] and w[17] in both 128 bit lanes
|
||||
|
||||
# Calculate sigma1 for w[16] and w[17] on both 128 bit lanes
|
||||
vperm2f128 $0x11, Y_3, Y_3, YTMP2 # YTMP2 = W[-2] {BABA}
|
||||
vpsrlq $6, YTMP2, YTMP4 # YTMP4 = W[-2] >> 6 {BABA}
|
||||
|
||||
|
||||
mov a, y3 # y3 = a # MAJA
|
||||
rorx $41, e, y0 # y0 = e >> 41 # S1A
|
||||
rorx $18, e, y1 # y1 = e >> 18 # S1B
|
||||
add 1*8+frame_XFER(%rsp), h # h = k + w + h # --
|
||||
or c, y3 # y3 = a|c # MAJA
|
||||
|
||||
|
||||
mov f, y2 # y2 = f # CH
|
||||
rorx $34, a, T1 # T1 = a >> 34 # S0B
|
||||
xor y1, y0 # y0 = (e>>41) ^ (e>>18) # S1
|
||||
xor g, y2 # y2 = f^g # CH
|
||||
|
||||
|
||||
rorx $14, e, y1 # y1 = (e >> 14) # S1
|
||||
xor y1, y0 # y0 = (e>>41) ^ (e>>18) ^ (e>>14) # S1
|
||||
rorx $39, a, y1 # y1 = a >> 39 # S0A
|
||||
and e, y2 # y2 = (f^g)&e # CH
|
||||
add h, d # d = k + w + h + d # --
|
||||
|
||||
and b, y3 # y3 = (a|c)&b # MAJA
|
||||
xor T1, y1 # y1 = (a>>39) ^ (a>>34) # S0
|
||||
|
||||
rorx $28, a, T1 # T1 = (a >> 28) # S0
|
||||
xor g, y2 # y2 = CH = ((f^g)&e)^g # CH
|
||||
|
||||
xor T1, y1 # y1 = (a>>39) ^ (a>>34) ^ (a>>28) # S0
|
||||
mov a, T1 # T1 = a # MAJB
|
||||
and c, T1 # T1 = a&c # MAJB
|
||||
add y0, y2 # y2 = S1 + CH # --
|
||||
|
||||
or T1, y3 # y3 = MAJ = ((a|c)&b)|(a&c) # MAJ
|
||||
add y1, h # h = k + w + h + S0 # --
|
||||
|
||||
add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
|
||||
add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0# --
|
||||
add y3, h # h = t1 + S0 + MAJ # --
|
||||
|
||||
RotateState
|
||||
|
||||
|
||||
################################### RND N + 2 #########################################
|
||||
|
||||
vpsrlq $19, YTMP2, YTMP3 # YTMP3 = W[-2] >> 19 {BABA}
|
||||
vpsllq $(64-19), YTMP2, YTMP1 # YTMP1 = W[-2] << 19 {BABA}
|
||||
vpor YTMP1, YTMP3, YTMP3 # YTMP3 = W[-2] ror 19 {BABA}
|
||||
vpxor YTMP3, YTMP4, YTMP4 # YTMP4 = W[-2] ror 19 ^ W[-2] >> 6 {BABA}
|
||||
vpsrlq $61, YTMP2, YTMP3 # YTMP3 = W[-2] >> 61 {BABA}
|
||||
vpsllq $(64-61), YTMP2, YTMP1 # YTMP1 = W[-2] << 61 {BABA}
|
||||
vpor YTMP1, YTMP3, YTMP3 # YTMP3 = W[-2] ror 61 {BABA}
|
||||
vpxor YTMP3, YTMP4, YTMP4 # YTMP4 = s1 = (W[-2] ror 19) ^
|
||||
# (W[-2] ror 61) ^ (W[-2] >> 6) {BABA}
|
||||
|
||||
# Add sigma1 to the other components to get w[16] and w[17]
|
||||
vpaddq YTMP4, Y_0, Y_0 # Y_0 = {W[1], W[0], W[1], W[0]}
|
||||
|
||||
# Calculate sigma1 for w[18] and w[19] for upper 128 bit lane
|
||||
vpsrlq $6, Y_0, YTMP4 # YTMP4 = W[-2] >> 6 {DC--}
|
||||
|
||||
mov a, y3 # y3 = a # MAJA
|
||||
rorx $41, e, y0 # y0 = e >> 41 # S1A
|
||||
add 2*8+frame_XFER(%rsp), h # h = k + w + h # --
|
||||
|
||||
rorx $18, e, y1 # y1 = e >> 18 # S1B
|
||||
or c, y3 # y3 = a|c # MAJA
|
||||
mov f, y2 # y2 = f # CH
|
||||
xor g, y2 # y2 = f^g # CH
|
||||
|
||||
rorx $34, a, T1 # T1 = a >> 34 # S0B
|
||||
xor y1, y0 # y0 = (e>>41) ^ (e>>18) # S1
|
||||
and e, y2 # y2 = (f^g)&e # CH
|
||||
|
||||
rorx $14, e, y1 # y1 = (e >> 14) # S1
|
||||
add h, d # d = k + w + h + d # --
|
||||
and b, y3 # y3 = (a|c)&b # MAJA
|
||||
|
||||
xor y1, y0 # y0 = (e>>41) ^ (e>>18) ^ (e>>14) # S1
|
||||
rorx $39, a, y1 # y1 = a >> 39 # S0A
|
||||
xor g, y2 # y2 = CH = ((f^g)&e)^g # CH
|
||||
|
||||
xor T1, y1 # y1 = (a>>39) ^ (a>>34) # S0
|
||||
rorx $28, a, T1 # T1 = (a >> 28) # S0
|
||||
|
||||
xor T1, y1 # y1 = (a>>39) ^ (a>>34) ^ (a>>28) # S0
|
||||
mov a, T1 # T1 = a # MAJB
|
||||
and c, T1 # T1 = a&c # MAJB
|
||||
add y0, y2 # y2 = S1 + CH # --
|
||||
|
||||
or T1, y3 # y3 = MAJ = ((a|c)&b)|(a&c) # MAJ
|
||||
add y1, h # h = k + w + h + S0 # --
|
||||
add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
|
||||
add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0# --
|
||||
|
||||
add y3, h # h = t1 + S0 + MAJ # --
|
||||
|
||||
RotateState
|
||||
|
||||
################################### RND N + 3 #########################################
|
||||
|
||||
vpsrlq $19, Y_0, YTMP3 # YTMP3 = W[-2] >> 19 {DC--}
|
||||
vpsllq $(64-19), Y_0, YTMP1 # YTMP1 = W[-2] << 19 {DC--}
|
||||
vpor YTMP1, YTMP3, YTMP3 # YTMP3 = W[-2] ror 19 {DC--}
|
||||
vpxor YTMP3, YTMP4, YTMP4 # YTMP4 = W[-2] ror 19 ^ W[-2] >> 6 {DC--}
|
||||
vpsrlq $61, Y_0, YTMP3 # YTMP3 = W[-2] >> 61 {DC--}
|
||||
vpsllq $(64-61), Y_0, YTMP1 # YTMP1 = W[-2] << 61 {DC--}
|
||||
vpor YTMP1, YTMP3, YTMP3 # YTMP3 = W[-2] ror 61 {DC--}
|
||||
vpxor YTMP3, YTMP4, YTMP4 # YTMP4 = s1 = (W[-2] ror 19) ^
|
||||
# (W[-2] ror 61) ^ (W[-2] >> 6) {DC--}
|
||||
|
||||
# Add the sigma0 + w[t-7] + w[t-16] for w[18] and w[19]
|
||||
# to newly calculated sigma1 to get w[18] and w[19]
|
||||
vpaddq YTMP4, YTMP0, YTMP2 # YTMP2 = {W[3], W[2], --, --}
|
||||
|
||||
# Form w[19], w[18], w[17], w[16]
|
||||
vpblendd $0xF0, YTMP2, Y_0, Y_0 # Y_0 = {W[3], W[2], W[1], W[0]}
|
||||
|
||||
mov a, y3 # y3 = a # MAJA
|
||||
rorx $41, e, y0 # y0 = e >> 41 # S1A
|
||||
rorx $18, e, y1 # y1 = e >> 18 # S1B
|
||||
add 3*8+frame_XFER(%rsp), h # h = k + w + h # --
|
||||
or c, y3 # y3 = a|c # MAJA
|
||||
|
||||
|
||||
mov f, y2 # y2 = f # CH
|
||||
rorx $34, a, T1 # T1 = a >> 34 # S0B
|
||||
xor y1, y0 # y0 = (e>>41) ^ (e>>18) # S1
|
||||
xor g, y2 # y2 = f^g # CH
|
||||
|
||||
|
||||
rorx $14, e, y1 # y1 = (e >> 14) # S1
|
||||
and e, y2 # y2 = (f^g)&e # CH
|
||||
add h, d # d = k + w + h + d # --
|
||||
and b, y3 # y3 = (a|c)&b # MAJA
|
||||
|
||||
xor y1, y0 # y0 = (e>>41) ^ (e>>18) ^ (e>>14) # S1
|
||||
xor g, y2 # y2 = CH = ((f^g)&e)^g # CH
|
||||
|
||||
rorx $39, a, y1 # y1 = a >> 39 # S0A
|
||||
add y0, y2 # y2 = S1 + CH # --
|
||||
|
||||
xor T1, y1 # y1 = (a>>39) ^ (a>>34) # S0
|
||||
add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
|
||||
|
||||
rorx $28, a, T1 # T1 = (a >> 28) # S0
|
||||
|
||||
xor T1, y1 # y1 = (a>>39) ^ (a>>34) ^ (a>>28) # S0
|
||||
mov a, T1 # T1 = a # MAJB
|
||||
and c, T1 # T1 = a&c # MAJB
|
||||
or T1, y3 # y3 = MAJ = ((a|c)&b)|(a&c) # MAJ
|
||||
|
||||
add y1, h # h = k + w + h + S0 # --
|
||||
add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0# --
|
||||
add y3, h # h = t1 + S0 + MAJ # --
|
||||
|
||||
RotateState
|
||||
|
||||
rotate_Ys
|
||||
.endm
|
||||
|
||||
.macro DO_4ROUNDS
|
||||
|
||||
################################### RND N + 0 #########################################
|
||||
|
||||
mov f, y2 # y2 = f # CH
|
||||
rorx $41, e, y0 # y0 = e >> 41 # S1A
|
||||
rorx $18, e, y1 # y1 = e >> 18 # S1B
|
||||
xor g, y2 # y2 = f^g # CH
|
||||
|
||||
xor y1, y0 # y0 = (e>>41) ^ (e>>18) # S1
|
||||
rorx $14, e, y1 # y1 = (e >> 14) # S1
|
||||
and e, y2 # y2 = (f^g)&e # CH
|
||||
|
||||
xor y1, y0 # y0 = (e>>41) ^ (e>>18) ^ (e>>14) # S1
|
||||
rorx $34, a, T1 # T1 = a >> 34 # S0B
|
||||
xor g, y2 # y2 = CH = ((f^g)&e)^g # CH
|
||||
rorx $39, a, y1 # y1 = a >> 39 # S0A
|
||||
mov a, y3 # y3 = a # MAJA
|
||||
|
||||
xor T1, y1 # y1 = (a>>39) ^ (a>>34) # S0
|
||||
rorx $28, a, T1 # T1 = (a >> 28) # S0
|
||||
add frame_XFER(%rsp), h # h = k + w + h # --
|
||||
or c, y3 # y3 = a|c # MAJA
|
||||
|
||||
xor T1, y1 # y1 = (a>>39) ^ (a>>34) ^ (a>>28) # S0
|
||||
mov a, T1 # T1 = a # MAJB
|
||||
and b, y3 # y3 = (a|c)&b # MAJA
|
||||
and c, T1 # T1 = a&c # MAJB
|
||||
add y0, y2 # y2 = S1 + CH # --
|
||||
|
||||
add h, d # d = k + w + h + d # --
|
||||
or T1, y3 # y3 = MAJ = ((a|c)&b)|(a&c) # MAJ
|
||||
add y1, h # h = k + w + h + S0 # --
|
||||
|
||||
add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
|
||||
|
||||
RotateState
|
||||
|
||||
################################### RND N + 1 #########################################
|
||||
|
||||
add y2, old_h # h = k + w + h + S0 + S1 + CH = t1 + S0# --
|
||||
mov f, y2 # y2 = f # CH
|
||||
rorx $41, e, y0 # y0 = e >> 41 # S1A
|
||||
rorx $18, e, y1 # y1 = e >> 18 # S1B
|
||||
xor g, y2 # y2 = f^g # CH
|
||||
|
||||
xor y1, y0 # y0 = (e>>41) ^ (e>>18) # S1
|
||||
rorx $14, e, y1 # y1 = (e >> 14) # S1
|
||||
and e, y2 # y2 = (f^g)&e # CH
|
||||
add y3, old_h # h = t1 + S0 + MAJ # --
|
||||
|
||||
xor y1, y0 # y0 = (e>>41) ^ (e>>18) ^ (e>>14) # S1
|
||||
rorx $34, a, T1 # T1 = a >> 34 # S0B
|
||||
xor g, y2 # y2 = CH = ((f^g)&e)^g # CH
|
||||
rorx $39, a, y1 # y1 = a >> 39 # S0A
|
||||
mov a, y3 # y3 = a # MAJA
|
||||
|
||||
xor T1, y1 # y1 = (a>>39) ^ (a>>34) # S0
|
||||
rorx $28, a, T1 # T1 = (a >> 28) # S0
|
||||
add 8*1+frame_XFER(%rsp), h # h = k + w + h # --
|
||||
or c, y3 # y3 = a|c # MAJA
|
||||
|
||||
xor T1, y1 # y1 = (a>>39) ^ (a>>34) ^ (a>>28) # S0
|
||||
mov a, T1 # T1 = a # MAJB
|
||||
and b, y3 # y3 = (a|c)&b # MAJA
|
||||
and c, T1 # T1 = a&c # MAJB
|
||||
add y0, y2 # y2 = S1 + CH # --
|
||||
|
||||
add h, d # d = k + w + h + d # --
|
||||
or T1, y3 # y3 = MAJ = ((a|c)&b)|(a&c) # MAJ
|
||||
add y1, h # h = k + w + h + S0 # --
|
||||
|
||||
add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
|
||||
|
||||
RotateState
|
||||
|
||||
################################### RND N + 2 #########################################
|
||||
|
||||
add y2, old_h # h = k + w + h + S0 + S1 + CH = t1 + S0# --
|
||||
mov f, y2 # y2 = f # CH
|
||||
rorx $41, e, y0 # y0 = e >> 41 # S1A
|
||||
rorx $18, e, y1 # y1 = e >> 18 # S1B
|
||||
xor g, y2 # y2 = f^g # CH
|
||||
|
||||
xor y1, y0 # y0 = (e>>41) ^ (e>>18) # S1
|
||||
rorx $14, e, y1 # y1 = (e >> 14) # S1
|
||||
and e, y2 # y2 = (f^g)&e # CH
|
||||
add y3, old_h # h = t1 + S0 + MAJ # --
|
||||
|
||||
xor y1, y0 # y0 = (e>>41) ^ (e>>18) ^ (e>>14) # S1
|
||||
rorx $34, a, T1 # T1 = a >> 34 # S0B
|
||||
xor g, y2 # y2 = CH = ((f^g)&e)^g # CH
|
||||
rorx $39, a, y1 # y1 = a >> 39 # S0A
|
||||
mov a, y3 # y3 = a # MAJA
|
||||
|
||||
xor T1, y1 # y1 = (a>>39) ^ (a>>34) # S0
|
||||
rorx $28, a, T1 # T1 = (a >> 28) # S0
|
||||
add 8*2+frame_XFER(%rsp), h # h = k + w + h # --
|
||||
or c, y3 # y3 = a|c # MAJA
|
||||
|
||||
xor T1, y1 # y1 = (a>>39) ^ (a>>34) ^ (a>>28) # S0
|
||||
mov a, T1 # T1 = a # MAJB
|
||||
and b, y3 # y3 = (a|c)&b # MAJA
|
||||
and c, T1 # T1 = a&c # MAJB
|
||||
add y0, y2 # y2 = S1 + CH # --
|
||||
|
||||
add h, d # d = k + w + h + d # --
|
||||
or T1, y3 # y3 = MAJ = ((a|c)&b)|(a&c) # MAJ
|
||||
add y1, h # h = k + w + h + S0 # --
|
||||
|
||||
add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
|
||||
|
||||
RotateState
|
||||
|
||||
################################### RND N + 3 #########################################
|
||||
|
||||
add y2, old_h # h = k + w + h + S0 + S1 + CH = t1 + S0# --
|
||||
mov f, y2 # y2 = f # CH
|
||||
rorx $41, e, y0 # y0 = e >> 41 # S1A
|
||||
rorx $18, e, y1 # y1 = e >> 18 # S1B
|
||||
xor g, y2 # y2 = f^g # CH
|
||||
|
||||
xor y1, y0 # y0 = (e>>41) ^ (e>>18) # S1
|
||||
rorx $14, e, y1 # y1 = (e >> 14) # S1
|
||||
and e, y2 # y2 = (f^g)&e # CH
|
||||
add y3, old_h # h = t1 + S0 + MAJ # --
|
||||
|
||||
xor y1, y0 # y0 = (e>>41) ^ (e>>18) ^ (e>>14) # S1
|
||||
rorx $34, a, T1 # T1 = a >> 34 # S0B
|
||||
xor g, y2 # y2 = CH = ((f^g)&e)^g # CH
|
||||
rorx $39, a, y1 # y1 = a >> 39 # S0A
|
||||
mov a, y3 # y3 = a # MAJA
|
||||
|
||||
xor T1, y1 # y1 = (a>>39) ^ (a>>34) # S0
|
||||
rorx $28, a, T1 # T1 = (a >> 28) # S0
|
||||
add 8*3+frame_XFER(%rsp), h # h = k + w + h # --
|
||||
or c, y3 # y3 = a|c # MAJA
|
||||
|
||||
xor T1, y1 # y1 = (a>>39) ^ (a>>34) ^ (a>>28) # S0
|
||||
mov a, T1 # T1 = a # MAJB
|
||||
and b, y3 # y3 = (a|c)&b # MAJA
|
||||
and c, T1 # T1 = a&c # MAJB
|
||||
add y0, y2 # y2 = S1 + CH # --
|
||||
|
||||
|
||||
add h, d # d = k + w + h + d # --
|
||||
or T1, y3 # y3 = MAJ = ((a|c)&b)|(a&c) # MAJ
|
||||
add y1, h # h = k + w + h + S0 # --
|
||||
|
||||
add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
|
||||
|
||||
add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0# --
|
||||
|
||||
add y3, h # h = t1 + S0 + MAJ # --
|
||||
|
||||
RotateState
|
||||
|
||||
.endm
|
||||
|
||||
########################################################################
|
||||
# void sha512_transform_rorx(const void* M, void* D, uint64_t L)#
|
||||
# Purpose: Updates the SHA512 digest stored at D with the message stored in M.
|
||||
# The size of the message pointed to by M must be an integer multiple of SHA512
|
||||
# message blocks.
|
||||
# L is the message length in SHA512 blocks
|
||||
########################################################################
|
||||
ENTRY(sha512_transform_rorx)
|
||||
# Allocate Stack Space
|
||||
mov %rsp, %rax
|
||||
sub $frame_size, %rsp
|
||||
and $~(0x20 - 1), %rsp
|
||||
mov %rax, frame_RSPSAVE(%rsp)
|
||||
|
||||
# Save GPRs
|
||||
mov %rbp, frame_GPRSAVE(%rsp)
|
||||
mov %rbx, 8*1+frame_GPRSAVE(%rsp)
|
||||
mov %r12, 8*2+frame_GPRSAVE(%rsp)
|
||||
mov %r13, 8*3+frame_GPRSAVE(%rsp)
|
||||
mov %r14, 8*4+frame_GPRSAVE(%rsp)
|
||||
mov %r15, 8*5+frame_GPRSAVE(%rsp)
|
||||
|
||||
shl $7, NUM_BLKS # convert to bytes
|
||||
jz done_hash
|
||||
add INP, NUM_BLKS # pointer to end of data
|
||||
mov NUM_BLKS, frame_INPEND(%rsp)
|
||||
|
||||
## load initial digest
|
||||
mov 8*0(CTX),a
|
||||
mov 8*1(CTX),b
|
||||
mov 8*2(CTX),c
|
||||
mov 8*3(CTX),d
|
||||
mov 8*4(CTX),e
|
||||
mov 8*5(CTX),f
|
||||
mov 8*6(CTX),g
|
||||
mov 8*7(CTX),h
|
||||
|
||||
vmovdqa PSHUFFLE_BYTE_FLIP_MASK(%rip), BYTE_FLIP_MASK
|
||||
|
||||
loop0:
|
||||
lea K512(%rip), TBL
|
||||
|
||||
## byte swap first 16 qwords
|
||||
COPY_YMM_AND_BSWAP Y_0, (INP), BYTE_FLIP_MASK
|
||||
COPY_YMM_AND_BSWAP Y_1, 1*32(INP), BYTE_FLIP_MASK
|
||||
COPY_YMM_AND_BSWAP Y_2, 2*32(INP), BYTE_FLIP_MASK
|
||||
COPY_YMM_AND_BSWAP Y_3, 3*32(INP), BYTE_FLIP_MASK
|
||||
|
||||
mov INP, frame_INP(%rsp)
|
||||
|
||||
## schedule 64 input qwords, by doing 4 passes of 16 rounds each
|
||||
movq $4, frame_SRND(%rsp)
|
||||
|
||||
.align 16
|
||||
loop1:
|
||||
vpaddq (TBL), Y_0, XFER
|
||||
vmovdqa XFER, frame_XFER(%rsp)
|
||||
FOUR_ROUNDS_AND_SCHED
|
||||
|
||||
vpaddq 1*32(TBL), Y_0, XFER
|
||||
vmovdqa XFER, frame_XFER(%rsp)
|
||||
FOUR_ROUNDS_AND_SCHED
|
||||
|
||||
vpaddq 2*32(TBL), Y_0, XFER
|
||||
vmovdqa XFER, frame_XFER(%rsp)
|
||||
FOUR_ROUNDS_AND_SCHED
|
||||
|
||||
vpaddq 3*32(TBL), Y_0, XFER
|
||||
vmovdqa XFER, frame_XFER(%rsp)
|
||||
add $(4*32), TBL
|
||||
FOUR_ROUNDS_AND_SCHED
|
||||
|
||||
subq $1, frame_SRND(%rsp)
|
||||
jne loop1
|
||||
|
||||
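## Final 16 rounds (2 passes of 8): consume the already-scheduled qwords
## held in Y_0..Y_3; no further message scheduling is needed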
movq $2, frame_SRND(%rsp)
|
||||
loop2:
|
||||
vpaddq (TBL), Y_0, XFER
|
||||
vmovdqa XFER, frame_XFER(%rsp)
|
||||
DO_4ROUNDS
|
||||
vpaddq 1*32(TBL), Y_1, XFER
|
||||
vmovdqa XFER, frame_XFER(%rsp)
|
||||
add $(2*32), TBL
|
||||
DO_4ROUNDS
|
||||
|
||||
vmovdqa Y_2, Y_0
|
||||
vmovdqa Y_3, Y_1
|
||||
|
||||
subq $1, frame_SRND(%rsp)
|
||||
jne loop2
|
||||
|
||||
addm 8*0(CTX),a
|
||||
addm 8*1(CTX),b
|
||||
addm 8*2(CTX),c
|
||||
addm 8*3(CTX),d
|
||||
addm 8*4(CTX),e
|
||||
addm 8*5(CTX),f
|
||||
addm 8*6(CTX),g
|
||||
addm 8*7(CTX),h
|
||||
|
||||
mov frame_INP(%rsp), INP
|
||||
add $128, INP
|
||||
cmp frame_INPEND(%rsp), INP
|
||||
jne loop0
|
||||
|
||||
done_hash:
|
||||
|
||||
# Restore GPRs
|
||||
mov frame_GPRSAVE(%rsp) ,%rbp
|
||||
mov 8*1+frame_GPRSAVE(%rsp) ,%rbx
|
||||
mov 8*2+frame_GPRSAVE(%rsp) ,%r12
|
||||
mov 8*3+frame_GPRSAVE(%rsp) ,%r13
|
||||
mov 8*4+frame_GPRSAVE(%rsp) ,%r14
|
||||
mov 8*5+frame_GPRSAVE(%rsp) ,%r15
|
||||
|
||||
# Restore Stack Pointer
|
||||
mov frame_RSPSAVE(%rsp), %rsp
|
||||
ret
|
||||
ENDPROC(sha512_transform_rorx)
|
||||
|
||||
########################################################################
|
||||
### Binary Data
|
||||
|
||||
.data
|
||||
|
||||
.align 64
|
||||
# K[t] used in SHA512 hashing
|
||||
K512:
|
||||
.quad 0x428a2f98d728ae22,0x7137449123ef65cd
|
||||
.quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
|
||||
.quad 0x3956c25bf348b538,0x59f111f1b605d019
|
||||
.quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118
|
||||
.quad 0xd807aa98a3030242,0x12835b0145706fbe
|
||||
.quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
|
||||
.quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1
|
||||
.quad 0x9bdc06a725c71235,0xc19bf174cf692694
|
||||
.quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3
|
||||
.quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
|
||||
.quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483
|
||||
.quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5
|
||||
.quad 0x983e5152ee66dfab,0xa831c66d2db43210
|
||||
.quad 0xb00327c898fb213f,0xbf597fc7beef0ee4
|
||||
.quad 0xc6e00bf33da88fc2,0xd5a79147930aa725
|
||||
.quad 0x06ca6351e003826f,0x142929670a0e6e70
|
||||
.quad 0x27b70a8546d22ffc,0x2e1b21385c26c926
|
||||
.quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df
|
||||
.quad 0x650a73548baf63de,0x766a0abb3c77b2a8
|
||||
.quad 0x81c2c92e47edaee6,0x92722c851482353b
|
||||
.quad 0xa2bfe8a14cf10364,0xa81a664bbc423001
|
||||
.quad 0xc24b8b70d0f89791,0xc76c51a30654be30
|
||||
.quad 0xd192e819d6ef5218,0xd69906245565a910
|
||||
.quad 0xf40e35855771202a,0x106aa07032bbd1b8
|
||||
.quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53
|
||||
.quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
|
||||
.quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
|
||||
.quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
|
||||
.quad 0x748f82ee5defb2fc,0x78a5636f43172f60
|
||||
.quad 0x84c87814a1f0ab72,0x8cc702081a6439ec
|
||||
.quad 0x90befffa23631e28,0xa4506cebde82bde9
|
||||
.quad 0xbef9a3f7b2c67915,0xc67178f2e372532b
|
||||
.quad 0xca273eceea26619c,0xd186b8c721c0c207
|
||||
.quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
|
||||
.quad 0x06f067aa72176fba,0x0a637dc5a2c898a6
|
||||
.quad 0x113f9804bef90dae,0x1b710b35131c471b
|
||||
.quad 0x28db77f523047d84,0x32caab7b40c72493
|
||||
.quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
|
||||
.quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
|
||||
.quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817
|
||||
|
||||
.align 32
|
||||
|
||||
# Mask for byte-swapping the qwords in a YMM register using vpshufb.
|
||||
PSHUFFLE_BYTE_FLIP_MASK:
|
||||
.octa 0x08090a0b0c0d0e0f0001020304050607
|
||||
.octa 0x18191a1b1c1d1e1f1011121314151617
|
||||
|
||||
MASK_YMM_LO:
|
||||
.octa 0x00000000000000000000000000000000
|
||||
.octa 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
|
||||
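For orientation, the message-schedule recurrence that both vector implementations compute (four qwords per step in the AVX2 code above, two per step in the SSSE3 code below) can be written out in scalar Go. This is an editorial sketch only; the function names and the sample values are illustrative and not part of this commit.

package main

import (
	"fmt"
	"math/bits"
)

// Small sigma functions of SHA-512; the assembly builds these from the same
// rotate/shift triples (ror 1 / ror 8 / shr 7 and ror 19 / ror 61 / shr 6).
func sigma0(x uint64) uint64 {
	return bits.RotateLeft64(x, -1) ^ bits.RotateLeft64(x, -8) ^ (x >> 7)
}

func sigma1(x uint64) uint64 {
	return bits.RotateLeft64(x, -19) ^ bits.RotateLeft64(x, -61) ^ (x >> 6)
}

// expand fills w[16:80] from the 16 big-endian message qwords in w[0:16]:
// W[t] = sigma1(W[t-2]) + W[t-7] + sigma0(W[t-15]) + W[t-16]  (mod 2^64)
func expand(w *[80]uint64) {
	for t := 16; t < 80; t++ {
		w[t] = sigma1(w[t-2]) + w[t-7] + sigma0(w[t-15]) + w[t-16]
	}
}

func main() {
	var w [80]uint64
	w[0] = 0x6162638000000000 // "abc" followed by the 0x80 padding byte
	w[15] = 24                // message length in bits
	expand(&w)
	fmt.Printf("W[16] = %016x\n", w[16])
}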
421
pkgs/crypto/sha512/sha512-ssse3-asm.S
Normal file
421
pkgs/crypto/sha512/sha512-ssse3-asm.S
Normal file
@@ -0,0 +1,421 @@
|
||||
########################################################################
|
||||
# Implement fast SHA-512 with SSSE3 instructions. (x86_64)
|
||||
#
|
||||
# Copyright (C) 2013 Intel Corporation.
|
||||
#
|
||||
# Authors:
|
||||
# James Guilford <james.guilford@intel.com>
|
||||
# Kirk Yap <kirk.s.yap@intel.com>
|
||||
# David Cote <david.m.cote@intel.com>
|
||||
# Tim Chen <tim.c.chen@linux.intel.com>
|
||||
#
|
||||
# This software is available to you under a choice of one of two
|
||||
# licenses. You may choose to be licensed under the terms of the GNU
|
||||
# General Public License (GPL) Version 2, available from the file
|
||||
# COPYING in the main directory of this source tree, or the
|
||||
# OpenIB.org BSD license below:
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or
|
||||
# without modification, are permitted provided that the following
|
||||
# conditions are met:
|
||||
#
|
||||
# - Redistributions of source code must retain the above
|
||||
# copyright notice, this list of conditions and the following
|
||||
# disclaimer.
|
||||
#
|
||||
# - Redistributions in binary form must reproduce the above
|
||||
# copyright notice, this list of conditions and the following
|
||||
# disclaimer in the documentation and/or other materials
|
||||
# provided with the distribution.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
# SOFTWARE.
|
||||
#
|
||||
########################################################################
|
||||
#
|
||||
# This code is described in an Intel White-Paper:
|
||||
# "Fast SHA-512 Implementations on Intel Architecture Processors"
|
||||
#
|
||||
# To find it, surf to http://www.intel.com/p/en_US/embedded
|
||||
# and search for that title.
|
||||
#
|
||||
########################################################################
|
||||
|
||||
#include "asm.S"
|
||||
|
||||
.text
|
||||
|
||||
# Virtual Registers
|
||||
# ARG1
|
||||
msg = %rdi
|
||||
# ARG2
|
||||
digest = %rsi
|
||||
# ARG3
|
||||
msglen = %rdx
|
||||
T1 = %rcx
|
||||
T2 = %r8
|
||||
a_64 = %r9
|
||||
b_64 = %r10
|
||||
c_64 = %r11
|
||||
d_64 = %r12
|
||||
e_64 = %r13
|
||||
f_64 = %r14
|
||||
g_64 = %r15
|
||||
h_64 = %rbx
|
||||
tmp0 = %rax
|
||||
|
||||
# Local variables (stack frame)
|
||||
|
||||
W_SIZE = 80*8
|
||||
WK_SIZE = 2*8
|
||||
RSPSAVE_SIZE = 1*8
|
||||
GPRSAVE_SIZE = 5*8
|
||||
|
||||
frame_W = 0
|
||||
frame_WK = frame_W + W_SIZE
|
||||
frame_RSPSAVE = frame_WK + WK_SIZE
|
||||
frame_GPRSAVE = frame_RSPSAVE + RSPSAVE_SIZE
|
||||
frame_size = frame_GPRSAVE + GPRSAVE_SIZE
|
||||
|
||||
# Useful QWORD "arrays" for simpler memory references
|
||||
# MSG, DIGEST, K_t, W_t are arrays
|
||||
# WK_2(t) points to 1 of 2 qwords at frame.WK depending on t being odd/even
|
||||
|
||||
# Input message (arg1)
|
||||
#define MSG(i) 8*i(msg)
|
||||
|
||||
# Output Digest (arg2)
|
||||
#define DIGEST(i) 8*i(digest)
|
||||
|
||||
# SHA Constants (static mem)
|
||||
#define K_t(i) 8*i+K512(%rip)
|
||||
|
||||
# Message Schedule (stack frame)
|
||||
#define W_t(i) 8*i+frame_W(%rsp)
|
||||
|
||||
# W[t]+K[t] (stack frame)
|
||||
#define WK_2(i) 8*((i%2))+frame_WK(%rsp)
|
||||
|
||||
.macro RotateState
|
||||
# Rotate symbols a..h right
|
||||
TMP = h_64
|
||||
h_64 = g_64
|
||||
g_64 = f_64
|
||||
f_64 = e_64
|
||||
e_64 = d_64
|
||||
d_64 = c_64
|
||||
c_64 = b_64
|
||||
b_64 = a_64
|
||||
a_64 = TMP
|
||||
.endm
|
||||
|
||||
.macro SHA512_Round rnd
|
||||
|
||||
# Compute Round %%t
|
||||
mov f_64, T1 # T1 = f
|
||||
mov e_64, tmp0 # tmp = e
|
||||
xor g_64, T1 # T1 = f ^ g
|
||||
ror $23, tmp0 # 41 # tmp = e ror 23
|
||||
and e_64, T1 # T1 = (f ^ g) & e
|
||||
xor e_64, tmp0 # tmp = (e ror 23) ^ e
|
||||
xor g_64, T1 # T1 = ((f ^ g) & e) ^ g = CH(e,f,g)
|
||||
idx = \rnd
|
||||
add WK_2(idx), T1 # W[t] + K[t] from message scheduler
|
||||
ror $4, tmp0 # 18 # tmp = ((e ror 23) ^ e) ror 4
|
||||
xor e_64, tmp0 # tmp = (((e ror 23) ^ e) ror 4) ^ e
|
||||
mov a_64, T2 # T2 = a
|
||||
add h_64, T1 # T1 = CH(e,f,g) + W[t] + K[t] + h
|
||||
ror $14, tmp0 # 14 # tmp = ((((e ror23)^e)ror4)^e)ror14 = S1(e)
|
||||
add tmp0, T1 # T1 = CH(e,f,g) + W[t] + K[t] + S1(e)
|
||||
mov a_64, tmp0 # tmp = a
|
||||
xor c_64, T2 # T2 = a ^ c
|
||||
and c_64, tmp0 # tmp = a & c
|
||||
and b_64, T2 # T2 = (a ^ c) & b
|
||||
xor tmp0, T2 # T2 = ((a ^ c) & b) ^ (a & c) = Maj(a,b,c)
|
||||
mov a_64, tmp0 # tmp = a
|
||||
ror $5, tmp0 # 39 # tmp = a ror 5
|
||||
xor a_64, tmp0 # tmp = (a ror 5) ^ a
|
||||
add T1, d_64 # e(next_state) = d + T1
|
||||
ror $6, tmp0 # 34 # tmp = ((a ror 5) ^ a) ror 6
|
||||
xor a_64, tmp0 # tmp = (((a ror 5) ^ a) ror 6) ^ a
|
||||
lea (T1, T2), h_64 # a(next_state) = T1 + Maj(a,b,c)
|
||||
ror $28, tmp0 # 28 # tmp = ((((a ror5)^a)ror6)^a)ror28 = S0(a)
|
||||
add tmp0, h_64 # a(next_state) = T1 + Maj(a,b,c) + S0(a)
|
||||
RotateState
|
||||
.endm
|
||||
|
||||
.macro SHA512_2Sched_2Round_sse rnd
|
||||
|
||||
# Compute rounds t-2 and t-1
|
||||
# Compute message schedule QWORDS t and t+1
|
||||
|
||||
# Two rounds are computed based on the values for K[t-2]+W[t-2] and
|
||||
# K[t-1]+W[t-1] which were previously stored at WK_2 by the message
|
||||
# scheduler.
|
||||
# The two new schedule QWORDS are stored at [W_t(%%t)] and [W_t(%%t+1)].
|
||||
# They are then added to their respective SHA512 constants at
|
||||
# [K_t(%%t)] and [K_t(%%t+1)] and stored at dqword [WK_2(%%t)]
|
||||
# For brevity, the comments following vectored instructions only refer to
|
||||
# the first of a pair of QWORDS.
|
||||
# E.g. XMM2=W[t-2] really means XMM2={W[t-2]|W[t-1]}
|
||||
# The computation of the message schedule and the rounds are tightly
|
||||
# stitched to take advantage of instruction-level parallelism.
|
||||
# For clarity, integer instructions (for the rounds calculation) are indented
|
||||
# by one tab. Vectored instructions (for the message scheduler) are indented
|
||||
# by two tabs.
|
||||
|
||||
mov f_64, T1
|
||||
idx = \rnd -2
|
||||
movdqa W_t(idx), %xmm2 # XMM2 = W[t-2]
|
||||
xor g_64, T1
|
||||
and e_64, T1
|
||||
movdqa %xmm2, %xmm0 # XMM0 = W[t-2]
|
||||
xor g_64, T1
|
||||
idx = \rnd
|
||||
add WK_2(idx), T1
|
||||
idx = \rnd - 15
|
||||
movdqu W_t(idx), %xmm5 # XMM5 = W[t-15]
|
||||
mov e_64, tmp0
|
||||
ror $23, tmp0 # 41
|
||||
movdqa %xmm5, %xmm3 # XMM3 = W[t-15]
|
||||
xor e_64, tmp0
|
||||
ror $4, tmp0 # 18
|
||||
psrlq $61-19, %xmm0 # XMM0 = W[t-2] >> 42
|
||||
xor e_64, tmp0
|
||||
ror $14, tmp0 # 14
|
||||
psrlq $(8-7), %xmm3 # XMM3 = W[t-15] >> 1
|
||||
add tmp0, T1
|
||||
add h_64, T1
|
||||
pxor %xmm2, %xmm0 # XMM0 = (W[t-2] >> 42) ^ W[t-2]
|
||||
mov a_64, T2
|
||||
xor c_64, T2
|
||||
pxor %xmm5, %xmm3 # XMM3 = (W[t-15] >> 1) ^ W[t-15]
|
||||
and b_64, T2
|
||||
mov a_64, tmp0
|
||||
psrlq $(19-6), %xmm0 # XMM0 = ((W[t-2]>>42)^W[t-2])>>13
|
||||
and c_64, tmp0
|
||||
xor tmp0, T2
|
||||
psrlq $(7-1), %xmm3 # XMM3 = ((W[t-15]>>1)^W[t-15])>>6
|
||||
mov a_64, tmp0
|
||||
ror $5, tmp0 # 39
|
||||
pxor %xmm2, %xmm0 # XMM0 = (((W[t-2]>>42)^W[t-2])>>13)^W[t-2]
|
||||
xor a_64, tmp0
|
||||
ror $6, tmp0 # 34
|
||||
pxor %xmm5, %xmm3 # XMM3 = (((W[t-15]>>1)^W[t-15])>>6)^W[t-15]
|
||||
xor a_64, tmp0
|
||||
ror $28, tmp0 # 28
|
||||
psrlq $6, %xmm0 # XMM0 = ((((W[t-2]>>42)^W[t-2])>>13)^W[t-2])>>6
|
||||
add tmp0, T2
|
||||
add T1, d_64
|
||||
psrlq $1, %xmm3 # XMM3 = (((W[t-15]>>1)^W[t-15])>>6)^W[t-15]>>1
|
||||
lea (T1, T2), h_64
|
||||
RotateState
|
||||
movdqa %xmm2, %xmm1 # XMM1 = W[t-2]
|
||||
mov f_64, T1
|
||||
xor g_64, T1
|
||||
movdqa %xmm5, %xmm4 # XMM4 = W[t-15]
|
||||
and e_64, T1
|
||||
xor g_64, T1
|
||||
psllq $(64-19)-(64-61) , %xmm1 # XMM1 = W[t-2] << 42
|
||||
idx = \rnd + 1
|
||||
add WK_2(idx), T1
|
||||
mov e_64, tmp0
|
||||
psllq $(64-1)-(64-8), %xmm4 # XMM4 = W[t-15] << 7
|
||||
ror $23, tmp0 # 41
|
||||
xor e_64, tmp0
|
||||
pxor %xmm2, %xmm1 # XMM1 = (W[t-2] << 42)^W[t-2]
|
||||
ror $4, tmp0 # 18
|
||||
xor e_64, tmp0
|
||||
pxor %xmm5, %xmm4 # XMM4 = (W[t-15]<<7)^W[t-15]
|
||||
ror $14, tmp0 # 14
|
||||
add tmp0, T1
|
||||
psllq $(64-61), %xmm1 # XMM1 = ((W[t-2] << 42)^W[t-2])<<3
|
||||
add h_64, T1
|
||||
mov a_64, T2
|
||||
psllq $(64-8), %xmm4 # XMM4 = ((W[t-15]<<7)^W[t-15])<<56
|
||||
xor c_64, T2
|
||||
and b_64, T2
|
||||
pxor %xmm1, %xmm0 # XMM0 = s1(W[t-2])
|
||||
mov a_64, tmp0
|
||||
and c_64, tmp0
|
||||
idx = \rnd - 7
|
||||
movdqu W_t(idx), %xmm1 # XMM1 = W[t-7]
|
||||
xor tmp0, T2
|
||||
pxor %xmm4, %xmm3 # XMM3 = s0(W[t-15])
|
||||
mov a_64, tmp0
|
||||
paddq %xmm3, %xmm0 # XMM0 = s1(W[t-2]) + s0(W[t-15])
|
||||
ror $5, tmp0 # 39
|
||||
idx =\rnd-16
|
||||
paddq W_t(idx), %xmm0 # XMM0 = s1(W[t-2]) + s0(W[t-15]) + W[t-16]
|
||||
xor a_64, tmp0
|
||||
paddq %xmm1, %xmm0 # XMM0 = s1(W[t-2]) + W[t-7] + s0(W[t-15]) + W[t-16]
|
||||
ror $6, tmp0 # 34
|
||||
movdqa %xmm0, W_t(\rnd) # Store scheduled qwords
|
||||
xor a_64, tmp0
|
||||
paddq K_t(\rnd), %xmm0 # Compute W[t]+K[t]
|
||||
ror $28, tmp0 # 28
|
||||
idx = \rnd
|
||||
movdqa %xmm0, WK_2(idx) # Store W[t]+K[t] for next rounds
|
||||
add tmp0, T2
|
||||
add T1, d_64
|
||||
lea (T1, T2), h_64
|
||||
RotateState
|
||||
.endm
|
||||
|
||||
########################################################################
|
||||
# void sha512_transform_ssse3(const void* M, void* D, uint64_t L)#
|
||||
# Purpose: Updates the SHA512 digest stored at D with the message stored in M.
|
||||
# The size of the message pointed to by M must be an integer multiple of SHA512
|
||||
# message blocks.
|
||||
# L is the message length in SHA512 blocks.
|
||||
########################################################################
|
||||
ENTRY(sha512_transform_ssse3)
|
||||
|
||||
cmp $0, msglen
|
||||
je nowork
|
||||
|
||||
# Allocate Stack Space
|
||||
mov %rsp, %rax
|
||||
sub $frame_size, %rsp
|
||||
and $~(0x20 - 1), %rsp
|
||||
mov %rax, frame_RSPSAVE(%rsp)
|
||||
|
||||
# Save GPRs
|
||||
mov %rbx, frame_GPRSAVE(%rsp)
|
||||
mov %r12, frame_GPRSAVE +8*1(%rsp)
|
||||
mov %r13, frame_GPRSAVE +8*2(%rsp)
|
||||
mov %r14, frame_GPRSAVE +8*3(%rsp)
|
||||
mov %r15, frame_GPRSAVE +8*4(%rsp)
|
||||
|
||||
updateblock:
|
||||
|
||||
# Load state variables
|
||||
mov DIGEST(0), a_64
|
||||
mov DIGEST(1), b_64
|
||||
mov DIGEST(2), c_64
|
||||
mov DIGEST(3), d_64
|
||||
mov DIGEST(4), e_64
|
||||
mov DIGEST(5), f_64
|
||||
mov DIGEST(6), g_64
|
||||
mov DIGEST(7), h_64
|
||||
|
||||
t = 0
|
||||
.rept 80/2 + 1
|
||||
# (80 rounds) / (2 rounds/iteration) + (1 iteration)
|
||||
# +1 iteration because the scheduler leads hashing by 1 iteration
|
||||
.if t < 2
|
||||
# BSWAP 2 QWORDS
|
||||
movdqa XMM_QWORD_BSWAP(%rip), %xmm1
|
||||
movdqu MSG(t), %xmm0
|
||||
pshufb %xmm1, %xmm0 # BSWAP
|
||||
movdqa %xmm0, W_t(t) # Store Scheduled Pair
|
||||
paddq K_t(t), %xmm0 # Compute W[t]+K[t]
|
||||
movdqa %xmm0, WK_2(t) # Store into WK for rounds
|
||||
.elseif t < 16
|
||||
# BSWAP 2 QWORDS # Compute 2 Rounds
|
||||
movdqu MSG(t), %xmm0
|
||||
pshufb %xmm1, %xmm0 # BSWAP
|
||||
SHA512_Round t-2 # Round t-2
|
||||
movdqa %xmm0, W_t(t) # Store Scheduled Pair
|
||||
paddq K_t(t), %xmm0 # Compute W[t]+K[t]
|
||||
SHA512_Round t-1 # Round t-1
|
||||
movdqa %xmm0, WK_2(t) # Store W[t]+K[t] into WK
|
||||
.elseif t < 79
|
||||
# Schedule 2 QWORDS # Compute 2 Rounds
|
||||
SHA512_2Sched_2Round_sse t
|
||||
.else
|
||||
# Compute 2 Rounds
|
||||
SHA512_Round t-2
|
||||
SHA512_Round t-1
|
||||
.endif
|
||||
t = t+2
|
||||
.endr
|
||||
|
||||
# Update digest
|
||||
add a_64, DIGEST(0)
|
||||
add b_64, DIGEST(1)
|
||||
add c_64, DIGEST(2)
|
||||
add d_64, DIGEST(3)
|
||||
add e_64, DIGEST(4)
|
||||
add f_64, DIGEST(5)
|
||||
add g_64, DIGEST(6)
|
||||
add h_64, DIGEST(7)
|
||||
|
||||
# Advance to next message block
|
||||
add $16*8, msg
|
||||
dec msglen
|
||||
jnz updateblock
|
||||
|
||||
# Restore GPRs
|
||||
mov frame_GPRSAVE(%rsp), %rbx
|
||||
mov frame_GPRSAVE +8*1(%rsp), %r12
|
||||
mov frame_GPRSAVE +8*2(%rsp), %r13
|
||||
mov frame_GPRSAVE +8*3(%rsp), %r14
|
||||
mov frame_GPRSAVE +8*4(%rsp), %r15
|
||||
|
||||
# Restore Stack Pointer
|
||||
mov frame_RSPSAVE(%rsp), %rsp
|
||||
|
||||
nowork:
|
||||
ret
|
||||
ENDPROC(sha512_transform_ssse3)
|
||||
|
||||
########################################################################
|
||||
### Binary Data
|
||||
|
||||
.data
|
||||
|
||||
.align 16
|
||||
|
||||
# Mask for byte-swapping a couple of qwords in an XMM register using (v)pshufb.
|
||||
XMM_QWORD_BSWAP:
|
||||
.octa 0x08090a0b0c0d0e0f0001020304050607
|
||||
|
||||
# K[t] used in SHA512 hashing
|
||||
K512:
|
||||
.quad 0x428a2f98d728ae22,0x7137449123ef65cd
|
||||
.quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
|
||||
.quad 0x3956c25bf348b538,0x59f111f1b605d019
|
||||
.quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118
|
||||
.quad 0xd807aa98a3030242,0x12835b0145706fbe
|
||||
.quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
|
||||
.quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1
|
||||
.quad 0x9bdc06a725c71235,0xc19bf174cf692694
|
||||
.quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3
|
||||
.quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
|
||||
.quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483
|
||||
.quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5
|
||||
.quad 0x983e5152ee66dfab,0xa831c66d2db43210
|
||||
.quad 0xb00327c898fb213f,0xbf597fc7beef0ee4
|
||||
.quad 0xc6e00bf33da88fc2,0xd5a79147930aa725
|
||||
.quad 0x06ca6351e003826f,0x142929670a0e6e70
|
||||
.quad 0x27b70a8546d22ffc,0x2e1b21385c26c926
|
||||
.quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df
|
||||
.quad 0x650a73548baf63de,0x766a0abb3c77b2a8
|
||||
.quad 0x81c2c92e47edaee6,0x92722c851482353b
|
||||
.quad 0xa2bfe8a14cf10364,0xa81a664bbc423001
|
||||
.quad 0xc24b8b70d0f89791,0xc76c51a30654be30
|
||||
.quad 0xd192e819d6ef5218,0xd69906245565a910
|
||||
.quad 0xf40e35855771202a,0x106aa07032bbd1b8
|
||||
.quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53
|
||||
.quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
|
||||
.quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
|
||||
.quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
|
||||
.quad 0x748f82ee5defb2fc,0x78a5636f43172f60
|
||||
.quad 0x84c87814a1f0ab72,0x8cc702081a6439ec
|
||||
.quad 0x90befffa23631e28,0xa4506cebde82bde9
|
||||
.quad 0xbef9a3f7b2c67915,0xc67178f2e372532b
|
||||
.quad 0xca273eceea26619c,0xd186b8c721c0c207
|
||||
.quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
|
||||
.quad 0x06f067aa72176fba,0x0a637dc5a2c898a6
|
||||
.quad 0x113f9804bef90dae,0x1b710b35131c471b
|
||||
.quad 0x28db77f523047d84,0x32caab7b40c72493
|
||||
.quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
|
||||
.quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
|
||||
.quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817
|
||||
9
pkgs/crypto/sha512/sha512.go
Normal file
9
pkgs/crypto/sha512/sha512.go
Normal file
@@ -0,0 +1,9 @@
|
||||
// +build amd64
|
||||
|
||||
package sha512
|
||||
|
||||
// #include <stdint.h>
|
||||
// void sha512_transform_avx(const void* M, void* D, uint64_t L);
|
||||
// void sha512_transform_ssse3(const void* M, void* D, uint64_t L);
|
||||
// void sha512_transform_rorx(const void* M, void* D, uint64_t L);
|
||||
import "C"
|
||||
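A minimal sketch of how the cgo prototypes above might be wrapped on the Go side. The file layout, the wrapper name blockSSSE3 and the build tag are assumptions, not part of this commit; the sketch repeats the prototype in its own cgo preamble so it stands on its own inside the package.

// +build amd64

package sha512

// #include <stdint.h>
// void sha512_transform_ssse3(const void* M, void* D, uint64_t L);
import "C"

import "unsafe"

// blockSSSE3 (hypothetical name) runs the SSSE3 transform over all complete
// 128-byte blocks of msg, updating the eight 64-bit digest words in place.
func blockSSSE3(state *[8]uint64, msg []byte) {
	blocks := uint64(len(msg) / 128)
	if blocks == 0 {
		return
	}
	C.sha512_transform_ssse3(
		unsafe.Pointer(&msg[0]),
		unsafe.Pointer(&state[0]),
		C.uint64_t(blocks),
	)
}

The avx and rorx entry points take the same three arguments, so runtime dispatch reduces to picking one of the three symbols based on detected CPU features.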
@@ -123,6 +123,7 @@ func (aStorage *appendStorage) List() ([]storage.ObjectDescription, error) {
|
||||
var objectDescription storage.ObjectDescription
|
||||
objectDescription.Name = objectName
|
||||
objectDescription.Md5sum = ""
|
||||
objectDescription.Hash = ""
|
||||
objectDescription.Protectionlevel = ""
|
||||
objectDescList = append(objectDescList, objectDescription)
|
||||
}
|
||||
|
||||
@@ -16,6 +16,7 @@ import (
|
||||
"github.com/minio-io/minio/pkgs/split"
|
||||
"github.com/minio-io/minio/pkgs/storage"
|
||||
"github.com/minio-io/minio/pkgs/storage/appendstorage"
|
||||
"github.com/spaolacci/murmur3"
|
||||
)
|
||||
|
||||
type encodedStorage struct {
|
||||
@@ -30,6 +31,7 @@ type encodedStorage struct {
|
||||
type StorageEntry struct {
|
||||
Path string
|
||||
Md5sum []byte
|
||||
Murmurhash uint64
|
||||
Blocks []StorageBlockEntry
|
||||
Encoderparams erasure.EncoderParams
|
||||
}
|
||||
@@ -105,6 +107,7 @@ func (eStorage *encodedStorage) List() ([]storage.ObjectDescription, error) {
|
||||
protectionLevel := strconv.Itoa(objectEntry.Encoderparams.K) + "," + strconv.Itoa(objectEntry.Encoderparams.M)
|
||||
objectDescription.Name = objectName
|
||||
objectDescription.Md5sum = hex.EncodeToString(objectEntry.Md5sum)
|
||||
objectDescription.Hash = strconv.FormatUint(objectEntry.Murmurhash, 16)
|
||||
objectDescription.Protectionlevel = protectionLevel
|
||||
objectDescList = append(objectDescList, objectDescription)
|
||||
}
|
||||
@@ -126,15 +129,18 @@ func (eStorage *encodedStorage) Put(objectPath string, object io.Reader) error {
|
||||
}
|
||||
encoder := erasure.NewEncoder(encoderParameters)
|
||||
entry := StorageEntry{
|
||||
Path: objectPath,
|
||||
Md5sum: nil,
|
||||
Blocks: make([]StorageBlockEntry, 0),
|
||||
Path: objectPath,
|
||||
Md5sum: nil,
|
||||
Murmurhash: 0,
|
||||
Blocks: make([]StorageBlockEntry, 0),
|
||||
Encoderparams: erasure.EncoderParams{
|
||||
K: eStorage.K,
|
||||
M: eStorage.M,
|
||||
Technique: erasure.CAUCHY,
|
||||
},
|
||||
}
|
||||
// Hash
|
||||
murmur := murmur3.Sum64([]byte(objectPath))
|
||||
// allocate md5
|
||||
hash := md5.New()
|
||||
i := 0
|
||||
@@ -163,6 +169,7 @@ func (eStorage *encodedStorage) Put(objectPath string, object io.Reader) error {
|
||||
i++
|
||||
}
|
||||
entry.Md5sum = hash.Sum(nil)
|
||||
entry.Murmurhash = murmur
|
||||
eStorage.objects[objectPath] = entry
|
||||
var gobBuffer bytes.Buffer
|
||||
gobEncoder := gob.NewEncoder(&gobBuffer)
|
||||
|
||||
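The Put path above combines two digests: murmur3 over the object path (stored as Murmurhash and listed as Hash) and md5 streamed over the object content (stored as Md5sum). A standalone sketch of that pattern follows; the names and sample payload are illustrative only.

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"io"
	"strconv"
	"strings"

	"github.com/spaolacci/murmur3"
)

func main() {
	objectPath := "bucket/object"
	object := strings.NewReader("example object payload")

	// Name hash: murmur3 over the object path.
	nameHash := murmur3.Sum64([]byte(objectPath))

	// Content checksum: md5 streamed over the payload.
	sum := md5.New()
	if _, err := io.Copy(sum, object); err != nil {
		panic(err)
	}

	fmt.Println("Hash:  ", strconv.FormatUint(nameHash, 16))
	fmt.Println("Md5sum:", hex.EncodeToString(sum.Sum(nil)))
}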
@@ -15,6 +15,7 @@ import (
|
||||
"github.com/minio-io/minio/pkgs/split"
|
||||
"github.com/minio-io/minio/pkgs/storage"
|
||||
"github.com/minio-io/minio/pkgs/storage/appendstorage"
|
||||
"github.com/spaolacci/murmur3"
|
||||
)
|
||||
|
||||
type fileSystemStorage struct {
|
||||
@@ -27,6 +28,7 @@ type fileSystemStorage struct {
|
||||
type StorageEntry struct {
|
||||
Path string
|
||||
Md5sum []byte
|
||||
Murmurhash uint64
|
||||
ChunkLength int
|
||||
}
|
||||
|
||||
@@ -66,6 +68,7 @@ func (fsStorage *fileSystemStorage) List() ([]storage.ObjectDescription, error)
|
||||
var objectDescription storage.ObjectDescription
|
||||
objectDescription.Name = objectName
|
||||
objectDescription.Md5sum = hex.EncodeToString(objectEntry.Md5sum)
|
||||
objectDescription.Hash = strconv.FormatUint(objectEntry.Murmurhash, 16)
|
||||
objectDescription.Protectionlevel = ""
|
||||
objectDescList = append(objectDescList, objectDescription)
|
||||
}
|
||||
@@ -119,9 +122,11 @@ func (fsStorage *fileSystemStorage) Put(objectPath string, object io.Reader) err
|
||||
entry := StorageEntry{
|
||||
Path: objectPath,
|
||||
Md5sum: nil,
|
||||
Murmurhash: 0,
|
||||
ChunkLength: 0,
|
||||
}
|
||||
|
||||
murmur := murmur3.Sum64([]byte(objectPath))
|
||||
hash := md5.New()
|
||||
i := 0
|
||||
for chunk := range chunks {
|
||||
@@ -138,6 +143,7 @@ func (fsStorage *fileSystemStorage) Put(objectPath string, object io.Reader) err
|
||||
}
|
||||
entry.Md5sum = hash.Sum(nil)
|
||||
entry.ChunkLength = i
|
||||
entry.Murmurhash = murmur
|
||||
fsStorage.objects[objectPath] = entry
|
||||
var gobBuffer bytes.Buffer
|
||||
gobEncoder := gob.NewEncoder(&gobBuffer)
|
||||
|
||||
@@ -12,5 +12,5 @@ type ObjectDescription struct {
|
||||
Name string
|
||||
Md5sum string
|
||||
Protectionlevel string
|
||||
// Hash string - TODO
|
||||
Hash string
|
||||
}
|
||||
|
||||