Merge branch 'dev' into edge

Adam Ierymenko 2018-03-19 11:18:54 -07:00
commit 209f6d9d2d
71 changed files with 38483 additions and 1387 deletions


@@ -0,0 +1,94 @@
#include "crypto_sign.h"
#include "crypto_verify_32.h"
#include "crypto_hash_sha512.h"
#include "randombytes.h"
#include "ge25519.h"
#include "hram.h"

#define MAXBATCH 64

int crypto_sign_open_batch(
    unsigned char* const m[],unsigned long long mlen[],
    unsigned char* const sm[],const unsigned long long smlen[],
    unsigned char* const pk[],
    unsigned long long num
    )
{
  int ret = 0;
  unsigned long long i, j;
  shortsc25519 r[MAXBATCH];
  sc25519 scalars[2*MAXBATCH+1];
  ge25519 points[2*MAXBATCH+1];
  unsigned char hram[crypto_hash_sha512_BYTES];
  unsigned long long batchsize;

  /* Mark every message length invalid; it is set only after verification. */
  for (i = 0;i < num;++i) mlen[i] = -1;

  /* Batch verification is only worthwhile for three or more signatures. */
  while (num >= 3) {
    batchsize = num;
    if (batchsize > MAXBATCH) batchsize = MAXBATCH;

    for (i = 0;i < batchsize;++i)
      if (smlen[i] < 64) goto fallback;

    /* Random short scalars r[i]; an invalid batch then survives the
       combined check below only with negligible probability. */
    randombytes((unsigned char*)r,sizeof(shortsc25519) * batchsize);

    /* Computing scalars[0] = r1*s1 + r2*s2 + ... */
    for(i=0;i<batchsize;i++)
    {
      sc25519_from32bytes(&scalars[i], sm[i]+32);
      sc25519_mul_shortsc(&scalars[i], &scalars[i], &r[i]);
    }
    for(i=1;i<batchsize;i++)
      sc25519_add(&scalars[0], &scalars[0], &scalars[i]);

    /* Computing scalars[1] ... scalars[batchsize] as r[i]*H(R[i],A[i],m[i]) */
    for(i=0;i<batchsize;i++)
    {
      get_hram(hram, sm[i], pk[i], m[i], smlen[i]);
      sc25519_from64bytes(&scalars[i+1],hram);
      sc25519_mul_shortsc(&scalars[i+1],&scalars[i+1],&r[i]);
    }

    /* Setting scalars[batchsize+1] ... scalars[2*batchsize] to r[i] */
    for(i=0;i<batchsize;i++)
      sc25519_from_shortsc(&scalars[batchsize+i+1],&r[i]);

    /* Computing points: the base point, then the public keys A[i] and the
       commitments R[i], both unpacked negated. */
    points[0] = ge25519_base;
    for(i=0;i<batchsize;i++)
      if (ge25519_unpackneg_vartime(&points[i+1], pk[i])) goto fallback;
    for(i=0;i<batchsize;i++)
      if (ge25519_unpackneg_vartime(&points[batchsize+i+1], sm[i])) goto fallback;

    /* The batch verifies iff the combined multi-scalar product is neutral. */
    ge25519_multi_scalarmult_vartime(points, points, scalars, 2*batchsize+1);

    if (ge25519_isneutral_vartime(points)) {
      /* Whole batch verified: copy each message out of its signed blob. */
      for(i=0;i<batchsize;i++)
      {
        for(j=0;j<smlen[i]-64;j++)
          m[i][j] = sm[i][j + 64];
        mlen[i] = smlen[i]-64;
      }
    } else {
      fallback:
      /* Batch check failed or an input was malformed: verify one by one. */
      for (i = 0;i < batchsize;++i)
        ret |= crypto_sign_open(m[i], &mlen[i], sm[i], smlen[i], pk[i]);
    }

    m += batchsize;
    mlen += batchsize;
    sm += batchsize;
    smlen += batchsize;
    pk += batchsize;
    num -= batchsize;
  }

  /* Fewer than three signatures left: verify them individually. */
  for (i = 0;i < num;++i)
    ret |= crypto_sign_open(m[i], &mlen[i], sm[i], smlen[i], pk[i]);
  return ret;
}
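
A hypothetical caller sketch (not part of the commit) may make the contract above clearer: the function takes parallel arrays of pointers, writes the recovered messages and their lengths, and returns 0 only if every signature verified. The wrapper name and allocation scheme below are illustrative assumptions.

#include <stdlib.h>
#include "crypto_sign.h"

/* Illustrative wrapper: returns 0 iff all `num` signed messages verify.
   Error handling for failed allocations is elided for brevity. */
int verify_batch(unsigned char *const sm[], const unsigned long long smlen[],
                 unsigned char *const pk[], unsigned long long num)
{
  unsigned char **m = malloc(num * sizeof(*m));
  unsigned long long *mlen = malloc(num * sizeof(*mlen));
  unsigned long long i;
  int ret;
  for (i = 0; i < num; i++) m[i] = malloc(smlen[i]); /* message <= smlen bytes */
  ret = crypto_sign_open_batch(m, mlen, sm, smlen, pk, num);
  for (i = 0; i < num; i++) free(m[i]);
  free(m); free(mlen);
  return ret;
}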

File diff suppressed because it is too large.


@@ -0,0 +1,39 @@
.data
.globl crypto_sign_ed25519_amd64_64_121666
.globl crypto_sign_ed25519_amd64_64_MU0
.globl crypto_sign_ed25519_amd64_64_MU1
.globl crypto_sign_ed25519_amd64_64_MU2
.globl crypto_sign_ed25519_amd64_64_MU3
.globl crypto_sign_ed25519_amd64_64_MU4
.globl crypto_sign_ed25519_amd64_64_ORDER0
.globl crypto_sign_ed25519_amd64_64_ORDER1
.globl crypto_sign_ed25519_amd64_64_ORDER2
.globl crypto_sign_ed25519_amd64_64_ORDER3
.globl crypto_sign_ed25519_amd64_64_EC2D0
.globl crypto_sign_ed25519_amd64_64_EC2D1
.globl crypto_sign_ed25519_amd64_64_EC2D2
.globl crypto_sign_ed25519_amd64_64_EC2D3
.globl crypto_sign_ed25519_amd64_64_38
.p2align 4
crypto_sign_ed25519_amd64_64_121666: .quad 121666
crypto_sign_ed25519_amd64_64_MU0: .quad 0xED9CE5A30A2C131B
crypto_sign_ed25519_amd64_64_MU1: .quad 0x2106215D086329A7
crypto_sign_ed25519_amd64_64_MU2: .quad 0xFFFFFFFFFFFFFFEB
crypto_sign_ed25519_amd64_64_MU3: .quad 0xFFFFFFFFFFFFFFFF
crypto_sign_ed25519_amd64_64_MU4: .quad 0x000000000000000F
crypto_sign_ed25519_amd64_64_ORDER0: .quad 0x5812631A5CF5D3ED
crypto_sign_ed25519_amd64_64_ORDER1: .quad 0x14DEF9DEA2F79CD6
crypto_sign_ed25519_amd64_64_ORDER2: .quad 0x0000000000000000
crypto_sign_ed25519_amd64_64_ORDER3: .quad 0x1000000000000000
crypto_sign_ed25519_amd64_64_EC2D0: .quad 0xEBD69B9426B2F146
crypto_sign_ed25519_amd64_64_EC2D1: .quad 0x00E0149A8283B156
crypto_sign_ed25519_amd64_64_EC2D2: .quad 0x198E80F2EEF3D130
crypto_sign_ed25519_amd64_64_EC2D3: .quad 0xA406D9DC56DFFCE7
crypto_sign_ed25519_amd64_64_38: .quad 38
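
For orientation, a sketch of what these constants are. The interpretations below are assumptions drawn from the standard Ed25519 parameters, not comments present in the commit.

#include <stdint.h>

/* Assumed meanings; limbs are little-endian 64-bit words.
   ORDER: the group order l = 2^252 + 27742317777372353535851937790883648493. */
static const uint64_t ed25519_order[4] = {
  0x5812631A5CF5D3EDULL, 0x14DEF9DEA2F79CD6ULL,
  0x0000000000000000ULL, 0x1000000000000000ULL
};
/* MU: the Barrett constant floor(2^512 / l), used when reducing 512-bit
   values modulo l. */
static const uint64_t ed25519_mu[5] = {
  0xED9CE5A30A2C131BULL, 0x2106215D086329A7ULL, 0xFFFFFFFFFFFFFFEBULL,
  0xFFFFFFFFFFFFFFFFULL, 0x000000000000000FULL
};
/* EC2D: 2*d, twice the edwards25519 curve constant d = -121665/121666,
   used by the point-addition formulas.  121666 backs fe25519_mul121666,
   and 38 = 2*19 is the carry-folding multiplier for the field arithmetic,
   since 2^256 == 38 (mod 2^255-19). */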


@@ -0,0 +1,64 @@
#ifndef FE25519_H
#define FE25519_H
#define fe25519 crypto_sign_ed25519_amd64_64_fe25519
#define fe25519_freeze crypto_sign_ed25519_amd64_64_fe25519_freeze
#define fe25519_unpack crypto_sign_ed25519_amd64_64_fe25519_unpack
#define fe25519_pack crypto_sign_ed25519_amd64_64_fe25519_pack
#define fe25519_iszero_vartime crypto_sign_ed25519_amd64_64_fe25519_iszero_vartime
#define fe25519_iseq_vartime crypto_sign_ed25519_amd64_64_fe25519_iseq_vartime
#define fe25519_cmov crypto_sign_ed25519_amd64_64_fe25519_cmov
#define fe25519_setint crypto_sign_ed25519_amd64_64_fe25519_setint
#define fe25519_neg crypto_sign_ed25519_amd64_64_fe25519_neg
#define fe25519_getparity crypto_sign_ed25519_amd64_64_fe25519_getparity
#define fe25519_add crypto_sign_ed25519_amd64_64_fe25519_add
#define fe25519_sub crypto_sign_ed25519_amd64_64_fe25519_sub
#define fe25519_mul crypto_sign_ed25519_amd64_64_fe25519_mul
#define fe25519_mul121666 crypto_sign_ed25519_amd64_64_fe25519_mul121666
#define fe25519_square crypto_sign_ed25519_amd64_64_fe25519_square
#define fe25519_invert crypto_sign_ed25519_amd64_64_fe25519_invert
#define fe25519_pow2523 crypto_sign_ed25519_amd64_64_fe25519_pow2523
typedef struct
{
  unsigned long long v[4];
}
fe25519;
void fe25519_freeze(fe25519 *r);
void fe25519_unpack(fe25519 *r, const unsigned char x[32]);
void fe25519_pack(unsigned char r[32], const fe25519 *x);
void fe25519_cmov(fe25519 *r, const fe25519 *x, unsigned char b);
void fe25519_cswap(fe25519 *r, fe25519 *x, unsigned char b);
void fe25519_setint(fe25519 *r, unsigned int v);
void fe25519_neg(fe25519 *r, const fe25519 *x);
unsigned char fe25519_getparity(const fe25519 *x);
int fe25519_iszero_vartime(const fe25519 *x);
int fe25519_iseq_vartime(const fe25519 *x, const fe25519 *y);
void fe25519_add(fe25519 *r, const fe25519 *x, const fe25519 *y);
void fe25519_sub(fe25519 *r, const fe25519 *x, const fe25519 *y);
void fe25519_mul(fe25519 *r, const fe25519 *x, const fe25519 *y);
void fe25519_mul121666(fe25519 *r, const fe25519 *x);
void fe25519_square(fe25519 *r, const fe25519 *x);
void fe25519_pow(fe25519 *r, const fe25519 *x, const unsigned char *e);
void fe25519_invert(fe25519 *r, const fe25519 *x);
void fe25519_pow2523(fe25519 *r, const fe25519 *x);
#endif
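
A minimal usage sketch of this API (an illustration, not part of the commit): checking that fe25519_invert really produces a multiplicative inverse.

#include <assert.h>
#include "fe25519.h"

/* Illustrative check: for nonzero x, x * invert(x) == 1 in GF(2^255-19). */
static void check_invert(const fe25519 *x)
{
  fe25519 inv, prod, one;
  fe25519_invert(&inv, x);      /* inv = x^(p-2) */
  fe25519_mul(&prod, x, &inv);
  fe25519_setint(&one, 1);
  assert(fe25519_iseq_vartime(&prod, &one));
}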


@@ -0,0 +1,189 @@
# qhasm: int64 rp
# qhasm: int64 xp
# qhasm: int64 yp
# qhasm: input rp
# qhasm: input xp
# qhasm: input yp
# qhasm: int64 r0
# qhasm: int64 r1
# qhasm: int64 r2
# qhasm: int64 r3
# qhasm: int64 addt0
# qhasm: int64 addt1
# qhasm: int64 caller1
# qhasm: int64 caller2
# qhasm: int64 caller3
# qhasm: int64 caller4
# qhasm: int64 caller5
# qhasm: int64 caller6
# qhasm: int64 caller7
# qhasm: caller caller1
# qhasm: caller caller2
# qhasm: caller caller3
# qhasm: caller caller4
# qhasm: caller caller5
# qhasm: caller caller6
# qhasm: caller caller7
# qhasm: stack64 caller1_stack
# qhasm: stack64 caller2_stack
# qhasm: stack64 caller3_stack
# qhasm: stack64 caller4_stack
# qhasm: stack64 caller5_stack
# qhasm: stack64 caller6_stack
# qhasm: stack64 caller7_stack
# qhasm: enter crypto_sign_ed25519_amd64_64_fe25519_add
.text
.p2align 5
.globl _crypto_sign_ed25519_amd64_64_fe25519_add
.globl crypto_sign_ed25519_amd64_64_fe25519_add
_crypto_sign_ed25519_amd64_64_fe25519_add:
crypto_sign_ed25519_amd64_64_fe25519_add:
mov %rsp,%r11
and $31,%r11
add $0,%r11
sub %r11,%rsp
# qhasm: r0 = *(uint64 *)(xp + 0)
# asm 1: movq 0(<xp=int64#2),>r0=int64#4
# asm 2: movq 0(<xp=%rsi),>r0=%rcx
movq 0(%rsi),%rcx
# qhasm: r1 = *(uint64 *)(xp + 8)
# asm 1: movq 8(<xp=int64#2),>r1=int64#5
# asm 2: movq 8(<xp=%rsi),>r1=%r8
movq 8(%rsi),%r8
# qhasm: r2 = *(uint64 *)(xp + 16)
# asm 1: movq 16(<xp=int64#2),>r2=int64#6
# asm 2: movq 16(<xp=%rsi),>r2=%r9
movq 16(%rsi),%r9
# qhasm: r3 = *(uint64 *)(xp + 24)
# asm 1: movq 24(<xp=int64#2),>r3=int64#2
# asm 2: movq 24(<xp=%rsi),>r3=%rsi
movq 24(%rsi),%rsi
# qhasm: carry? r0 += *(uint64 *)(yp + 0)
# asm 1: addq 0(<yp=int64#3),<r0=int64#4
# asm 2: addq 0(<yp=%rdx),<r0=%rcx
addq 0(%rdx),%rcx
# qhasm: carry? r1 += *(uint64 *)(yp + 8) + carry
# asm 1: adcq 8(<yp=int64#3),<r1=int64#5
# asm 2: adcq 8(<yp=%rdx),<r1=%r8
adcq 8(%rdx),%r8
# qhasm: carry? r2 += *(uint64 *)(yp + 16) + carry
# asm 1: adcq 16(<yp=int64#3),<r2=int64#6
# asm 2: adcq 16(<yp=%rdx),<r2=%r9
adcq 16(%rdx),%r9
# qhasm: carry? r3 += *(uint64 *)(yp + 24) + carry
# asm 1: adcq 24(<yp=int64#3),<r3=int64#2
# asm 2: adcq 24(<yp=%rdx),<r3=%rsi
adcq 24(%rdx),%rsi
# qhasm: addt0 = 0
# asm 1: mov $0,>addt0=int64#3
# asm 2: mov $0,>addt0=%rdx
mov $0,%rdx
# qhasm: addt1 = 38
# asm 1: mov $38,>addt1=int64#7
# asm 2: mov $38,>addt1=%rax
mov $38,%rax
# qhasm: addt1 = addt0 if !carry
# asm 1: cmovae <addt0=int64#3,<addt1=int64#7
# asm 2: cmovae <addt0=%rdx,<addt1=%rax
cmovae %rdx,%rax
# qhasm: carry? r0 += addt1
# asm 1: add <addt1=int64#7,<r0=int64#4
# asm 2: add <addt1=%rax,<r0=%rcx
add %rax,%rcx
# qhasm: carry? r1 += addt0 + carry
# asm 1: adc <addt0=int64#3,<r1=int64#5
# asm 2: adc <addt0=%rdx,<r1=%r8
adc %rdx,%r8
# qhasm: carry? r2 += addt0 + carry
# asm 1: adc <addt0=int64#3,<r2=int64#6
# asm 2: adc <addt0=%rdx,<r2=%r9
adc %rdx,%r9
# qhasm: carry? r3 += addt0 + carry
# asm 1: adc <addt0=int64#3,<r3=int64#2
# asm 2: adc <addt0=%rdx,<r3=%rsi
adc %rdx,%rsi
# qhasm: addt0 = addt1 if carry
# asm 1: cmovc <addt1=int64#7,<addt0=int64#3
# asm 2: cmovc <addt1=%rax,<addt0=%rdx
cmovc %rax,%rdx
# qhasm: r0 += addt0
# asm 1: add <addt0=int64#3,<r0=int64#4
# asm 2: add <addt0=%rdx,<r0=%rcx
add %rdx,%rcx
# qhasm: *(uint64 *)(rp + 0) = r0
# asm 1: movq <r0=int64#4,0(<rp=int64#1)
# asm 2: movq <r0=%rcx,0(<rp=%rdi)
movq %rcx,0(%rdi)
# qhasm: *(uint64 *)(rp + 8) = r1
# asm 1: movq <r1=int64#5,8(<rp=int64#1)
# asm 2: movq <r1=%r8,8(<rp=%rdi)
movq %r8,8(%rdi)
# qhasm: *(uint64 *)(rp + 16) = r2
# asm 1: movq <r2=int64#6,16(<rp=int64#1)
# asm 2: movq <r2=%r9,16(<rp=%rdi)
movq %r9,16(%rdi)
# qhasm: *(uint64 *)(rp + 24) = r3
# asm 1: movq <r3=int64#2,24(<rp=int64#1)
# asm 2: movq <r3=%rsi,24(<rp=%rdi)
movq %rsi,24(%rdi)
# qhasm: leave
add %r11,%rsp
mov %rdi,%rax
mov %rsi,%rdx
ret
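
The branch-free reduction above (add with carry, then fold any carry out of bit 256 back in as 38, twice) may be easier to follow in C. A sketch inferred from reading the asm, assuming little-endian limbs as in fe25519.h; this is a reading aid, not the shipped code.

#include <stdint.h>
typedef unsigned __int128 u128;

/* r = x + y mod 2^255-19, using 2^256 == 38 (mod p) to fold carries. */
static void fe_add_sketch(uint64_t r[4], const uint64_t x[4], const uint64_t y[4])
{
  u128 c = 0;
  int i;
  for (i = 0; i < 4; i++) {              /* 256-bit add with carry (adcq) */
    c += (u128)x[i] + y[i];
    r[i] = (uint64_t)c;
    c >>= 64;
  }
  c = (u128)r[0] + (uint64_t)c * 38;     /* carry out of bit 256 -> add 38 */
  r[0] = (uint64_t)c;
  for (i = 1; i < 4; i++) {              /* ripple the fold's carry */
    c = (c >> 64) + r[i];
    r[i] = (uint64_t)c;
  }
  r[0] += (uint64_t)(c >> 64) * 38;      /* second fold cannot carry again */
}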


@@ -0,0 +1,322 @@
# qhasm: int64 rp
# qhasm: input rp
# qhasm: int64 r0
# qhasm: int64 r1
# qhasm: int64 r2
# qhasm: int64 r3
# qhasm: int64 t0
# qhasm: int64 t1
# qhasm: int64 t2
# qhasm: int64 t3
# qhasm: int64 two63
# qhasm: int64 caller1
# qhasm: int64 caller2
# qhasm: int64 caller3
# qhasm: int64 caller4
# qhasm: int64 caller5
# qhasm: int64 caller6
# qhasm: int64 caller7
# qhasm: caller caller1
# qhasm: caller caller2
# qhasm: caller caller3
# qhasm: caller caller4
# qhasm: caller caller5
# qhasm: caller caller6
# qhasm: caller caller7
# qhasm: stack64 caller1_stack
# qhasm: stack64 caller2_stack
# qhasm: stack64 caller3_stack
# qhasm: stack64 caller4_stack
# qhasm: stack64 caller5_stack
# qhasm: stack64 caller6_stack
# qhasm: stack64 caller7_stack
# qhasm: enter crypto_sign_ed25519_amd64_64_fe25519_freeze
.text
.p2align 5
.globl _crypto_sign_ed25519_amd64_64_fe25519_freeze
.globl crypto_sign_ed25519_amd64_64_fe25519_freeze
_crypto_sign_ed25519_amd64_64_fe25519_freeze:
crypto_sign_ed25519_amd64_64_fe25519_freeze:
mov %rsp,%r11
and $31,%r11
add $64,%r11
sub %r11,%rsp
# qhasm: caller1_stack = caller1
# asm 1: movq <caller1=int64#9,>caller1_stack=stack64#1
# asm 2: movq <caller1=%r11,>caller1_stack=0(%rsp)
movq %r11,0(%rsp)
# qhasm: caller2_stack = caller2
# asm 1: movq <caller2=int64#10,>caller2_stack=stack64#2
# asm 2: movq <caller2=%r12,>caller2_stack=8(%rsp)
movq %r12,8(%rsp)
# qhasm: caller3_stack = caller3
# asm 1: movq <caller3=int64#11,>caller3_stack=stack64#3
# asm 2: movq <caller3=%r13,>caller3_stack=16(%rsp)
movq %r13,16(%rsp)
# qhasm: caller4_stack = caller4
# asm 1: movq <caller4=int64#12,>caller4_stack=stack64#4
# asm 2: movq <caller4=%r14,>caller4_stack=24(%rsp)
movq %r14,24(%rsp)
# qhasm: caller5_stack = caller5
# asm 1: movq <caller5=int64#13,>caller5_stack=stack64#5
# asm 2: movq <caller5=%r15,>caller5_stack=32(%rsp)
movq %r15,32(%rsp)
# qhasm: caller6_stack = caller6
# asm 1: movq <caller6=int64#14,>caller6_stack=stack64#6
# asm 2: movq <caller6=%rbx,>caller6_stack=40(%rsp)
movq %rbx,40(%rsp)
# qhasm: caller7_stack = caller7
# asm 1: movq <caller7=int64#15,>caller7_stack=stack64#7
# asm 2: movq <caller7=%rbp,>caller7_stack=48(%rsp)
movq %rbp,48(%rsp)
# qhasm: r0 = *(uint64 *) (rp + 0)
# asm 1: movq 0(<rp=int64#1),>r0=int64#2
# asm 2: movq 0(<rp=%rdi),>r0=%rsi
movq 0(%rdi),%rsi
# qhasm: r1 = *(uint64 *) (rp + 8)
# asm 1: movq 8(<rp=int64#1),>r1=int64#3
# asm 2: movq 8(<rp=%rdi),>r1=%rdx
movq 8(%rdi),%rdx
# qhasm: r2 = *(uint64 *) (rp + 16)
# asm 1: movq 16(<rp=int64#1),>r2=int64#4
# asm 2: movq 16(<rp=%rdi),>r2=%rcx
movq 16(%rdi),%rcx
# qhasm: r3 = *(uint64 *) (rp + 24)
# asm 1: movq 24(<rp=int64#1),>r3=int64#5
# asm 2: movq 24(<rp=%rdi),>r3=%r8
movq 24(%rdi),%r8
# qhasm: t0 = r0
# asm 1: mov <r0=int64#2,>t0=int64#6
# asm 2: mov <r0=%rsi,>t0=%r9
mov %rsi,%r9
# qhasm: t1 = r1
# asm 1: mov <r1=int64#3,>t1=int64#7
# asm 2: mov <r1=%rdx,>t1=%rax
mov %rdx,%rax
# qhasm: t2 = r2
# asm 1: mov <r2=int64#4,>t2=int64#8
# asm 2: mov <r2=%rcx,>t2=%r10
mov %rcx,%r10
# qhasm: t3 = r3
# asm 1: mov <r3=int64#5,>t3=int64#9
# asm 2: mov <r3=%r8,>t3=%r11
mov %r8,%r11
# qhasm: two63 = 1
# asm 1: mov $1,>two63=int64#10
# asm 2: mov $1,>two63=%r12
mov $1,%r12
# qhasm: two63 <<= 63
# asm 1: shl $63,<two63=int64#10
# asm 2: shl $63,<two63=%r12
shl $63,%r12
# qhasm: carry? t0 += 19
# asm 1: add $19,<t0=int64#6
# asm 2: add $19,<t0=%r9
add $19,%r9
# qhasm: carry? t1 += 0 + carry
# asm 1: adc $0,<t1=int64#7
# asm 2: adc $0,<t1=%rax
adc $0,%rax
# qhasm: carry? t2 += 0 + carry
# asm 1: adc $0,<t2=int64#8
# asm 2: adc $0,<t2=%r10
adc $0,%r10
# qhasm: carry? t3 += two63 + carry
# asm 1: adc <two63=int64#10,<t3=int64#9
# asm 2: adc <two63=%r12,<t3=%r11
adc %r12,%r11
# qhasm: r0 = t0 if carry
# asm 1: cmovc <t0=int64#6,<r0=int64#2
# asm 2: cmovc <t0=%r9,<r0=%rsi
cmovc %r9,%rsi
# qhasm: r1 = t1 if carry
# asm 1: cmovc <t1=int64#7,<r1=int64#3
# asm 2: cmovc <t1=%rax,<r1=%rdx
cmovc %rax,%rdx
# qhasm: r2 = t2 if carry
# asm 1: cmovc <t2=int64#8,<r2=int64#4
# asm 2: cmovc <t2=%r10,<r2=%rcx
cmovc %r10,%rcx
# qhasm: r3 = t3 if carry
# asm 1: cmovc <t3=int64#9,<r3=int64#5
# asm 2: cmovc <t3=%r11,<r3=%r8
cmovc %r11,%r8
# qhasm: t0 = r0
# asm 1: mov <r0=int64#2,>t0=int64#6
# asm 2: mov <r0=%rsi,>t0=%r9
mov %rsi,%r9
# qhasm: t1 = r1
# asm 1: mov <r1=int64#3,>t1=int64#7
# asm 2: mov <r1=%rdx,>t1=%rax
mov %rdx,%rax
# qhasm: t2 = r2
# asm 1: mov <r2=int64#4,>t2=int64#8
# asm 2: mov <r2=%rcx,>t2=%r10
mov %rcx,%r10
# qhasm: t3 = r3
# asm 1: mov <r3=int64#5,>t3=int64#9
# asm 2: mov <r3=%r8,>t3=%r11
mov %r8,%r11
# qhasm: carry? t0 += 19
# asm 1: add $19,<t0=int64#6
# asm 2: add $19,<t0=%r9
add $19,%r9
# qhasm: carry? t1 += 0 + carry
# asm 1: adc $0,<t1=int64#7
# asm 2: adc $0,<t1=%rax
adc $0,%rax
# qhasm: carry? t2 += 0 + carry
# asm 1: adc $0,<t2=int64#8
# asm 2: adc $0,<t2=%r10
adc $0,%r10
# qhasm: carry? t3 += two63 + carry
# asm 1: adc <two63=int64#10,<t3=int64#9
# asm 2: adc <two63=%r12,<t3=%r11
adc %r12,%r11
# qhasm: r0 = t0 if carry
# asm 1: cmovc <t0=int64#6,<r0=int64#2
# asm 2: cmovc <t0=%r9,<r0=%rsi
cmovc %r9,%rsi
# qhasm: r1 = t1 if carry
# asm 1: cmovc <t1=int64#7,<r1=int64#3
# asm 2: cmovc <t1=%rax,<r1=%rdx
cmovc %rax,%rdx
# qhasm: r2 = t2 if carry
# asm 1: cmovc <t2=int64#8,<r2=int64#4
# asm 2: cmovc <t2=%r10,<r2=%rcx
cmovc %r10,%rcx
# qhasm: r3 = t3 if carry
# asm 1: cmovc <t3=int64#9,<r3=int64#5
# asm 2: cmovc <t3=%r11,<r3=%r8
cmovc %r11,%r8
# qhasm: *(uint64 *)(rp + 0) = r0
# asm 1: movq <r0=int64#2,0(<rp=int64#1)
# asm 2: movq <r0=%rsi,0(<rp=%rdi)
movq %rsi,0(%rdi)
# qhasm: *(uint64 *)(rp + 8) = r1
# asm 1: movq <r1=int64#3,8(<rp=int64#1)
# asm 2: movq <r1=%rdx,8(<rp=%rdi)
movq %rdx,8(%rdi)
# qhasm: *(uint64 *)(rp + 16) = r2
# asm 1: movq <r2=int64#4,16(<rp=int64#1)
# asm 2: movq <r2=%rcx,16(<rp=%rdi)
movq %rcx,16(%rdi)
# qhasm: *(uint64 *)(rp + 24) = r3
# asm 1: movq <r3=int64#5,24(<rp=int64#1)
# asm 2: movq <r3=%r8,24(<rp=%rdi)
movq %r8,24(%rdi)
# qhasm: caller1 = caller1_stack
# asm 1: movq <caller1_stack=stack64#1,>caller1=int64#9
# asm 2: movq <caller1_stack=0(%rsp),>caller1=%r11
movq 0(%rsp),%r11
# qhasm: caller2 = caller2_stack
# asm 1: movq <caller2_stack=stack64#2,>caller2=int64#10
# asm 2: movq <caller2_stack=8(%rsp),>caller2=%r12
movq 8(%rsp),%r12
# qhasm: caller3 = caller3_stack
# asm 1: movq <caller3_stack=stack64#3,>caller3=int64#11
# asm 2: movq <caller3_stack=16(%rsp),>caller3=%r13
movq 16(%rsp),%r13
# qhasm: caller4 = caller4_stack
# asm 1: movq <caller4_stack=stack64#4,>caller4=int64#12
# asm 2: movq <caller4_stack=24(%rsp),>caller4=%r14
movq 24(%rsp),%r14
# qhasm: caller5 = caller5_stack
# asm 1: movq <caller5_stack=stack64#5,>caller5=int64#13
# asm 2: movq <caller5_stack=32(%rsp),>caller5=%r15
movq 32(%rsp),%r15
# qhasm: caller6 = caller6_stack
# asm 1: movq <caller6_stack=stack64#6,>caller6=int64#14
# asm 2: movq <caller6_stack=40(%rsp),>caller6=%rbx
movq 40(%rsp),%rbx
# qhasm: caller7 = caller7_stack
# asm 1: movq <caller7_stack=stack64#7,>caller7=int64#15
# asm 2: movq <caller7_stack=48(%rsp),>caller7=%rbp
movq 48(%rsp),%rbp
# qhasm: leave
add %r11,%rsp
mov %rdi,%rax
mov %rsi,%rdx
ret
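
What the freeze above does, sketched in C (an interpretation of the asm, not part of the commit): adding 19 plus 2^255 to x carries out of bit 256 exactly when x >= p = 2^255-19, and in that case the truncated sum equals x - p. Two such conditional subtractions canonicalize any 256-bit value, since 2^256 < 3p. The asm keeps this branch-free with cmovc; the `if` below is only illustrative.

#include <stdint.h>
typedef unsigned __int128 u128;

static void fe_freeze_sketch(uint64_t r[4])
{
  int pass, i;
  for (pass = 0; pass < 2; pass++) {
    uint64_t t[4];
    u128 c = (u128)r[0] + 19;                 /* t = x + 19 ... */
    t[0] = (uint64_t)c; c >>= 64;
    c += r[1]; t[1] = (uint64_t)c; c >>= 64;
    c += r[2]; t[2] = (uint64_t)c; c >>= 64;
    c += (u128)r[3] + ((uint64_t)1 << 63);    /* ... + 2^255 */
    t[3] = (uint64_t)c;
    if ((uint64_t)(c >> 64))                  /* carry iff x >= p: keep x - p */
      for (i = 0; i < 4; i++) r[i] = t[i];
  }
}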


@@ -0,0 +1,8 @@
#include "fe25519.h"
unsigned char fe25519_getparity(const fe25519 *x)
{
  fe25519 t = *x;
  fe25519_freeze(&t);
  return (unsigned char)t.v[0] & 1;
}


@@ -0,0 +1,60 @@
#include "fe25519.h"
void fe25519_invert(fe25519 *r, const fe25519 *x)
{
  fe25519 z2;
  fe25519 z9;
  fe25519 z11;
  fe25519 z2_5_0;
  fe25519 z2_10_0;
  fe25519 z2_20_0;
  fe25519 z2_50_0;
  fe25519 z2_100_0;
  fe25519 t;
  int i;

  /* 2 */ fe25519_square(&z2,x);
  /* 4 */ fe25519_square(&t,&z2);
  /* 8 */ fe25519_square(&t,&t);
  /* 9 */ fe25519_mul(&z9,&t,x);
  /* 11 */ fe25519_mul(&z11,&z9,&z2);
  /* 22 */ fe25519_square(&t,&z11);
  /* 2^5 - 2^0 = 31 */ fe25519_mul(&z2_5_0,&t,&z9);

  /* 2^6 - 2^1 */ fe25519_square(&t,&z2_5_0);
  /* 2^10 - 2^5 */ for (i = 1;i < 5;i++) { fe25519_square(&t,&t); }
  /* 2^10 - 2^0 */ fe25519_mul(&z2_10_0,&t,&z2_5_0);

  /* 2^11 - 2^1 */ fe25519_square(&t,&z2_10_0);
  /* 2^20 - 2^10 */ for (i = 1;i < 10;i++) { fe25519_square(&t,&t); }
  /* 2^20 - 2^0 */ fe25519_mul(&z2_20_0,&t,&z2_10_0);

  /* 2^21 - 2^1 */ fe25519_square(&t,&z2_20_0);
  /* 2^40 - 2^20 */ for (i = 1;i < 20;i++) { fe25519_square(&t,&t); }
  /* 2^40 - 2^0 */ fe25519_mul(&t,&t,&z2_20_0);

  /* 2^41 - 2^1 */ fe25519_square(&t,&t);
  /* 2^50 - 2^10 */ for (i = 1;i < 10;i++) { fe25519_square(&t,&t); }
  /* 2^50 - 2^0 */ fe25519_mul(&z2_50_0,&t,&z2_10_0);

  /* 2^51 - 2^1 */ fe25519_square(&t,&z2_50_0);
  /* 2^100 - 2^50 */ for (i = 1;i < 50;i++) { fe25519_square(&t,&t); }
  /* 2^100 - 2^0 */ fe25519_mul(&z2_100_0,&t,&z2_50_0);

  /* 2^101 - 2^1 */ fe25519_square(&t,&z2_100_0);
  /* 2^200 - 2^100 */ for (i = 1;i < 100;i++) { fe25519_square(&t,&t); }
  /* 2^200 - 2^0 */ fe25519_mul(&t,&t,&z2_100_0);

  /* 2^201 - 2^1 */ fe25519_square(&t,&t);
  /* 2^250 - 2^50 */ for (i = 1;i < 50;i++) { fe25519_square(&t,&t); }
  /* 2^250 - 2^0 */ fe25519_mul(&t,&t,&z2_50_0);

  /* 2^251 - 2^1 */ fe25519_square(&t,&t);
  /* 2^252 - 2^2 */ fe25519_square(&t,&t);
  /* 2^253 - 2^3 */ fe25519_square(&t,&t);
  /* 2^254 - 2^4 */ fe25519_square(&t,&t);
  /* 2^255 - 2^5 */ fe25519_square(&t,&t);
  /* 2^255 - 21 */ fe25519_mul(r,&t,&z11);
}
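
The chain above is Fermat inversion; as a reading aid (standard field arithmetic, not a comment from the commit):

  x^{-1} \equiv x^{p-2} = x^{2^{255}-21} \pmod{p}, \qquad p = 2^{255}-19.

Each intermediate z2_k_0 holds x^{2^k-1}, and the whole chain costs 254 squarings and 11 multiplications.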


@@ -0,0 +1,14 @@
#include "fe25519.h"
int fe25519_iseq_vartime(const fe25519 *x, const fe25519 *y)
{
  fe25519 t1 = *x;
  fe25519 t2 = *y;
  fe25519_freeze(&t1);
  fe25519_freeze(&t2);
  if(t1.v[0] != t2.v[0]) return 0;
  if(t1.v[1] != t2.v[1]) return 0;
  if(t1.v[2] != t2.v[2]) return 0;
  if(t1.v[3] != t2.v[3]) return 0;
  return 1;
}


@@ -0,0 +1,12 @@
#include "fe25519.h"
int fe25519_iszero_vartime(const fe25519 *x)
{
  fe25519 t = *x;
  fe25519_freeze(&t);
  if (t.v[0]) return 0;
  if (t.v[1]) return 0;
  if (t.v[2]) return 0;
  if (t.v[3]) return 0;
  return 1;
}


@@ -0,0 +1,865 @@
# qhasm: int64 rp
# qhasm: int64 xp
# qhasm: int64 yp
# qhasm: input rp
# qhasm: input xp
# qhasm: input yp
# qhasm: int64 r0
# qhasm: int64 r1
# qhasm: int64 r2
# qhasm: int64 r3
# qhasm: int64 caller1
# qhasm: int64 caller2
# qhasm: int64 caller3
# qhasm: int64 caller4
# qhasm: int64 caller5
# qhasm: int64 caller6
# qhasm: int64 caller7
# qhasm: caller caller1
# qhasm: caller caller2
# qhasm: caller caller3
# qhasm: caller caller4
# qhasm: caller caller5
# qhasm: caller caller6
# qhasm: caller caller7
# qhasm: stack64 caller1_stack
# qhasm: stack64 caller2_stack
# qhasm: stack64 caller3_stack
# qhasm: stack64 caller4_stack
# qhasm: stack64 caller5_stack
# qhasm: stack64 caller6_stack
# qhasm: stack64 caller7_stack
# qhasm: int64 mulr4
# qhasm: int64 mulr5
# qhasm: int64 mulr6
# qhasm: int64 mulr7
# qhasm: int64 mulr8
# qhasm: int64 mulrax
# qhasm: int64 mulrdx
# qhasm: int64 mulx0
# qhasm: int64 mulx1
# qhasm: int64 mulx2
# qhasm: int64 mulx3
# qhasm: int64 mulc
# qhasm: int64 mulzero
# qhasm: int64 muli38
# qhasm: enter crypto_sign_ed25519_amd64_64_fe25519_mul
.text
.p2align 5
.globl _crypto_sign_ed25519_amd64_64_fe25519_mul
.globl crypto_sign_ed25519_amd64_64_fe25519_mul
_crypto_sign_ed25519_amd64_64_fe25519_mul:
crypto_sign_ed25519_amd64_64_fe25519_mul:
mov %rsp,%r11
and $31,%r11
add $64,%r11
sub %r11,%rsp
# qhasm: caller1_stack = caller1
# asm 1: movq <caller1=int64#9,>caller1_stack=stack64#1
# asm 2: movq <caller1=%r11,>caller1_stack=0(%rsp)
movq %r11,0(%rsp)
# qhasm: caller2_stack = caller2
# asm 1: movq <caller2=int64#10,>caller2_stack=stack64#2
# asm 2: movq <caller2=%r12,>caller2_stack=8(%rsp)
movq %r12,8(%rsp)
# qhasm: caller3_stack = caller3
# asm 1: movq <caller3=int64#11,>caller3_stack=stack64#3
# asm 2: movq <caller3=%r13,>caller3_stack=16(%rsp)
movq %r13,16(%rsp)
# qhasm: caller4_stack = caller4
# asm 1: movq <caller4=int64#12,>caller4_stack=stack64#4
# asm 2: movq <caller4=%r14,>caller4_stack=24(%rsp)
movq %r14,24(%rsp)
# qhasm: caller5_stack = caller5
# asm 1: movq <caller5=int64#13,>caller5_stack=stack64#5
# asm 2: movq <caller5=%r15,>caller5_stack=32(%rsp)
movq %r15,32(%rsp)
# qhasm: caller6_stack = caller6
# asm 1: movq <caller6=int64#14,>caller6_stack=stack64#6
# asm 2: movq <caller6=%rbx,>caller6_stack=40(%rsp)
movq %rbx,40(%rsp)
# qhasm: caller7_stack = caller7
# asm 1: movq <caller7=int64#15,>caller7_stack=stack64#7
# asm 2: movq <caller7=%rbp,>caller7_stack=48(%rsp)
movq %rbp,48(%rsp)
# qhasm: yp = yp
# asm 1: mov <yp=int64#3,>yp=int64#4
# asm 2: mov <yp=%rdx,>yp=%rcx
mov %rdx,%rcx
# qhasm: mulr4 = 0
# asm 1: mov $0,>mulr4=int64#5
# asm 2: mov $0,>mulr4=%r8
mov $0,%r8
# qhasm: mulr5 = 0
# asm 1: mov $0,>mulr5=int64#6
# asm 2: mov $0,>mulr5=%r9
mov $0,%r9
# qhasm: mulr6 = 0
# asm 1: mov $0,>mulr6=int64#8
# asm 2: mov $0,>mulr6=%r10
mov $0,%r10
# qhasm: mulr7 = 0
# asm 1: mov $0,>mulr7=int64#9
# asm 2: mov $0,>mulr7=%r11
mov $0,%r11
# qhasm: mulx0 = *(uint64 *)(xp + 0)
# asm 1: movq 0(<xp=int64#2),>mulx0=int64#10
# asm 2: movq 0(<xp=%rsi),>mulx0=%r12
movq 0(%rsi),%r12
# qhasm: mulrax = *(uint64 *)(yp + 0)
# asm 1: movq 0(<yp=int64#4),>mulrax=int64#7
# asm 2: movq 0(<yp=%rcx),>mulrax=%rax
movq 0(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul <mulx0=int64#10
# asm 2: mul <mulx0=%r12
mul %r12
# qhasm: r0 = mulrax
# asm 1: mov <mulrax=int64#7,>r0=int64#11
# asm 2: mov <mulrax=%rax,>r0=%r13
mov %rax,%r13
# qhasm: r1 = mulrdx
# asm 1: mov <mulrdx=int64#3,>r1=int64#12
# asm 2: mov <mulrdx=%rdx,>r1=%r14
mov %rdx,%r14
# qhasm: mulrax = *(uint64 *)(yp + 8)
# asm 1: movq 8(<yp=int64#4),>mulrax=int64#7
# asm 2: movq 8(<yp=%rcx),>mulrax=%rax
movq 8(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul <mulx0=int64#10
# asm 2: mul <mulx0=%r12
mul %r12
# qhasm: carry? r1 += mulrax
# asm 1: add <mulrax=int64#7,<r1=int64#12
# asm 2: add <mulrax=%rax,<r1=%r14
add %rax,%r14
# qhasm: r2 = 0
# asm 1: mov $0,>r2=int64#13
# asm 2: mov $0,>r2=%r15
mov $0,%r15
# qhasm: r2 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<r2=int64#13
# asm 2: adc <mulrdx=%rdx,<r2=%r15
adc %rdx,%r15
# qhasm: mulrax = *(uint64 *)(yp + 16)
# asm 1: movq 16(<yp=int64#4),>mulrax=int64#7
# asm 2: movq 16(<yp=%rcx),>mulrax=%rax
movq 16(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul <mulx0=int64#10
# asm 2: mul <mulx0=%r12
mul %r12
# qhasm: carry? r2 += mulrax
# asm 1: add <mulrax=int64#7,<r2=int64#13
# asm 2: add <mulrax=%rax,<r2=%r15
add %rax,%r15
# qhasm: r3 = 0
# asm 1: mov $0,>r3=int64#14
# asm 2: mov $0,>r3=%rbx
mov $0,%rbx
# qhasm: r3 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<r3=int64#14
# asm 2: adc <mulrdx=%rdx,<r3=%rbx
adc %rdx,%rbx
# qhasm: mulrax = *(uint64 *)(yp + 24)
# asm 1: movq 24(<yp=int64#4),>mulrax=int64#7
# asm 2: movq 24(<yp=%rcx),>mulrax=%rax
movq 24(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul <mulx0=int64#10
# asm 2: mul <mulx0=%r12
mul %r12
# qhasm: carry? r3 += mulrax
# asm 1: add <mulrax=int64#7,<r3=int64#14
# asm 2: add <mulrax=%rax,<r3=%rbx
add %rax,%rbx
# qhasm: mulr4 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr4=int64#5
# asm 2: adc <mulrdx=%rdx,<mulr4=%r8
adc %rdx,%r8
# qhasm: mulx1 = *(uint64 *)(xp + 8)
# asm 1: movq 8(<xp=int64#2),>mulx1=int64#10
# asm 2: movq 8(<xp=%rsi),>mulx1=%r12
movq 8(%rsi),%r12
# qhasm: mulrax = *(uint64 *)(yp + 0)
# asm 1: movq 0(<yp=int64#4),>mulrax=int64#7
# asm 2: movq 0(<yp=%rcx),>mulrax=%rax
movq 0(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul <mulx1=int64#10
# asm 2: mul <mulx1=%r12
mul %r12
# qhasm: carry? r1 += mulrax
# asm 1: add <mulrax=int64#7,<r1=int64#12
# asm 2: add <mulrax=%rax,<r1=%r14
add %rax,%r14
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = *(uint64 *)(yp + 8)
# asm 1: movq 8(<yp=int64#4),>mulrax=int64#7
# asm 2: movq 8(<yp=%rcx),>mulrax=%rax
movq 8(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul <mulx1=int64#10
# asm 2: mul <mulx1=%r12
mul %r12
# qhasm: carry? r2 += mulrax
# asm 1: add <mulrax=int64#7,<r2=int64#13
# asm 2: add <mulrax=%rax,<r2=%r15
add %rax,%r15
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? r2 += mulc
# asm 1: add <mulc=int64#15,<r2=int64#13
# asm 2: add <mulc=%rbp,<r2=%r15
add %rbp,%r15
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = *(uint64 *)(yp + 16)
# asm 1: movq 16(<yp=int64#4),>mulrax=int64#7
# asm 2: movq 16(<yp=%rcx),>mulrax=%rax
movq 16(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul <mulx1=int64#10
# asm 2: mul <mulx1=%r12
mul %r12
# qhasm: carry? r3 += mulrax
# asm 1: add <mulrax=int64#7,<r3=int64#14
# asm 2: add <mulrax=%rax,<r3=%rbx
add %rax,%rbx
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? r3 += mulc
# asm 1: add <mulc=int64#15,<r3=int64#14
# asm 2: add <mulc=%rbp,<r3=%rbx
add %rbp,%rbx
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = *(uint64 *)(yp + 24)
# asm 1: movq 24(<yp=int64#4),>mulrax=int64#7
# asm 2: movq 24(<yp=%rcx),>mulrax=%rax
movq 24(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul <mulx1=int64#10
# asm 2: mul <mulx1=%r12
mul %r12
# qhasm: carry? mulr4 += mulrax
# asm 1: add <mulrax=int64#7,<mulr4=int64#5
# asm 2: add <mulrax=%rax,<mulr4=%r8
add %rax,%r8
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? mulr4 += mulc
# asm 1: add <mulc=int64#15,<mulr4=int64#5
# asm 2: add <mulc=%rbp,<mulr4=%r8
add %rbp,%r8
# qhasm: mulr5 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr5=int64#6
# asm 2: adc <mulrdx=%rdx,<mulr5=%r9
adc %rdx,%r9
# qhasm: mulx2 = *(uint64 *)(xp + 16)
# asm 1: movq 16(<xp=int64#2),>mulx2=int64#10
# asm 2: movq 16(<xp=%rsi),>mulx2=%r12
movq 16(%rsi),%r12
# qhasm: mulrax = *(uint64 *)(yp + 0)
# asm 1: movq 0(<yp=int64#4),>mulrax=int64#7
# asm 2: movq 0(<yp=%rcx),>mulrax=%rax
movq 0(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul <mulx2=int64#10
# asm 2: mul <mulx2=%r12
mul %r12
# qhasm: carry? r2 += mulrax
# asm 1: add <mulrax=int64#7,<r2=int64#13
# asm 2: add <mulrax=%rax,<r2=%r15
add %rax,%r15
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = *(uint64 *)(yp + 8)
# asm 1: movq 8(<yp=int64#4),>mulrax=int64#7
# asm 2: movq 8(<yp=%rcx),>mulrax=%rax
movq 8(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul <mulx2=int64#10
# asm 2: mul <mulx2=%r12
mul %r12
# qhasm: carry? r3 += mulrax
# asm 1: add <mulrax=int64#7,<r3=int64#14
# asm 2: add <mulrax=%rax,<r3=%rbx
add %rax,%rbx
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? r3 += mulc
# asm 1: add <mulc=int64#15,<r3=int64#14
# asm 2: add <mulc=%rbp,<r3=%rbx
add %rbp,%rbx
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = *(uint64 *)(yp + 16)
# asm 1: movq 16(<yp=int64#4),>mulrax=int64#7
# asm 2: movq 16(<yp=%rcx),>mulrax=%rax
movq 16(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul <mulx2=int64#10
# asm 2: mul <mulx2=%r12
mul %r12
# qhasm: carry? mulr4 += mulrax
# asm 1: add <mulrax=int64#7,<mulr4=int64#5
# asm 2: add <mulrax=%rax,<mulr4=%r8
add %rax,%r8
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? mulr4 += mulc
# asm 1: add <mulc=int64#15,<mulr4=int64#5
# asm 2: add <mulc=%rbp,<mulr4=%r8
add %rbp,%r8
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = *(uint64 *)(yp + 24)
# asm 1: movq 24(<yp=int64#4),>mulrax=int64#7
# asm 2: movq 24(<yp=%rcx),>mulrax=%rax
movq 24(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul <mulx2=int64#10
# asm 2: mul <mulx2=%r12
mul %r12
# qhasm: carry? mulr5 += mulrax
# asm 1: add <mulrax=int64#7,<mulr5=int64#6
# asm 2: add <mulrax=%rax,<mulr5=%r9
add %rax,%r9
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? mulr5 += mulc
# asm 1: add <mulc=int64#15,<mulr5=int64#6
# asm 2: add <mulc=%rbp,<mulr5=%r9
add %rbp,%r9
# qhasm: mulr6 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr6=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr6=%r10
adc %rdx,%r10
# qhasm: mulx3 = *(uint64 *)(xp + 24)
# asm 1: movq 24(<xp=int64#2),>mulx3=int64#2
# asm 2: movq 24(<xp=%rsi),>mulx3=%rsi
movq 24(%rsi),%rsi
# qhasm: mulrax = *(uint64 *)(yp + 0)
# asm 1: movq 0(<yp=int64#4),>mulrax=int64#7
# asm 2: movq 0(<yp=%rcx),>mulrax=%rax
movq 0(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul <mulx3=int64#2
# asm 2: mul <mulx3=%rsi
mul %rsi
# qhasm: carry? r3 += mulrax
# asm 1: add <mulrax=int64#7,<r3=int64#14
# asm 2: add <mulrax=%rax,<r3=%rbx
add %rax,%rbx
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#10
# asm 2: mov $0,>mulc=%r12
mov $0,%r12
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#10
# asm 2: adc <mulrdx=%rdx,<mulc=%r12
adc %rdx,%r12
# qhasm: mulrax = *(uint64 *)(yp + 8)
# asm 1: movq 8(<yp=int64#4),>mulrax=int64#7
# asm 2: movq 8(<yp=%rcx),>mulrax=%rax
movq 8(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul <mulx3=int64#2
# asm 2: mul <mulx3=%rsi
mul %rsi
# qhasm: carry? mulr4 += mulrax
# asm 1: add <mulrax=int64#7,<mulr4=int64#5
# asm 2: add <mulrax=%rax,<mulr4=%r8
add %rax,%r8
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? mulr4 += mulc
# asm 1: add <mulc=int64#10,<mulr4=int64#5
# asm 2: add <mulc=%r12,<mulr4=%r8
add %r12,%r8
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#10
# asm 2: mov $0,>mulc=%r12
mov $0,%r12
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#10
# asm 2: adc <mulrdx=%rdx,<mulc=%r12
adc %rdx,%r12
# qhasm: mulrax = *(uint64 *)(yp + 16)
# asm 1: movq 16(<yp=int64#4),>mulrax=int64#7
# asm 2: movq 16(<yp=%rcx),>mulrax=%rax
movq 16(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul <mulx3=int64#2
# asm 2: mul <mulx3=%rsi
mul %rsi
# qhasm: carry? mulr5 += mulrax
# asm 1: add <mulrax=int64#7,<mulr5=int64#6
# asm 2: add <mulrax=%rax,<mulr5=%r9
add %rax,%r9
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? mulr5 += mulc
# asm 1: add <mulc=int64#10,<mulr5=int64#6
# asm 2: add <mulc=%r12,<mulr5=%r9
add %r12,%r9
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#10
# asm 2: mov $0,>mulc=%r12
mov $0,%r12
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#10
# asm 2: adc <mulrdx=%rdx,<mulc=%r12
adc %rdx,%r12
# qhasm: mulrax = *(uint64 *)(yp + 24)
# asm 1: movq 24(<yp=int64#4),>mulrax=int64#7
# asm 2: movq 24(<yp=%rcx),>mulrax=%rax
movq 24(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul <mulx3=int64#2
# asm 2: mul <mulx3=%rsi
mul %rsi
# qhasm: carry? mulr6 += mulrax
# asm 1: add <mulrax=int64#7,<mulr6=int64#8
# asm 2: add <mulrax=%rax,<mulr6=%r10
add %rax,%r10
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? mulr6 += mulc
# asm 1: add <mulc=int64#10,<mulr6=int64#8
# asm 2: add <mulc=%r12,<mulr6=%r10
add %r12,%r10
# qhasm: mulr7 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr7=int64#9
# asm 2: adc <mulrdx=%rdx,<mulr7=%r11
adc %rdx,%r11
# qhasm: mulrax = mulr4
# asm 1: mov <mulr4=int64#5,>mulrax=int64#7
# asm 2: mov <mulr4=%r8,>mulrax=%rax
mov %r8,%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
# qhasm: mulr4 = mulrax
# asm 1: mov <mulrax=int64#7,>mulr4=int64#2
# asm 2: mov <mulrax=%rax,>mulr4=%rsi
mov %rax,%rsi
# qhasm: mulrax = mulr5
# asm 1: mov <mulr5=int64#6,>mulrax=int64#7
# asm 2: mov <mulr5=%r9,>mulrax=%rax
mov %r9,%rax
# qhasm: mulr5 = mulrdx
# asm 1: mov <mulrdx=int64#3,>mulr5=int64#4
# asm 2: mov <mulrdx=%rdx,>mulr5=%rcx
mov %rdx,%rcx
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
# qhasm: carry? mulr5 += mulrax
# asm 1: add <mulrax=int64#7,<mulr5=int64#4
# asm 2: add <mulrax=%rax,<mulr5=%rcx
add %rax,%rcx
# qhasm: mulrax = mulr6
# asm 1: mov <mulr6=int64#8,>mulrax=int64#7
# asm 2: mov <mulr6=%r10,>mulrax=%rax
mov %r10,%rax
# qhasm: mulr6 = 0
# asm 1: mov $0,>mulr6=int64#5
# asm 2: mov $0,>mulr6=%r8
mov $0,%r8
# qhasm: mulr6 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr6=int64#5
# asm 2: adc <mulrdx=%rdx,<mulr6=%r8
adc %rdx,%r8
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
# qhasm: carry? mulr6 += mulrax
# asm 1: add <mulrax=int64#7,<mulr6=int64#5
# asm 2: add <mulrax=%rax,<mulr6=%r8
add %rax,%r8
# qhasm: mulrax = mulr7
# asm 1: mov <mulr7=int64#9,>mulrax=int64#7
# asm 2: mov <mulr7=%r11,>mulrax=%rax
mov %r11,%rax
# qhasm: mulr7 = 0
# asm 1: mov $0,>mulr7=int64#6
# asm 2: mov $0,>mulr7=%r9
mov $0,%r9
# qhasm: mulr7 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr7=int64#6
# asm 2: adc <mulrdx=%rdx,<mulr7=%r9
adc %rdx,%r9
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
# qhasm: carry? mulr7 += mulrax
# asm 1: add <mulrax=int64#7,<mulr7=int64#6
# asm 2: add <mulrax=%rax,<mulr7=%r9
add %rax,%r9
# qhasm: mulr8 = 0
# asm 1: mov $0,>mulr8=int64#7
# asm 2: mov $0,>mulr8=%rax
mov $0,%rax
# qhasm: mulr8 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr8=int64#7
# asm 2: adc <mulrdx=%rdx,<mulr8=%rax
adc %rdx,%rax
# qhasm: carry? r0 += mulr4
# asm 1: add <mulr4=int64#2,<r0=int64#11
# asm 2: add <mulr4=%rsi,<r0=%r13
add %rsi,%r13
# qhasm: carry? r1 += mulr5 + carry
# asm 1: adc <mulr5=int64#4,<r1=int64#12
# asm 2: adc <mulr5=%rcx,<r1=%r14
adc %rcx,%r14
# qhasm: carry? r2 += mulr6 + carry
# asm 1: adc <mulr6=int64#5,<r2=int64#13
# asm 2: adc <mulr6=%r8,<r2=%r15
adc %r8,%r15
# qhasm: carry? r3 += mulr7 + carry
# asm 1: adc <mulr7=int64#6,<r3=int64#14
# asm 2: adc <mulr7=%r9,<r3=%rbx
adc %r9,%rbx
# qhasm: mulzero = 0
# asm 1: mov $0,>mulzero=int64#2
# asm 2: mov $0,>mulzero=%rsi
mov $0,%rsi
# qhasm: mulr8 += mulzero + carry
# asm 1: adc <mulzero=int64#2,<mulr8=int64#7
# asm 2: adc <mulzero=%rsi,<mulr8=%rax
adc %rsi,%rax
# qhasm: mulr8 *= 38
# asm 1: imulq $38,<mulr8=int64#7,>mulr8=int64#3
# asm 2: imulq $38,<mulr8=%rax,>mulr8=%rdx
imulq $38,%rax,%rdx
# qhasm: carry? r0 += mulr8
# asm 1: add <mulr8=int64#3,<r0=int64#11
# asm 2: add <mulr8=%rdx,<r0=%r13
add %rdx,%r13
# qhasm: carry? r1 += mulzero + carry
# asm 1: adc <mulzero=int64#2,<r1=int64#12
# asm 2: adc <mulzero=%rsi,<r1=%r14
adc %rsi,%r14
# qhasm: carry? r2 += mulzero + carry
# asm 1: adc <mulzero=int64#2,<r2=int64#13
# asm 2: adc <mulzero=%rsi,<r2=%r15
adc %rsi,%r15
# qhasm: carry? r3 += mulzero + carry
# asm 1: adc <mulzero=int64#2,<r3=int64#14
# asm 2: adc <mulzero=%rsi,<r3=%rbx
adc %rsi,%rbx
# qhasm: mulzero += mulzero + carry
# asm 1: adc <mulzero=int64#2,<mulzero=int64#2
# asm 2: adc <mulzero=%rsi,<mulzero=%rsi
adc %rsi,%rsi
# qhasm: mulzero *= 38
# asm 1: imulq $38,<mulzero=int64#2,>mulzero=int64#2
# asm 2: imulq $38,<mulzero=%rsi,>mulzero=%rsi
imulq $38,%rsi,%rsi
# qhasm: r0 += mulzero
# asm 1: add <mulzero=int64#2,<r0=int64#11
# asm 2: add <mulzero=%rsi,<r0=%r13
add %rsi,%r13
# qhasm: *(uint64 *)(rp + 8) = r1
# asm 1: movq <r1=int64#12,8(<rp=int64#1)
# asm 2: movq <r1=%r14,8(<rp=%rdi)
movq %r14,8(%rdi)
# qhasm: *(uint64 *)(rp + 16) = r2
# asm 1: movq <r2=int64#13,16(<rp=int64#1)
# asm 2: movq <r2=%r15,16(<rp=%rdi)
movq %r15,16(%rdi)
# qhasm: *(uint64 *)(rp + 24) = r3
# asm 1: movq <r3=int64#14,24(<rp=int64#1)
# asm 2: movq <r3=%rbx,24(<rp=%rdi)
movq %rbx,24(%rdi)
# qhasm: *(uint64 *)(rp + 0) = r0
# asm 1: movq <r0=int64#11,0(<rp=int64#1)
# asm 2: movq <r0=%r13,0(<rp=%rdi)
movq %r13,0(%rdi)
# qhasm: caller1 = caller1_stack
# asm 1: movq <caller1_stack=stack64#1,>caller1=int64#9
# asm 2: movq <caller1_stack=0(%rsp),>caller1=%r11
movq 0(%rsp),%r11
# qhasm: caller2 = caller2_stack
# asm 1: movq <caller2_stack=stack64#2,>caller2=int64#10
# asm 2: movq <caller2_stack=8(%rsp),>caller2=%r12
movq 8(%rsp),%r12
# qhasm: caller3 = caller3_stack
# asm 1: movq <caller3_stack=stack64#3,>caller3=int64#11
# asm 2: movq <caller3_stack=16(%rsp),>caller3=%r13
movq 16(%rsp),%r13
# qhasm: caller4 = caller4_stack
# asm 1: movq <caller4_stack=stack64#4,>caller4=int64#12
# asm 2: movq <caller4_stack=24(%rsp),>caller4=%r14
movq 24(%rsp),%r14
# qhasm: caller5 = caller5_stack
# asm 1: movq <caller5_stack=stack64#5,>caller5=int64#13
# asm 2: movq <caller5_stack=32(%rsp),>caller5=%r15
movq 32(%rsp),%r15
# qhasm: caller6 = caller6_stack
# asm 1: movq <caller6_stack=stack64#6,>caller6=int64#14
# asm 2: movq <caller6_stack=40(%rsp),>caller6=%rbx
movq 40(%rsp),%rbx
# qhasm: caller7 = caller7_stack
# asm 1: movq <caller7_stack=stack64#7,>caller7=int64#15
# asm 2: movq <caller7_stack=48(%rsp),>caller7=%rbp
movq 48(%rsp),%rbp
# qhasm: leave
add %r11,%rsp
mov %rdi,%rax
mov %rsi,%rdx
ret
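
A C sketch of the strategy in the multiply above (inferred from the asm; illustrative only): a 4x4 schoolbook product into eight limbs, after which the high half h is folded into the low half as h*38, because 2^256 == 38 (mod 2^255-19).

#include <stdint.h>
typedef unsigned __int128 u128;

static void fe_mul_sketch(uint64_t r[4], const uint64_t x[4], const uint64_t y[4])
{
  uint64_t w[8] = {0};
  u128 c;
  int i, j;
  for (i = 0; i < 4; i++) {                /* schoolbook 4x4 -> 8 limbs */
    c = 0;
    for (j = 0; j < 4; j++) {
      c += (u128)x[i] * y[j] + w[i + j];
      w[i + j] = (uint64_t)c;
      c >>= 64;
    }
    w[i + 4] = (uint64_t)c;
  }
  c = 0;
  for (i = 0; i < 4; i++) {                /* fold high half: low += high*38 */
    c += (u128)w[i] + (u128)w[i + 4] * 38;
    r[i] = (uint64_t)c;
    c >>= 64;
  }
  c = (u128)r[0] + (uint64_t)c * 38;       /* fold the remaining carry */
  r[0] = (uint64_t)c;
  for (i = 1; i < 4; i++) { c = (c >> 64) + r[i]; r[i] = (uint64_t)c; }
  r[0] += (uint64_t)(c >> 64) * 38;        /* final fold cannot carry */
}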


@@ -0,0 +1,8 @@
#include "fe25519.h"
void fe25519_neg(fe25519 *r, const fe25519 *x)
{
  fe25519 t;
  fe25519_setint(&t,0);
  fe25519_sub(r,&t,x);
}


@@ -0,0 +1,13 @@
#include "fe25519.h"
/* Assumes the input x is reduced below 2^255 */
void fe25519_pack(unsigned char r[32], const fe25519 *x)
{
  int i;
  fe25519 t;
  t = *x;
  fe25519_freeze(&t);
  /* t.v is little-endian limbs, so the canonical bytes can be copied directly */
  for(i=0;i<32;i++) r[i] = ((unsigned char *)t.v)[i];
}


@@ -0,0 +1,55 @@
#include "fe25519.h"
void fe25519_pow2523(fe25519 *r, const fe25519 *x)
{
  fe25519 z2;
  fe25519 z9;
  fe25519 z11;
  fe25519 z2_5_0;
  fe25519 z2_10_0;
  fe25519 z2_20_0;
  fe25519 z2_50_0;
  fe25519 z2_100_0;
  fe25519 t;
  int i;

  /* 2 */ fe25519_square(&z2,x);
  /* 4 */ fe25519_square(&t,&z2);
  /* 8 */ fe25519_square(&t,&t);
  /* 9 */ fe25519_mul(&z9,&t,x);
  /* 11 */ fe25519_mul(&z11,&z9,&z2);
  /* 22 */ fe25519_square(&t,&z11);
  /* 2^5 - 2^0 = 31 */ fe25519_mul(&z2_5_0,&t,&z9);

  /* 2^6 - 2^1 */ fe25519_square(&t,&z2_5_0);
  /* 2^10 - 2^5 */ for (i = 1;i < 5;i++) { fe25519_square(&t,&t); }
  /* 2^10 - 2^0 */ fe25519_mul(&z2_10_0,&t,&z2_5_0);

  /* 2^11 - 2^1 */ fe25519_square(&t,&z2_10_0);
  /* 2^20 - 2^10 */ for (i = 1;i < 10;i++) { fe25519_square(&t,&t); }
  /* 2^20 - 2^0 */ fe25519_mul(&z2_20_0,&t,&z2_10_0);

  /* 2^21 - 2^1 */ fe25519_square(&t,&z2_20_0);
  /* 2^40 - 2^20 */ for (i = 1;i < 20;i++) { fe25519_square(&t,&t); }
  /* 2^40 - 2^0 */ fe25519_mul(&t,&t,&z2_20_0);

  /* 2^41 - 2^1 */ fe25519_square(&t,&t);
  /* 2^50 - 2^10 */ for (i = 1;i < 10;i++) { fe25519_square(&t,&t); }
  /* 2^50 - 2^0 */ fe25519_mul(&z2_50_0,&t,&z2_10_0);

  /* 2^51 - 2^1 */ fe25519_square(&t,&z2_50_0);
  /* 2^100 - 2^50 */ for (i = 1;i < 50;i++) { fe25519_square(&t,&t); }
  /* 2^100 - 2^0 */ fe25519_mul(&z2_100_0,&t,&z2_50_0);

  /* 2^101 - 2^1 */ fe25519_square(&t,&z2_100_0);
  /* 2^200 - 2^100 */ for (i = 1;i < 100;i++) { fe25519_square(&t,&t); }
  /* 2^200 - 2^0 */ fe25519_mul(&t,&t,&z2_100_0);

  /* 2^201 - 2^1 */ fe25519_square(&t,&t);
  /* 2^250 - 2^50 */ for (i = 1;i < 50;i++) { fe25519_square(&t,&t); }
  /* 2^250 - 2^0 */ fe25519_mul(&t,&t,&z2_50_0);

  /* 2^251 - 2^1 */ fe25519_square(&t,&t);
  /* 2^252 - 2^2 */ fe25519_square(&t,&t);
  /* 2^252 - 3 */ fe25519_mul(r,&t,x);
}
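
The name records the exponent; as a reading aid (standard Ed25519 math, not stated in the commit):

  x^{2^{252}-3} = x^{(p-5)/8} \pmod{p}, \qquad p = 2^{255}-19 \equiv 5 \pmod 8,

the power used when computing the square roots needed for point decompression (ge25519_unpackneg_vartime).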


@@ -0,0 +1,9 @@
#include "fe25519.h"
void fe25519_setint(fe25519 *r, unsigned int v)
{
  r->v[0] = v;
  r->v[1] = 0;
  r->v[2] = 0;
  r->v[3] = 0;
}


@@ -0,0 +1,639 @@
# qhasm: int64 rp
# qhasm: int64 xp
# qhasm: input rp
# qhasm: input xp
# qhasm: int64 r0
# qhasm: int64 r1
# qhasm: int64 r2
# qhasm: int64 r3
# qhasm: int64 caller1
# qhasm: int64 caller2
# qhasm: int64 caller3
# qhasm: int64 caller4
# qhasm: int64 caller5
# qhasm: int64 caller6
# qhasm: int64 caller7
# qhasm: caller caller1
# qhasm: caller caller2
# qhasm: caller caller3
# qhasm: caller caller4
# qhasm: caller caller5
# qhasm: caller caller6
# qhasm: caller caller7
# qhasm: stack64 caller1_stack
# qhasm: stack64 caller2_stack
# qhasm: stack64 caller3_stack
# qhasm: stack64 caller4_stack
# qhasm: stack64 caller5_stack
# qhasm: stack64 caller6_stack
# qhasm: stack64 caller7_stack
# qhasm: int64 squarer4
# qhasm: int64 squarer5
# qhasm: int64 squarer6
# qhasm: int64 squarer7
# qhasm: int64 squarer8
# qhasm: int64 squarerax
# qhasm: int64 squarerdx
# qhasm: int64 squaret1
# qhasm: int64 squaret2
# qhasm: int64 squaret3
# qhasm: int64 squarec
# qhasm: int64 squarezero
# qhasm: int64 squarei38
# qhasm: enter crypto_sign_ed25519_amd64_64_fe25519_square
.text
.p2align 5
.globl _crypto_sign_ed25519_amd64_64_fe25519_square
.globl crypto_sign_ed25519_amd64_64_fe25519_square
_crypto_sign_ed25519_amd64_64_fe25519_square:
crypto_sign_ed25519_amd64_64_fe25519_square:
mov %rsp,%r11
and $31,%r11
add $64,%r11
sub %r11,%rsp
# qhasm: caller1_stack = caller1
# asm 1: movq <caller1=int64#9,>caller1_stack=stack64#1
# asm 2: movq <caller1=%r11,>caller1_stack=0(%rsp)
movq %r11,0(%rsp)
# qhasm: caller2_stack = caller2
# asm 1: movq <caller2=int64#10,>caller2_stack=stack64#2
# asm 2: movq <caller2=%r12,>caller2_stack=8(%rsp)
movq %r12,8(%rsp)
# qhasm: caller3_stack = caller3
# asm 1: movq <caller3=int64#11,>caller3_stack=stack64#3
# asm 2: movq <caller3=%r13,>caller3_stack=16(%rsp)
movq %r13,16(%rsp)
# qhasm: caller4_stack = caller4
# asm 1: movq <caller4=int64#12,>caller4_stack=stack64#4
# asm 2: movq <caller4=%r14,>caller4_stack=24(%rsp)
movq %r14,24(%rsp)
# qhasm: caller5_stack = caller5
# asm 1: movq <caller5=int64#13,>caller5_stack=stack64#5
# asm 2: movq <caller5=%r15,>caller5_stack=32(%rsp)
movq %r15,32(%rsp)
# qhasm: caller6_stack = caller6
# asm 1: movq <caller6=int64#14,>caller6_stack=stack64#6
# asm 2: movq <caller6=%rbx,>caller6_stack=40(%rsp)
movq %rbx,40(%rsp)
# qhasm: caller7_stack = caller7
# asm 1: movq <caller7=int64#15,>caller7_stack=stack64#7
# asm 2: movq <caller7=%rbp,>caller7_stack=48(%rsp)
movq %rbp,48(%rsp)
# qhasm: squarer7 = 0
# asm 1: mov $0,>squarer7=int64#4
# asm 2: mov $0,>squarer7=%rcx
mov $0,%rcx
# qhasm: squarerax = *(uint64 *)(xp + 8)
# asm 1: movq 8(<xp=int64#2),>squarerax=int64#7
# asm 2: movq 8(<xp=%rsi),>squarerax=%rax
movq 8(%rsi),%rax
# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(xp + 0)
# asm 1: mulq 0(<xp=int64#2)
# asm 2: mulq 0(<xp=%rsi)
mulq 0(%rsi)
# qhasm: r1 = squarerax
# asm 1: mov <squarerax=int64#7,>r1=int64#5
# asm 2: mov <squarerax=%rax,>r1=%r8
mov %rax,%r8
# qhasm: r2 = squarerdx
# asm 1: mov <squarerdx=int64#3,>r2=int64#6
# asm 2: mov <squarerdx=%rdx,>r2=%r9
mov %rdx,%r9
# qhasm: squarerax = *(uint64 *)(xp + 16)
# asm 1: movq 16(<xp=int64#2),>squarerax=int64#7
# asm 2: movq 16(<xp=%rsi),>squarerax=%rax
movq 16(%rsi),%rax
# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(xp + 8)
# asm 1: mulq 8(<xp=int64#2)
# asm 2: mulq 8(<xp=%rsi)
mulq 8(%rsi)
# qhasm: r3 = squarerax
# asm 1: mov <squarerax=int64#7,>r3=int64#8
# asm 2: mov <squarerax=%rax,>r3=%r10
mov %rax,%r10
# qhasm: squarer4 = squarerdx
# asm 1: mov <squarerdx=int64#3,>squarer4=int64#9
# asm 2: mov <squarerdx=%rdx,>squarer4=%r11
mov %rdx,%r11
# qhasm: squarerax = *(uint64 *)(xp + 24)
# asm 1: movq 24(<xp=int64#2),>squarerax=int64#7
# asm 2: movq 24(<xp=%rsi),>squarerax=%rax
movq 24(%rsi),%rax
# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(xp + 16)
# asm 1: mulq 16(<xp=int64#2)
# asm 2: mulq 16(<xp=%rsi)
mulq 16(%rsi)
# qhasm: squarer5 = squarerax
# asm 1: mov <squarerax=int64#7,>squarer5=int64#10
# asm 2: mov <squarerax=%rax,>squarer5=%r12
mov %rax,%r12
# qhasm: squarer6 = squarerdx
# asm 1: mov <squarerdx=int64#3,>squarer6=int64#11
# asm 2: mov <squarerdx=%rdx,>squarer6=%r13
mov %rdx,%r13
# qhasm: squarerax = *(uint64 *)(xp + 16)
# asm 1: movq 16(<xp=int64#2),>squarerax=int64#7
# asm 2: movq 16(<xp=%rsi),>squarerax=%rax
movq 16(%rsi),%rax
# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(xp + 0)
# asm 1: mulq 0(<xp=int64#2)
# asm 2: mulq 0(<xp=%rsi)
mulq 0(%rsi)
# qhasm: carry? r2 += squarerax
# asm 1: add <squarerax=int64#7,<r2=int64#6
# asm 2: add <squarerax=%rax,<r2=%r9
add %rax,%r9
# qhasm: carry? r3 += squarerdx + carry
# asm 1: adc <squarerdx=int64#3,<r3=int64#8
# asm 2: adc <squarerdx=%rdx,<r3=%r10
adc %rdx,%r10
# qhasm: squarer4 += 0 + carry
# asm 1: adc $0,<squarer4=int64#9
# asm 2: adc $0,<squarer4=%r11
adc $0,%r11
# qhasm: squarerax = *(uint64 *)(xp + 24)
# asm 1: movq 24(<xp=int64#2),>squarerax=int64#7
# asm 2: movq 24(<xp=%rsi),>squarerax=%rax
movq 24(%rsi),%rax
# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(xp + 8)
# asm 1: mulq 8(<xp=int64#2)
# asm 2: mulq 8(<xp=%rsi)
mulq 8(%rsi)
# qhasm: carry? squarer4 += squarerax
# asm 1: add <squarerax=int64#7,<squarer4=int64#9
# asm 2: add <squarerax=%rax,<squarer4=%r11
add %rax,%r11
# qhasm: carry? squarer5 += squarerdx + carry
# asm 1: adc <squarerdx=int64#3,<squarer5=int64#10
# asm 2: adc <squarerdx=%rdx,<squarer5=%r12
adc %rdx,%r12
# qhasm: squarer6 += 0 + carry
# asm 1: adc $0,<squarer6=int64#11
# asm 2: adc $0,<squarer6=%r13
adc $0,%r13
# qhasm: squarerax = *(uint64 *)(xp + 24)
# asm 1: movq 24(<xp=int64#2),>squarerax=int64#7
# asm 2: movq 24(<xp=%rsi),>squarerax=%rax
movq 24(%rsi),%rax
# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(xp + 0)
# asm 1: mulq 0(<xp=int64#2)
# asm 2: mulq 0(<xp=%rsi)
mulq 0(%rsi)
# qhasm: carry? r3 += squarerax
# asm 1: add <squarerax=int64#7,<r3=int64#8
# asm 2: add <squarerax=%rax,<r3=%r10
add %rax,%r10
# qhasm: carry? squarer4 += squarerdx + carry
# asm 1: adc <squarerdx=int64#3,<squarer4=int64#9
# asm 2: adc <squarerdx=%rdx,<squarer4=%r11
adc %rdx,%r11
# qhasm: carry? squarer5 += 0 + carry
# asm 1: adc $0,<squarer5=int64#10
# asm 2: adc $0,<squarer5=%r12
adc $0,%r12
# qhasm: carry? squarer6 += 0 + carry
# asm 1: adc $0,<squarer6=int64#11
# asm 2: adc $0,<squarer6=%r13
adc $0,%r13
# qhasm: squarer7 += 0 + carry
# asm 1: adc $0,<squarer7=int64#4
# asm 2: adc $0,<squarer7=%rcx
adc $0,%rcx
# qhasm: carry? r1 += r1
# asm 1: add <r1=int64#5,<r1=int64#5
# asm 2: add <r1=%r8,<r1=%r8
add %r8,%r8
# qhasm: carry? r2 += r2 + carry
# asm 1: adc <r2=int64#6,<r2=int64#6
# asm 2: adc <r2=%r9,<r2=%r9
adc %r9,%r9
# qhasm: carry? r3 += r3 + carry
# asm 1: adc <r3=int64#8,<r3=int64#8
# asm 2: adc <r3=%r10,<r3=%r10
adc %r10,%r10
# qhasm: carry? squarer4 += squarer4 + carry
# asm 1: adc <squarer4=int64#9,<squarer4=int64#9
# asm 2: adc <squarer4=%r11,<squarer4=%r11
adc %r11,%r11
# qhasm: carry? squarer5 += squarer5 + carry
# asm 1: adc <squarer5=int64#10,<squarer5=int64#10
# asm 2: adc <squarer5=%r12,<squarer5=%r12
adc %r12,%r12
# qhasm: carry? squarer6 += squarer6 + carry
# asm 1: adc <squarer6=int64#11,<squarer6=int64#11
# asm 2: adc <squarer6=%r13,<squarer6=%r13
adc %r13,%r13
# qhasm: squarer7 += squarer7 + carry
# asm 1: adc <squarer7=int64#4,<squarer7=int64#4
# asm 2: adc <squarer7=%rcx,<squarer7=%rcx
adc %rcx,%rcx
# qhasm: squarerax = *(uint64 *)(xp + 0)
# asm 1: movq 0(<xp=int64#2),>squarerax=int64#7
# asm 2: movq 0(<xp=%rsi),>squarerax=%rax
movq 0(%rsi),%rax
# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(xp + 0)
# asm 1: mulq 0(<xp=int64#2)
# asm 2: mulq 0(<xp=%rsi)
mulq 0(%rsi)
# qhasm: r0 = squarerax
# asm 1: mov <squarerax=int64#7,>r0=int64#12
# asm 2: mov <squarerax=%rax,>r0=%r14
mov %rax,%r14
# qhasm: squaret1 = squarerdx
# asm 1: mov <squarerdx=int64#3,>squaret1=int64#13
# asm 2: mov <squarerdx=%rdx,>squaret1=%r15
mov %rdx,%r15
# qhasm: squarerax = *(uint64 *)(xp + 8)
# asm 1: movq 8(<xp=int64#2),>squarerax=int64#7
# asm 2: movq 8(<xp=%rsi),>squarerax=%rax
movq 8(%rsi),%rax
# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(xp + 8)
# asm 1: mulq 8(<xp=int64#2)
# asm 2: mulq 8(<xp=%rsi)
mulq 8(%rsi)
# qhasm: squaret2 = squarerax
# asm 1: mov <squarerax=int64#7,>squaret2=int64#14
# asm 2: mov <squarerax=%rax,>squaret2=%rbx
mov %rax,%rbx
# qhasm: squaret3 = squarerdx
# asm 1: mov <squarerdx=int64#3,>squaret3=int64#15
# asm 2: mov <squarerdx=%rdx,>squaret3=%rbp
mov %rdx,%rbp
# qhasm: squarerax = *(uint64 *)(xp + 16)
# asm 1: movq 16(<xp=int64#2),>squarerax=int64#7
# asm 2: movq 16(<xp=%rsi),>squarerax=%rax
movq 16(%rsi),%rax
# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(xp + 16)
# asm 1: mulq 16(<xp=int64#2)
# asm 2: mulq 16(<xp=%rsi)
mulq 16(%rsi)
# qhasm: carry? r1 += squaret1
# asm 1: add <squaret1=int64#13,<r1=int64#5
# asm 2: add <squaret1=%r15,<r1=%r8
add %r15,%r8
# qhasm: carry? r2 += squaret2 + carry
# asm 1: adc <squaret2=int64#14,<r2=int64#6
# asm 2: adc <squaret2=%rbx,<r2=%r9
adc %rbx,%r9
# qhasm: carry? r3 += squaret3 + carry
# asm 1: adc <squaret3=int64#15,<r3=int64#8
# asm 2: adc <squaret3=%rbp,<r3=%r10
adc %rbp,%r10
# qhasm: carry? squarer4 += squarerax + carry
# asm 1: adc <squarerax=int64#7,<squarer4=int64#9
# asm 2: adc <squarerax=%rax,<squarer4=%r11
adc %rax,%r11
# qhasm: carry? squarer5 += squarerdx + carry
# asm 1: adc <squarerdx=int64#3,<squarer5=int64#10
# asm 2: adc <squarerdx=%rdx,<squarer5=%r12
adc %rdx,%r12
# qhasm: carry? squarer6 += 0 + carry
# asm 1: adc $0,<squarer6=int64#11
# asm 2: adc $0,<squarer6=%r13
adc $0,%r13
# qhasm: squarer7 += 0 + carry
# asm 1: adc $0,<squarer7=int64#4
# asm 2: adc $0,<squarer7=%rcx
adc $0,%rcx
# qhasm: squarerax = *(uint64 *)(xp + 24)
# asm 1: movq 24(<xp=int64#2),>squarerax=int64#7
# asm 2: movq 24(<xp=%rsi),>squarerax=%rax
movq 24(%rsi),%rax
# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(xp + 24)
# asm 1: mulq 24(<xp=int64#2)
# asm 2: mulq 24(<xp=%rsi)
mulq 24(%rsi)
# qhasm: carry? squarer6 += squarerax
# asm 1: add <squarerax=int64#7,<squarer6=int64#11
# asm 2: add <squarerax=%rax,<squarer6=%r13
add %rax,%r13
# qhasm: squarer7 += squarerdx + carry
# asm 1: adc <squarerdx=int64#3,<squarer7=int64#4
# asm 2: adc <squarerdx=%rdx,<squarer7=%rcx
adc %rdx,%rcx
# qhasm: squarerax = squarer4
# asm 1: mov <squarer4=int64#9,>squarerax=int64#7
# asm 2: mov <squarer4=%r11,>squarerax=%rax
mov %r11,%rax
# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
# qhasm: squarer4 = squarerax
# asm 1: mov <squarerax=int64#7,>squarer4=int64#2
# asm 2: mov <squarerax=%rax,>squarer4=%rsi
mov %rax,%rsi
# qhasm: squarerax = squarer5
# asm 1: mov <squarer5=int64#10,>squarerax=int64#7
# asm 2: mov <squarer5=%r12,>squarerax=%rax
mov %r12,%rax
# qhasm: squarer5 = squarerdx
# asm 1: mov <squarerdx=int64#3,>squarer5=int64#9
# asm 2: mov <squarerdx=%rdx,>squarer5=%r11
mov %rdx,%r11
# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
# qhasm: carry? squarer5 += squarerax
# asm 1: add <squarerax=int64#7,<squarer5=int64#9
# asm 2: add <squarerax=%rax,<squarer5=%r11
add %rax,%r11
# qhasm: squarerax = squarer6
# asm 1: mov <squarer6=int64#11,>squarerax=int64#7
# asm 2: mov <squarer6=%r13,>squarerax=%rax
mov %r13,%rax
# qhasm: squarer6 = 0
# asm 1: mov $0,>squarer6=int64#10
# asm 2: mov $0,>squarer6=%r12
mov $0,%r12
# qhasm: squarer6 += squarerdx + carry
# asm 1: adc <squarerdx=int64#3,<squarer6=int64#10
# asm 2: adc <squarerdx=%rdx,<squarer6=%r12
adc %rdx,%r12
# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
# qhasm: carry? squarer6 += squarerax
# asm 1: add <squarerax=int64#7,<squarer6=int64#10
# asm 2: add <squarerax=%rax,<squarer6=%r12
add %rax,%r12
# qhasm: squarerax = squarer7
# asm 1: mov <squarer7=int64#4,>squarerax=int64#7
# asm 2: mov <squarer7=%rcx,>squarerax=%rax
mov %rcx,%rax
# qhasm: squarer7 = 0
# asm 1: mov $0,>squarer7=int64#4
# asm 2: mov $0,>squarer7=%rcx
mov $0,%rcx
# qhasm: squarer7 += squarerdx + carry
# asm 1: adc <squarerdx=int64#3,<squarer7=int64#4
# asm 2: adc <squarerdx=%rdx,<squarer7=%rcx
adc %rdx,%rcx
# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
# qhasm: carry? squarer7 += squarerax
# asm 1: add <squarerax=int64#7,<squarer7=int64#4
# asm 2: add <squarerax=%rax,<squarer7=%rcx
add %rax,%rcx
# qhasm: squarer8 = 0
# asm 1: mov $0,>squarer8=int64#7
# asm 2: mov $0,>squarer8=%rax
mov $0,%rax
# qhasm: squarer8 += squarerdx + carry
# asm 1: adc <squarerdx=int64#3,<squarer8=int64#7
# asm 2: adc <squarerdx=%rdx,<squarer8=%rax
adc %rdx,%rax
# qhasm: carry? r0 += squarer4
# asm 1: add <squarer4=int64#2,<r0=int64#12
# asm 2: add <squarer4=%rsi,<r0=%r14
add %rsi,%r14
# qhasm: carry? r1 += squarer5 + carry
# asm 1: adc <squarer5=int64#9,<r1=int64#5
# asm 2: adc <squarer5=%r11,<r1=%r8
adc %r11,%r8
# qhasm: carry? r2 += squarer6 + carry
# asm 1: adc <squarer6=int64#10,<r2=int64#6
# asm 2: adc <squarer6=%r12,<r2=%r9
adc %r12,%r9
# qhasm: carry? r3 += squarer7 + carry
# asm 1: adc <squarer7=int64#4,<r3=int64#8
# asm 2: adc <squarer7=%rcx,<r3=%r10
adc %rcx,%r10
# qhasm: squarezero = 0
# asm 1: mov $0,>squarezero=int64#2
# asm 2: mov $0,>squarezero=%rsi
mov $0,%rsi
# qhasm: squarer8 += squarezero + carry
# asm 1: adc <squarezero=int64#2,<squarer8=int64#7
# asm 2: adc <squarezero=%rsi,<squarer8=%rax
adc %rsi,%rax
# qhasm: squarer8 *= 38
# asm 1: imulq $38,<squarer8=int64#7,>squarer8=int64#3
# asm 2: imulq $38,<squarer8=%rax,>squarer8=%rdx
imulq $38,%rax,%rdx
# qhasm: carry? r0 += squarer8
# asm 1: add <squarer8=int64#3,<r0=int64#12
# asm 2: add <squarer8=%rdx,<r0=%r14
add %rdx,%r14
# qhasm: carry? r1 += squarezero + carry
# asm 1: adc <squarezero=int64#2,<r1=int64#5
# asm 2: adc <squarezero=%rsi,<r1=%r8
adc %rsi,%r8
# qhasm: carry? r2 += squarezero + carry
# asm 1: adc <squarezero=int64#2,<r2=int64#6
# asm 2: adc <squarezero=%rsi,<r2=%r9
adc %rsi,%r9
# qhasm: carry? r3 += squarezero + carry
# asm 1: adc <squarezero=int64#2,<r3=int64#8
# asm 2: adc <squarezero=%rsi,<r3=%r10
adc %rsi,%r10
# qhasm: squarezero += squarezero + carry
# asm 1: adc <squarezero=int64#2,<squarezero=int64#2
# asm 2: adc <squarezero=%rsi,<squarezero=%rsi
adc %rsi,%rsi
# qhasm: squarezero *= 38
# asm 1: imulq $38,<squarezero=int64#2,>squarezero=int64#2
# asm 2: imulq $38,<squarezero=%rsi,>squarezero=%rsi
imulq $38,%rsi,%rsi
# qhasm: r0 += squarezero
# asm 1: add <squarezero=int64#2,<r0=int64#12
# asm 2: add <squarezero=%rsi,<r0=%r14
add %rsi,%r14
# qhasm: *(uint64 *)(rp + 8) = r1
# asm 1: movq <r1=int64#5,8(<rp=int64#1)
# asm 2: movq <r1=%r8,8(<rp=%rdi)
movq %r8,8(%rdi)
# qhasm: *(uint64 *)(rp + 16) = r2
# asm 1: movq <r2=int64#6,16(<rp=int64#1)
# asm 2: movq <r2=%r9,16(<rp=%rdi)
movq %r9,16(%rdi)
# qhasm: *(uint64 *)(rp + 24) = r3
# asm 1: movq <r3=int64#8,24(<rp=int64#1)
# asm 2: movq <r3=%r10,24(<rp=%rdi)
movq %r10,24(%rdi)
# qhasm: *(uint64 *)(rp + 0) = r0
# asm 1: movq <r0=int64#12,0(<rp=int64#1)
# asm 2: movq <r0=%r14,0(<rp=%rdi)
movq %r14,0(%rdi)
# qhasm: caller1 = caller1_stack
# asm 1: movq <caller1_stack=stack64#1,>caller1=int64#9
# asm 2: movq <caller1_stack=0(%rsp),>caller1=%r11
movq 0(%rsp),%r11
# qhasm: caller2 = caller2_stack
# asm 1: movq <caller2_stack=stack64#2,>caller2=int64#10
# asm 2: movq <caller2_stack=8(%rsp),>caller2=%r12
movq 8(%rsp),%r12
# qhasm: caller3 = caller3_stack
# asm 1: movq <caller3_stack=stack64#3,>caller3=int64#11
# asm 2: movq <caller3_stack=16(%rsp),>caller3=%r13
movq 16(%rsp),%r13
# qhasm: caller4 = caller4_stack
# asm 1: movq <caller4_stack=stack64#4,>caller4=int64#12
# asm 2: movq <caller4_stack=24(%rsp),>caller4=%r14
movq 24(%rsp),%r14
# qhasm: caller5 = caller5_stack
# asm 1: movq <caller5_stack=stack64#5,>caller5=int64#13
# asm 2: movq <caller5_stack=32(%rsp),>caller5=%r15
movq 32(%rsp),%r15
# qhasm: caller6 = caller6_stack
# asm 1: movq <caller6_stack=stack64#6,>caller6=int64#14
# asm 2: movq <caller6_stack=40(%rsp),>caller6=%rbx
movq 40(%rsp),%rbx
# qhasm: caller7 = caller7_stack
# asm 1: movq <caller7_stack=stack64#7,>caller7=int64#15
# asm 2: movq <caller7_stack=48(%rsp),>caller7=%rbp
movq 48(%rsp),%rbp
# qhasm: leave
add %r11,%rsp
mov %rdi,%rax
mov %rsi,%rdx
ret
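
The mulq/imulq-by-38 tail above reduces the 512-bit square modulo 2^255 - 19 by folding the high four limbs into the low four, using 2^256 = 38 (mod 2^255 - 19). A minimal C sketch of the same folding strategy, assuming a compiler with unsigned __int128 (fold38 is an illustrative name, not a function in this commit):

#include <stdint.h>

typedef unsigned __int128 uint128;

/* Fold high limbs h[0..3] into low limbs r[0..3]: r += 38*h, then wrap
   the carry out of r[3] around as another factor of 38, mirroring the
   imulq $38 / add / adc chain at the end of the routine above. */
static void fold38(uint64_t r[4], const uint64_t h[4])
{
    uint64_t carry = 0;
    for (int i = 0; i < 4; i++) {
        uint128 t = (uint128)h[i] * 38 + r[i] + carry;
        r[i]  = (uint64_t)t;
        carry = (uint64_t)(t >> 64);
    }
    uint128 t = (uint128)carry * 38 + r[0];   /* carry <= 39 here */
    r[0] = (uint64_t)t;
    uint64_t c = (uint64_t)(t >> 64);         /* 0 or 1 */
    for (int i = 1; i < 4 && c; i++) { r[i] += c; c = (r[i] == 0); }
    r[0] += c * 38;   /* final wrap; cannot overflow at these bounds */
}

As in the assembly, the result is congruent mod 2^255 - 19 but not fully reduced; a separate freeze step canonicalizes the value before packing.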


@@ -0,0 +1,189 @@
# qhasm: int64 rp
# qhasm: int64 xp
# qhasm: int64 yp
# qhasm: input rp
# qhasm: input xp
# qhasm: input yp
# qhasm: int64 r0
# qhasm: int64 r1
# qhasm: int64 r2
# qhasm: int64 r3
# qhasm: int64 subt0
# qhasm: int64 subt1
# qhasm: int64 caller1
# qhasm: int64 caller2
# qhasm: int64 caller3
# qhasm: int64 caller4
# qhasm: int64 caller5
# qhasm: int64 caller6
# qhasm: int64 caller7
# qhasm: caller caller1
# qhasm: caller caller2
# qhasm: caller caller3
# qhasm: caller caller4
# qhasm: caller caller5
# qhasm: caller caller6
# qhasm: caller caller7
# qhasm: stack64 caller1_stack
# qhasm: stack64 caller2_stack
# qhasm: stack64 caller3_stack
# qhasm: stack64 caller4_stack
# qhasm: stack64 caller5_stack
# qhasm: stack64 caller6_stack
# qhasm: stack64 caller7_stack
# qhasm: enter crypto_sign_ed25519_amd64_64_fe25519_sub
.text
.p2align 5
.globl _crypto_sign_ed25519_amd64_64_fe25519_sub
.globl crypto_sign_ed25519_amd64_64_fe25519_sub
_crypto_sign_ed25519_amd64_64_fe25519_sub:
crypto_sign_ed25519_amd64_64_fe25519_sub:
mov %rsp,%r11
and $31,%r11
add $0,%r11
sub %r11,%rsp
# qhasm: r0 = *(uint64 *)(xp + 0)
# asm 1: movq 0(<xp=int64#2),>r0=int64#4
# asm 2: movq 0(<xp=%rsi),>r0=%rcx
movq 0(%rsi),%rcx
# qhasm: r1 = *(uint64 *)(xp + 8)
# asm 1: movq 8(<xp=int64#2),>r1=int64#5
# asm 2: movq 8(<xp=%rsi),>r1=%r8
movq 8(%rsi),%r8
# qhasm: r2 = *(uint64 *)(xp + 16)
# asm 1: movq 16(<xp=int64#2),>r2=int64#6
# asm 2: movq 16(<xp=%rsi),>r2=%r9
movq 16(%rsi),%r9
# qhasm: r3 = *(uint64 *)(xp + 24)
# asm 1: movq 24(<xp=int64#2),>r3=int64#2
# asm 2: movq 24(<xp=%rsi),>r3=%rsi
movq 24(%rsi),%rsi
# qhasm: carry? r0 -= *(uint64 *)(yp + 0)
# asm 1: subq 0(<yp=int64#3),<r0=int64#4
# asm 2: subq 0(<yp=%rdx),<r0=%rcx
subq 0(%rdx),%rcx
# qhasm: carry? r1 -= *(uint64 *)(yp + 8) - carry
# asm 1: sbbq 8(<yp=int64#3),<r1=int64#5
# asm 2: sbbq 8(<yp=%rdx),<r1=%r8
sbbq 8(%rdx),%r8
# qhasm: carry? r2 -= *(uint64 *)(yp + 16) - carry
# asm 1: sbbq 16(<yp=int64#3),<r2=int64#6
# asm 2: sbbq 16(<yp=%rdx),<r2=%r9
sbbq 16(%rdx),%r9
# qhasm: carry? r3 -= *(uint64 *)(yp + 24) - carry
# asm 1: sbbq 24(<yp=int64#3),<r3=int64#2
# asm 2: sbbq 24(<yp=%rdx),<r3=%rsi
sbbq 24(%rdx),%rsi
# qhasm: subt0 = 0
# asm 1: mov $0,>subt0=int64#3
# asm 2: mov $0,>subt0=%rdx
mov $0,%rdx
# qhasm: subt1 = 38
# asm 1: mov $38,>subt1=int64#7
# asm 2: mov $38,>subt1=%rax
mov $38,%rax
# qhasm: subt1 = subt0 if !carry
# asm 1: cmovae <subt0=int64#3,<subt1=int64#7
# asm 2: cmovae <subt0=%rdx,<subt1=%rax
cmovae %rdx,%rax
# qhasm: carry? r0 -= subt1
# asm 1: sub <subt1=int64#7,<r0=int64#4
# asm 2: sub <subt1=%rax,<r0=%rcx
sub %rax,%rcx
# qhasm: carry? r1 -= subt0 - carry
# asm 1: sbb <subt0=int64#3,<r1=int64#5
# asm 2: sbb <subt0=%rdx,<r1=%r8
sbb %rdx,%r8
# qhasm: carry? r2 -= subt0 - carry
# asm 1: sbb <subt0=int64#3,<r2=int64#6
# asm 2: sbb <subt0=%rdx,<r2=%r9
sbb %rdx,%r9
# qhasm: carry? r3 -= subt0 - carry
# asm 1: sbb <subt0=int64#3,<r3=int64#2
# asm 2: sbb <subt0=%rdx,<r3=%rsi
sbb %rdx,%rsi
# qhasm: subt0 = subt1 if carry
# asm 1: cmovc <subt1=int64#7,<subt0=int64#3
# asm 2: cmovc <subt1=%rax,<subt0=%rdx
cmovc %rax,%rdx
# qhasm: r0 -= subt0
# asm 1: sub <subt0=int64#3,<r0=int64#4
# asm 2: sub <subt0=%rdx,<r0=%rcx
sub %rdx,%rcx
# qhasm: *(uint64 *)(rp + 0) = r0
# asm 1: movq <r0=int64#4,0(<rp=int64#1)
# asm 2: movq <r0=%rcx,0(<rp=%rdi)
movq %rcx,0(%rdi)
# qhasm: *(uint64 *)(rp + 8) = r1
# asm 1: movq <r1=int64#5,8(<rp=int64#1)
# asm 2: movq <r1=%r8,8(<rp=%rdi)
movq %r8,8(%rdi)
# qhasm: *(uint64 *)(rp + 16) = r2
# asm 1: movq <r2=int64#6,16(<rp=int64#1)
# asm 2: movq <r2=%r9,16(<rp=%rdi)
movq %r9,16(%rdi)
# qhasm: *(uint64 *)(rp + 24) = r3
# asm 1: movq <r3=int64#2,24(<rp=int64#1)
# asm 2: movq <r3=%rsi,24(<rp=%rdi)
movq %rsi,24(%rdi)
# qhasm: leave
add %r11,%rsp
mov %rdi,%rax
mov %rsi,%rdx
ret
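
The cmovae/cmovc pair above is the modular correction for subtraction: a borrow out of the top limb means the four-limb result wrapped by 2^256, and since 2^256 = 38 (mod 2^255 - 19) the fix is to subtract 38 again, at most twice. A C sketch of the same logic (illustrative, not part of this commit):

#include <stdint.h>

typedef unsigned __int128 uint128;

/* r = a - b, congruent mod 2^255 - 19 (not fully reduced), mirroring
   the sub/sbb chain plus the two conditional corrections above. */
static void sub_sketch(uint64_t r[4], const uint64_t a[4], const uint64_t b[4])
{
    uint64_t borrow = 0;
    for (int i = 0; i < 4; i++) {
        uint128 d = (uint128)a[i] - b[i] - borrow;
        r[i] = (uint64_t)d;
        borrow = (uint64_t)(d >> 64) & 1;    /* 1 iff this limb wrapped */
    }
    /* first correction: subtracting 38 undoes the implicit +2^256 */
    uint128 d = (uint128)r[0] - (borrow ? 38 : 0);
    r[0] = (uint64_t)d;
    borrow = (uint64_t)(d >> 64) & 1;
    for (int i = 1; i < 4; i++) {
        d = (uint128)r[i] - borrow;
        r[i] = (uint64_t)d;
        borrow = (uint64_t)(d >> 64) & 1;
    }
    /* second correction fires only if the first one borrowed again */
    r[0] -= borrow ? 38 : 0;
}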


@@ -0,0 +1,11 @@
#include "fe25519.h"
void fe25519_unpack(fe25519 *r, const unsigned char x[32])
{
/* assuming little-endian */
r->v[0] = *(unsigned long long *)x;
r->v[1] = *(((unsigned long long *)x)+1);
r->v[2] = *(((unsigned long long *)x)+2);
r->v[3] = *(((unsigned long long *)x)+3);
r->v[3] &= 0x7fffffffffffffffULL;
}
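
fe25519_unpack as written assumes a little-endian target that tolerates unaligned 8-byte loads. A portable equivalent (a sketch, not part of this change) assembles each limb byte by byte:

#include <stdint.h>
#include "fe25519.h"

/* little-endian 64-bit load with no alignment or endianness assumptions */
static uint64_t load64_le(const unsigned char *p)
{
    uint64_t r = 0;
    for (int i = 7; i >= 0; i--)
        r = (r << 8) | p[i];
    return r;
}

void fe25519_unpack_portable(fe25519 *r, const unsigned char x[32])
{
    for (int i = 0; i < 4; i++)
        r->v[i] = load64_le(x + 8 * i);
    r->v[3] &= 0x7fffffffffffffffULL;  /* clear bit 255, the x sign bit */
}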


@@ -0,0 +1,95 @@
#ifndef GE25519_H
#define GE25519_H
#include "fe25519.h"
#include "sc25519.h"
#define ge25519 crypto_sign_ed25519_amd64_64_ge25519
#define ge25519_base crypto_sign_ed25519_amd64_64_ge25519_base
#define ge25519_unpackneg_vartime crypto_sign_ed25519_amd64_64_unpackneg_vartime
#define ge25519_pack crypto_sign_ed25519_amd64_64_pack
#define ge25519_isneutral_vartime crypto_sign_ed25519_amd64_64_isneutral_vartime
#define ge25519_add crypto_sign_ed25519_amd64_64_ge25519_add
#define ge25519_double crypto_sign_ed25519_amd64_64_ge25519_double
#define ge25519_double_scalarmult_vartime crypto_sign_ed25519_amd64_64_double_scalarmult_vartime
#define ge25519_multi_scalarmult_vartime crypto_sign_ed25519_amd64_64_ge25519_multi_scalarmult_vartime
#define ge25519_scalarmult_base crypto_sign_ed25519_amd64_64_scalarmult_base
#define ge25519_p1p1_to_p2 crypto_sign_ed25519_amd64_64_ge25519_p1p1_to_p2
#define ge25519_p1p1_to_p3 crypto_sign_ed25519_amd64_64_ge25519_p1p1_to_p3
#define ge25519_add_p1p1 crypto_sign_ed25519_amd64_64_ge25519_add_p1p1
#define ge25519_dbl_p1p1 crypto_sign_ed25519_amd64_64_ge25519_dbl_p1p1
#define choose_t crypto_sign_ed25519_amd64_64_choose_t
#define ge25519_nielsadd2 crypto_sign_ed25519_amd64_64_ge25519_nielsadd2
#define ge25519_nielsadd_p1p1 crypto_sign_ed25519_amd64_64_ge25519_nielsadd_p1p1
#define ge25519_pnielsadd_p1p1 crypto_sign_ed25519_amd64_64_ge25519_pnielsadd_p1p1
#define ge25519_p3 ge25519
typedef struct
{
fe25519 x;
fe25519 y;
fe25519 z;
fe25519 t;
} ge25519;
typedef struct
{
fe25519 x;
fe25519 z;
fe25519 y;
fe25519 t;
} ge25519_p1p1;
typedef struct
{
fe25519 x;
fe25519 y;
fe25519 z;
} ge25519_p2;
typedef struct
{
fe25519 ysubx;
fe25519 xaddy;
fe25519 t2d;
} ge25519_niels;
typedef struct
{
fe25519 ysubx;
fe25519 xaddy;
fe25519 z;
fe25519 t2d;
} ge25519_pniels;
extern void ge25519_p1p1_to_p2(ge25519_p2 *r, const ge25519_p1p1 *p);
extern void ge25519_p1p1_to_p3(ge25519_p3 *r, const ge25519_p1p1 *p);
extern void ge25519_add_p1p1(ge25519_p1p1 *r, const ge25519_p3 *p, const ge25519_p3 *q);
extern void ge25519_dbl_p1p1(ge25519_p1p1 *r, const ge25519_p2 *p);
extern void choose_t(ge25519_niels *t, unsigned long long pos, signed long long b, const ge25519_niels *base_multiples);
extern void ge25519_nielsadd2(ge25519_p3 *r, const ge25519_niels *q);
extern void ge25519_nielsadd_p1p1(ge25519_p1p1 *r, const ge25519_p3 *p, const ge25519_niels *q);
extern void ge25519_pnielsadd_p1p1(ge25519_p1p1 *r, const ge25519_p3 *p, const ge25519_pniels *q);
extern const ge25519 ge25519_base;
extern int ge25519_unpackneg_vartime(ge25519 *r, const unsigned char p[32]);
extern void ge25519_pack(unsigned char r[32], const ge25519 *p);
extern int ge25519_isneutral_vartime(const ge25519 *p);
extern void ge25519_add(ge25519 *r, const ge25519 *p, const ge25519 *q);
extern void ge25519_double(ge25519 *r, const ge25519 *p);
/* computes [s1]p1 + [s2]ge25519_base */
extern void ge25519_double_scalarmult_vartime(ge25519 *r, const ge25519 *p1, const sc25519 *s1, const sc25519 *s2);
extern void ge25519_multi_scalarmult_vartime(ge25519 *r, ge25519 *p, sc25519 *s, const unsigned long long npoints);
extern void ge25519_scalarmult_base(ge25519 *r, const sc25519 *s);
#endif
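
The comment on ge25519_double_scalarmult_vartime is the hook for single-signature verification: because ge25519_unpackneg_vartime yields -A, checking [s]B = R + [H(R,A,m)]A collapses to one double scalar multiplication. A sketch of that flow using only names declared in this commit (verify_sketch itself is hypothetical):

#include "ge25519.h"
#include "sc25519.h"
#include "crypto_verify_32.h"

/* returns 0 iff sig = (R,s) verifies against pk, given hram = H(R,A,m) */
static int verify_sketch(const unsigned char sig[64],
                         const unsigned char pk[32],
                         const unsigned char hram[64])
{
    ge25519 negA, rcheck;            /* unpackneg gives -A directly */
    sc25519 h, s;
    unsigned char rbuf[32];

    if (ge25519_unpackneg_vartime(&negA, pk)) return -1;
    sc25519_from64bytes(&h, hram);
    sc25519_from32bytes(&s, sig + 32);
    /* rcheck = [h](-A) + [s]B; equals R exactly when the signature is valid */
    ge25519_double_scalarmult_vartime(&rcheck, &negA, &h, &s);
    ge25519_pack(rbuf, &rcheck);
    return crypto_verify_32(rbuf, sig);
}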


@@ -0,0 +1,8 @@
#include "ge25519.h"
void ge25519_add(ge25519_p3 *r, const ge25519_p3 *p, const ge25519_p3 *q)
{
ge25519_p1p1 grp1p1;
ge25519_add_p1p1(&grp1p1, p, q);
ge25519_p1p1_to_p3(r, &grp1p1);
}
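
choose_t, declared in the header above, performs the table lookups for fixed-base scalar multiplication; the 768-line constant block at the end of this diff appears to be its 24 KB table of precomputed base-point multiples in the three-field ge25519_niels layout (y-x, y+x, 2dt). The real choose_t is assembly; a minimal C sketch of the branch-free masked select it relies on (select_niels is an illustrative name):

#include <stdint.h>
#include "ge25519.h"

/* Copy table[idx] into *r while touching every entry, so the memory
   access pattern and branches do not depend on the secret index. */
static void select_niels(ge25519_niels *r, const ge25519_niels *table,
                         unsigned long long n, unsigned long long idx)
{
    *r = table[0];
    for (unsigned long long i = 1; i < n; i++) {
        uint64_t mask = (uint64_t)0 - (uint64_t)(i == idx); /* all-1s or 0 */
        for (int j = 0; j < 4; j++) {
            r->ysubx.v[j] ^= mask & (r->ysubx.v[j] ^ table[i].ysubx.v[j]);
            r->xaddy.v[j] ^= mask & (r->xaddy.v[j] ^ table[i].xaddy.v[j]);
            r->t2d.v[j]   ^= mask & (r->t2d.v[j]   ^ table[i].t2d.v[j]);
        }
    }
}

A complete version would also handle choose_t's signed digits, negating a selected entry by swapping ysubx with xaddy and negating t2d.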

File diff suppressed because it is too large


@@ -0,0 +1,7 @@
#include "ge25519.h"
const ge25519 ge25519_base = {{{0xC9562D608F25D51A, 0x692CC7609525A7B2, 0xC0A4E231FDD6DC5C, 0x216936D3CD6E53FE}},
{{0x6666666666666658, 0x6666666666666666, 0x6666666666666666, 0x6666666666666666}},
{{0x0000000000000001, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000}},
{{0x6DDE8AB3A5B7DDA3, 0x20F09F80775152F5, 0x66EA4E8E64ABE37D, 0x67875F0FD78B7665}}};

File diff suppressed because it is too large


@@ -0,0 +1,768 @@
{{{0x9d103905d740913e, 0xfd399f05d140beb3, 0xa5c18434688f8a09, 0x44fd2f9298f81267}},
{{0x2fbc93c6f58c3b85, 0xcf932dc6fb8c0e19, 0x270b4898643d42c2, 0x07cf9d3a33d4ba65}},
{{0xdbbd15674b6fbb59, 0x41e13f00eea2a5ea, 0xcdd49d1cc957c6fa, 0x4f0ebe1faf16ecca}}},
{{{0x8a99a56042b4d5a8, 0x8f2b810c4e60acf6, 0xe09e236bb16e37aa, 0x6bb595a669c92555}},
{{0x9224e7fc933c71d7, 0x9f469d967a0ff5b5, 0x5aa69a65e1d60702, 0x590c063fa87d2e2e}},
{{0x6e347eaadad36802, 0xbaf3599383ee4805, 0x3bcabe10e6076826, 0x49314f0a165ed1b8}}},
{{{0x56611fe8a4fcd265, 0x3bd353fde5c1ba7d, 0x8131f31a214bd6bd, 0x2ab91587555bda62}},
{{0xaf25b0a84cee9730, 0x025a8430e8864b8a, 0xc11b50029f016732, 0x7a164e1b9a80f8f4}},
{{0x9bf211f4f1674834, 0xb84e6b17f62df895, 0xd7de6f075b722a4e, 0x549a04b963bb2a21}}},
{{{0x95fe050a056818bf, 0x327e89715660faa9, 0xc3e8e3cd06a05073, 0x27933f4c7445a49a}},
{{0x287351b98efc099f, 0x6765c6f47dfd2538, 0xca348d3dfb0a9265, 0x680e910321e58727}},
{{0xbf1e45ece51426b0, 0xe32bc63d6dba0f94, 0xe42974d58cf852c0, 0x44f079b1b0e64c18}}},
{{{0x7f9182c3a447d6ba, 0xd50014d14b2729b7, 0xe33cf11cb864a087, 0x154a7e73eb1b55f3}},
{{0xa212bc4408a5bb33, 0x8d5048c3c75eed02, 0xdd1beb0c5abfec44, 0x2945ccf146e206eb}},
{{0xc832a179e7d003b3, 0x5f729d0a00124d7e, 0x62c1d4a10e6d8ff3, 0x68b8ac5938b27a98}}},
{{{0x499806b67b7d8ca4, 0x575be28427d22739, 0xbb085ce7204553b9, 0x38b64c41ae417884}},
{{0x3a0ceeeb77157131, 0x9b27158900c8af88, 0x8065b668da59a736, 0x51e57bb6a2cc38bd}},
{{0x8f9dad91689de3a4, 0x175f2428f8fb9137, 0x050ab5329fcfb988, 0x7865dfa21354c09f}}},
{{{0xba6f2c9aaa3221b1, 0x6ca021533bba23a7, 0x9dea764f92192c3a, 0x1d6edd5d2e5317e0}},
{{0x6b1a5cd0944ea3bf, 0x7470353ab39dc0d2, 0x71b2528228542e49, 0x461bea69283c927e}},
{{0x217a8aacab0fda36, 0xa528c6543d3549c8, 0x37d05b8b13ab7568, 0x233cef623a2cbc37}}},
{{{0xe2a75dedf39234d9, 0x963d7680e1b558f9, 0x2c2741ac6e3c23fb, 0x3a9024a1320e01c3}},
{{0x59b7596604dd3e8f, 0x6cb30377e288702c, 0xb1339c665ed9c323, 0x0915e76061bce52f}},
{{0xdf7de835a834a37e, 0x8be19cda689857ea, 0x2c1185367167b326, 0x589eb3d9dbefd5c2}}},
{{{0xed5b635449aa515e, 0xa865c49f0bc6823a, 0x850c1fe95b42d1c4, 0x30d76d6f03d315b9}},
{{0x2eccdd0e632f9c1d, 0x51d0b69676893115, 0x52dfb76ba8637a58, 0x6dd37d49a00eef39}},
{{0x6c4444172106e4c7, 0xfb53d680928d7f69, 0xb4739ea4694d3f26, 0x10c697112e864bb0}}},
{{{0x6493c4277dbe5fde, 0x265d4fad19ad7ea2, 0x0e00dfc846304590, 0x25e61cabed66fe09}},
{{0x0ca62aa08358c805, 0x6a3d4ae37a204247, 0x7464d3a63b11eddc, 0x03bf9baf550806ef}},
{{0x3f13e128cc586604, 0x6f5873ecb459747e, 0xa0b63dedcc1268f5, 0x566d78634586e22c}}},
{{{0x1637a49f9cc10834, 0xbc8e56d5a89bc451, 0x1cb5ec0f7f7fd2db, 0x33975bca5ecc35d9}},
{{0xa1054285c65a2fd0, 0x6c64112af31667c3, 0x680ae240731aee58, 0x14fba5f34793b22a}},
{{0x3cd746166985f7d4, 0x593e5e84c9c80057, 0x2fc3f2b67b61131e, 0x14829cea83fc526c}}},
{{{0xff437b8497dd95c2, 0x6c744e30aa4eb5a7, 0x9e0c5d613c85e88b, 0x2fd9c71e5f758173}},
{{0x21e70b2f4e71ecb8, 0xe656ddb940a477e3, 0xbf6556cece1d4f80, 0x05fc3bc4535d7b7e}},
{{0x24b8b3ae52afdedd, 0x3495638ced3b30cf, 0x33a4bc83a9be8195, 0x373767475c651f04}}},
{{{0x2fba99fd40d1add9, 0xb307166f96f4d027, 0x4363f05215f03bae, 0x1fbea56c3b18f999}},
{{0x634095cb14246590, 0xef12144016c15535, 0x9e38140c8910bc60, 0x6bf5905730907c8c}},
{{0x0fa778f1e1415b8a, 0x06409ff7bac3a77e, 0x6f52d7b89aa29a50, 0x02521cf67a635a56}}},
{{{0x513fee0b0a9d5294, 0x8f98e75c0fdf5a66, 0xd4618688bfe107ce, 0x3fa00a7e71382ced}},
{{0xb1146720772f5ee4, 0xe8f894b196079ace, 0x4af8224d00ac824a, 0x001753d9f7cd6cc4}},
{{0x3c69232d963ddb34, 0x1dde87dab4973858, 0xaad7d1f9a091f285, 0x12b5fe2fa048edb6}}},
{{{0x71f0fbc496fce34d, 0x73b9826badf35bed, 0xd2047261ff28c561, 0x749b76f96fb1206f}},
{{0xdf2b7c26ad6f1e92, 0x4b66d323504b8913, 0x8c409dc0751c8bc3, 0x6f7e93c20796c7b8}},
{{0x1f5af604aea6ae05, 0xc12351f1bee49c99, 0x61a808b5eeff6b66, 0x0fcec10f01e02151}}},
{{{0x644d58a649fe1e44, 0x21fcaea231ad777e, 0x02441c5a887fd0d2, 0x4901aa7183c511f3}},
{{0x3df2d29dc4244e45, 0x2b020e7493d8de0a, 0x6cc8067e820c214d, 0x413779166feab90a}},
{{0x08b1b7548c1af8f0, 0xce0f7a7c246299b4, 0xf760b0f91e06d939, 0x41bb887b726d1213}}},
{{{0x40e87d44744346be, 0x1d48dad415b52b25, 0x7c3a8a18a13b603e, 0x4eb728c12fcdbdf7}},
{{0x7e234c597c6691ae, 0x64889d3d0a85b4c8, 0xdae2c90c354afae7, 0x0a871e070c6a9e1d}},
{{0x3301b5994bbc8989, 0x736bae3a5bdd4260, 0x0d61ade219d59e3c, 0x3ee7300f2685d464}}},
{{{0xf5d255e49e7dd6b7, 0x8016115c610b1eac, 0x3c99975d92e187ca, 0x13815762979125c2}},
{{0x43fa7947841e7518, 0xe5c6fa59639c46d7, 0xa1065e1de3052b74, 0x7d47c6a2cfb89030}},
{{0x3fdad0148ef0d6e0, 0x9d3e749a91546f3c, 0x71ec621026bb8157, 0x148cf58d34c9ec80}}},
{{{0x46a492f67934f027, 0x469984bef6840aa9, 0x5ca1bc2a89611854, 0x3ff2fa1ebd5dbbd4}},
{{0xe2572f7d9ae4756d, 0x56c345bb88f3487f, 0x9fd10b6d6960a88d, 0x278febad4eaea1b9}},
{{0xb1aa681f8c933966, 0x8c21949c20290c98, 0x39115291219d3c52, 0x4104dd02fe9c677b}}},
{{{0x72b2bf5e1124422a, 0xa1fa0c3398a33ab5, 0x94cb6101fa52b666, 0x2c863b00afaf53d5}},
{{0x81214e06db096ab8, 0x21a8b6c90ce44f35, 0x6524c12a409e2af5, 0x0165b5a48efca481}},
{{0xf190a474a0846a76, 0x12eff984cd2f7cc0, 0x695e290658aa2b8f, 0x591b67d9bffec8b8}}},
{{{0x312f0d1c80b49bfa, 0x5979515eabf3ec8a, 0x727033c09ef01c88, 0x3de02ec7ca8f7bcb}},
{{0x99b9b3719f18b55d, 0xe465e5faa18c641e, 0x61081136c29f05ed, 0x489b4f867030128b}},
{{0xd232102d3aeb92ef, 0xe16253b46116a861, 0x3d7eabe7190baa24, 0x49f5fbba496cbebf}}},
{{{0x30949a108a5bcfd4, 0xdc40dd70bc6473eb, 0x92c294c1307c0d1c, 0x5604a86dcbfa6e74}},
{{0x155d628c1e9c572e, 0x8a4d86acc5884741, 0x91a352f6515763eb, 0x06a1a6c28867515b}},
{{0x7288d1d47c1764b6, 0x72541140e0418b51, 0x9f031a6018acf6d1, 0x20989e89fe2742c6}}},
{{{0x499777fd3a2dcc7f, 0x32857c2ca54fd892, 0xa279d864d207e3a0, 0x0403ed1d0ca67e29}},
{{0x1674278b85eaec2e, 0x5621dc077acb2bdf, 0x640a4c1661cbf45a, 0x730b9950f70595d3}},
{{0xc94b2d35874ec552, 0xc5e6c8cf98246f8d, 0xf7cb46fa16c035ce, 0x5bd7454308303dcc}}},
{{{0x7f9ad19528b24cc2, 0x7f6b54656335c181, 0x66b8b66e4fc07236, 0x133a78007380ad83}},
{{0x85c4932115e7792a, 0xc64c89a2bdcdddc9, 0x9d1e3da8ada3d762, 0x5bb7db123067f82c}},
{{0x0961f467c6ca62be, 0x04ec21d6211952ee, 0x182360779bd54770, 0x740dca6d58f0e0d2}}},
{{{0xdf48ee0752cfce4e, 0xc3fffaf306ec08b7, 0x05710b2ab95459c4, 0x161d25fa963ea38d}},
{{0x231a8c570478433c, 0xb7b5270ec281439d, 0xdbaa99eae3d9079f, 0x2c03f5256c2b03d9}},
{{0x790f18757b53a47d, 0x307b0130cf0c5879, 0x31903d77257ef7f9, 0x699468bdbd96bbaf}}},
{{{0xbd1f2f46f4dafecf, 0x7cef0114a47fd6f7, 0xd31ffdda4a47b37f, 0x525219a473905785}},
{{0xd8dd3de66aa91948, 0x485064c22fc0d2cc, 0x9b48246634fdea2f, 0x293e1c4e6c4a2e3a}},
{{0x376e134b925112e1, 0x703778b5dca15da0, 0xb04589af461c3111, 0x5b605c447f032823}}},
{{{0xb965805920c47c89, 0xe7f0100c923b8fcc, 0x0001256502e2ef77, 0x24a76dcea8aeb3ee}},
{{0x3be9fec6f0e7f04c, 0x866a579e75e34962, 0x5542ef161e1de61a, 0x2f12fef4cc5abdd5}},
{{0x0a4522b2dfc0c740, 0x10d06e7f40c9a407, 0xc6cf144178cff668, 0x5e607b2518a43790}}},
{{{0x58b31d8f6cdf1818, 0x35cfa74fc36258a2, 0xe1b3ff4f66e61d6e, 0x5067acab6ccdd5f7}},
{{0xa02c431ca596cf14, 0xe3c42d40aed3e400, 0xd24526802e0f26db, 0x201f33139e457068}},
{{0xfd527f6b08039d51, 0x18b14964017c0006, 0xd5220eb02e25a4a8, 0x397cba8862460375}}},
{{{0x30c13093f05959b2, 0xe23aa18de9a97976, 0x222fd491721d5e26, 0x2339d320766e6c3a}},
{{0x7815c3fbc81379e7, 0xa6619420dde12af1, 0xffa9c0f885a8fdd5, 0x771b4022c1e1c252}},
{{0xd87dd986513a2fa7, 0xf5ac9b71f9d4cf08, 0xd06bc31b1ea283b3, 0x331a189219971a76}}},
{{{0xf5166f45fb4f80c6, 0x9c36c7de61c775cf, 0xe3d4e81b9041d91c, 0x31167c6b83bdfe21}},
{{0x26512f3a9d7572af, 0x5bcbe28868074a9e, 0x84edc1c11180f7c4, 0x1ac9619ff649a67b}},
{{0xf22b3842524b1068, 0x5068343bee9ce987, 0xfc9d71844a6250c8, 0x612436341f08b111}}},
{{{0xd99d41db874e898d, 0x09fea5f16c07dc20, 0x793d2c67d00f9bbc, 0x46ebe2309e5eff40}},
{{0x8b6349e31a2d2638, 0x9ddfb7009bd3fd35, 0x7f8bf1b8a3a06ba4, 0x1522aa3178d90445}},
{{0x2c382f5369614938, 0xdafe409ab72d6d10, 0xe8c83391b646f227, 0x45fe70f50524306c}}},
{{{0xda4875a6960c0b8c, 0x5b68d076ef0e2f20, 0x07fb51cf3d0b8fd4, 0x428d1623a0e392d4}},
{{0x62f24920c8951491, 0x05f007c83f630ca2, 0x6fbb45d2f5c9d4b8, 0x16619f6db57a2245}},
{{0x084f4a4401a308fd, 0xa82219c376a5caac, 0xdeb8de4643d1bc7d, 0x1d81592d60bd38c6}}},
{{{0x61368756a60dac5f, 0x17e02f6aebabdc57, 0x7f193f2d4cce0f7d, 0x20234a7789ecdcf0}},
{{0x8765b69f7b85c5e8, 0x6ff0678bd168bab2, 0x3a70e77c1d330f9b, 0x3a5f6d51b0af8e7c}},
{{0x76d20db67178b252, 0x071c34f9d51ed160, 0xf62a4a20b3e41170, 0x7cd682353cffe366}}},
{{{0x0be1a45bd887fab6, 0x2a846a32ba403b6e, 0xd9921012e96e6000, 0x2838c8863bdc0943}},
{{0xa665cd6068acf4f3, 0x42d92d183cd7e3d3, 0x5759389d336025d9, 0x3ef0253b2b2cd8ff}},
{{0xd16bb0cf4a465030, 0xfa496b4115c577ab, 0x82cfae8af4ab419d, 0x21dcb8a606a82812}}},
{{{0x5c6004468c9d9fc8, 0x2540096ed42aa3cb, 0x125b4d4c12ee2f9c, 0x0bc3d08194a31dab}},
{{0x9a8d00fabe7731ba, 0x8203607e629e1889, 0xb2cc023743f3d97f, 0x5d840dbf6c6f678b}},
{{0x706e380d309fe18b, 0x6eb02da6b9e165c7, 0x57bbba997dae20ab, 0x3a4276232ac196dd}}},
{{{0x4b42432c8a7084fa, 0x898a19e3dfb9e545, 0xbe9f00219c58e45d, 0x1ff177cea16debd1}},
{{0x3bf8c172db447ecb, 0x5fcfc41fc6282dbd, 0x80acffc075aa15fe, 0x0770c9e824e1a9f9}},
{{0xcf61d99a45b5b5fd, 0x860984e91b3a7924, 0xe7300919303e3e89, 0x39f264fd41500b1e}}},
{{{0xa7ad3417dbe7e29c, 0xbd94376a2b9c139c, 0xa0e91b8e93597ba9, 0x1712d73468889840}},
{{0xd19b4aabfe097be1, 0xa46dfce1dfe01929, 0xc3c908942ca6f1ff, 0x65c621272c35f14e}},
{{0xe72b89f8ce3193dd, 0x4d103356a125c0bb, 0x0419a93d2e1cfe83, 0x22f9800ab19ce272}}},
{{{0x605a368a3e9ef8cb, 0xe3e9c022a5504715, 0x553d48b05f24248f, 0x13f416cd647626e5}},
{{0x42029fdd9a6efdac, 0xb912cebe34a54941, 0x640f64b987bdf37b, 0x4171a4d38598cab4}},
{{0xfa2758aa99c94c8c, 0x23006f6fb000b807, 0xfbd291ddadda5392, 0x508214fa574bd1ab}}},
{{{0xc20269153ed6fe4b, 0xa65a6739511d77c4, 0xcbde26462c14af94, 0x22f960ec6faba74b}},
{{0x461a15bb53d003d6, 0xb2102888bcf3c965, 0x27c576756c683a5a, 0x3a7758a4c86cb447}},
{{0x548111f693ae5076, 0x1dae21df1dfd54a6, 0x12248c90f3115e65, 0x5d9fd15f8de7f494}}},
{{{0x031408d36d63727f, 0x6a379aefd7c7b533, 0xa9e18fc5ccaee24b, 0x332f35914f8fbed3}},
{{0x3f244d2aeed7521e, 0x8e3a9028432e9615, 0xe164ba772e9c16d4, 0x3bc187fa47eb98d8}},
{{0x6d470115ea86c20c, 0x998ab7cb6c46d125, 0xd77832b53a660188, 0x450d81ce906fba03}}},
{{{0x6e7bb6a1a6205275, 0xaa4f21d7413c8e83, 0x6f56d155e88f5cb2, 0x2de25d4ba6345be1}},
{{0xd074d8961cae743f, 0xf86d18f5ee1c63ed, 0x97bdc55be7f4ed29, 0x4cbad279663ab108}},
{{0x80d19024a0d71fcd, 0xc525c20afb288af8, 0xb1a3974b5f3a6419, 0x7d7fbcefe2007233}}},
{{{0xfaef1e6a266b2801, 0x866c68c4d5739f16, 0xf68a2fbc1b03762c, 0x5975435e87b75a8d}},
{{0xcd7c5dc5f3c29094, 0xc781a29a2a9105ab, 0x80c61d36421c3058, 0x4f9cd196dcd8d4d7}},
{{0x199297d86a7b3768, 0xd0d058241ad17a63, 0xba029cad5c1c0c17, 0x7ccdd084387a0307}}},
{{{0xdca6422c6d260417, 0xae153d50948240bd, 0xa9c0c1b4fb68c677, 0x428bd0ed61d0cf53}},
{{0x9b0c84186760cc93, 0xcdae007a1ab32a99, 0xa88dec86620bda18, 0x3593ca848190ca44}},
{{0x9213189a5e849aa7, 0xd4d8c33565d8facd, 0x8c52545b53fdbbd1, 0x27398308da2d63e6}}},
{{{0x42c38d28435ed413, 0xbd50f3603278ccc9, 0xbb07ab1a79da03ef, 0x269597aebe8c3355}},
{{0xb9a10e4c0a702453, 0x0fa25866d57d1bde, 0xffb9d9b5cd27daf7, 0x572c2945492c33fd}},
{{0xc77fc745d6cd30be, 0xe4dfe8d3e3baaefb, 0xa22c8830aa5dda0c, 0x7f985498c05bca80}}},
{{{0x3849ce889f0be117, 0x8005ad1b7b54a288, 0x3da3c39f23fc921c, 0x76c2ec470a31f304}},
{{0xd35615520fbf6363, 0x08045a45cf4dfba6, 0xeec24fbc873fa0c2, 0x30f2653cd69b12e7}},
{{0x8a08c938aac10c85, 0x46179b60db276bcb, 0xa920c01e0e6fac70, 0x2f1273f1596473da}}},
{{{0x4739fc7c8ae01e11, 0xfd5274904a6aab9f, 0x41d98a8287728f2e, 0x5d9e572ad85b69f2}},
{{0x30488bd755a70bc0, 0x06d6b5a4f1d442e7, 0xead1a69ebc596162, 0x38ac1997edc5f784}},
{{0x0666b517a751b13b, 0x747d06867e9b858c, 0xacacc011454dde49, 0x22dfcd9cbfe9e69c}}},
{{{0x8ddbd2e0c30d0cd9, 0xad8e665facbb4333, 0x8f6b258c322a961f, 0x6b2916c05448c1c7}},
{{0x56ec59b4103be0a1, 0x2ee3baecd259f969, 0x797cb29413f5cd32, 0x0fe9877824cde472}},
{{0x7edb34d10aba913b, 0x4ea3cd822e6dac0e, 0x66083dff6578f815, 0x4c303f307ff00a17}}},
{{{0xd30a3bd617b28c85, 0xc5d377b739773bea, 0xc6c6e78c1e6a5cbf, 0x0d61b8f78b2ab7c4}},
{{0x29fc03580dd94500, 0xecd27aa46fbbec93, 0x130a155fc2e2a7f8, 0x416b151ab706a1d5}},
{{0x56a8d7efe9c136b0, 0xbd07e5cd58e44b20, 0xafe62fda1b57e0ab, 0x191a2af74277e8d2}}},
{{{0xce16f74bc53c1431, 0x2b9725ce2072edde, 0xb8b9c36fb5b23ee7, 0x7e2e0e450b5cc908}},
{{0x9fe62b434f460efb, 0xded303d4a63607d6, 0xf052210eb7a0da24, 0x237e7dbe00545b93}},
{{0x013575ed6701b430, 0x231094e69f0bfd10, 0x75320f1583e47f22, 0x71afa699b11155e3}}},
{{{0x65ce6f9b3953b61d, 0xc65839eaafa141e6, 0x0f435ffda9f759fe, 0x021142e9c2b1c28e}},
{{0xea423c1c473b50d6, 0x51e87a1f3b38ef10, 0x9b84bf5fb2c9be95, 0x00731fbc78f89a1c}},
{{0xe430c71848f81880, 0xbf960c225ecec119, 0xb6dae0836bba15e3, 0x4c4d6f3347e15808}}},
{{{0x18f7eccfc17d1fc9, 0x6c75f5a651403c14, 0xdbde712bf7ee0cdf, 0x193fddaaa7e47a22}},
{{0x2f0cddfc988f1970, 0x6b916227b0b9f51b, 0x6ec7b6c4779176be, 0x38bf9500a88f9fa8}},
{{0x1fd2c93c37e8876f, 0xa2f61e5a18d1462c, 0x5080f58239241276, 0x6a6fb99ebf0d4969}}},
{{{0x6a46c1bb560855eb, 0x2416bb38f893f09d, 0xd71d11378f71acc1, 0x75f76914a31896ea}},
{{0xeeb122b5b6e423c6, 0x939d7010f286ff8e, 0x90a92a831dcf5d8c, 0x136fda9f42c5eb10}},
{{0xf94cdfb1a305bdd1, 0x0f364b9d9ff82c08, 0x2a87d8a5c3bb588a, 0x022183510be8dcba}}},
{{{0x4af766385ead2d14, 0xa08ed880ca7c5830, 0x0d13a6e610211e3d, 0x6a071ce17b806c03}},
{{0x9d5a710143307a7f, 0xb063de9ec47da45f, 0x22bbfe52be927ad3, 0x1387c441fd40426c}},
{{0xb5d3c3d187978af8, 0x722b5a3d7f0e4413, 0x0d7b4848bb477ca0, 0x3171b26aaf1edc92}}},
{{{0xa92f319097564ca8, 0xff7bb84c2275e119, 0x4f55fe37a4875150, 0x221fd4873cf0835a}},
{{0xa60db7d8b28a47d1, 0xa6bf14d61770a4f1, 0xd4a1f89353ddbd58, 0x6c514a63344243e9}},
{{0x2322204f3a156341, 0xfb73e0e9ba0a032d, 0xfce0dd4c410f030e, 0x48daa596fb924aaa}}},
{{{0x6eca8e665ca59cc7, 0xa847254b2e38aca0, 0x31afc708d21e17ce, 0x676dd6fccad84af7}},
{{0x14f61d5dc84c9793, 0x9941f9e3ef418206, 0xcdf5b88f346277ac, 0x58c837fa0e8a79a9}},
{{0x0cf9688596fc9058, 0x1ddcbbf37b56a01b, 0xdcc2e77d4935d66a, 0x1c4f73f2c6a57f0a}}},
{{{0x0e7a4fbd305fa0bb, 0x829d4ce054c663ad, 0xf421c3832fe33848, 0x795ac80d1bf64c42}},
{{0xb36e706efc7c3484, 0x73dfc9b4c3c1cf61, 0xeb1d79c9781cc7e5, 0x70459adb7daf675c}},
{{0x1b91db4991b42bb3, 0x572696234b02dcca, 0x9fdf9ee51f8c78dc, 0x5fe162848ce21fd3}}},
{{{0x4e59214fe194961a, 0x49be7dc70d71cd4f, 0x9300cfd23b50f22d, 0x4789d446fc917232}},
{{0x2879852d5d7cb208, 0xb8dedd70687df2e7, 0xdc0bffab21687891, 0x2b44c043677daa35}},
{{0x1a1c87ab074eb78e, 0xfac6d18e99daf467, 0x3eacbbcd484f9067, 0x60c52eef2bb9a4e4}}},
{{{0x0b5d89bc3bfd8bf1, 0xb06b9237c9f3551a, 0x0e4c16b0d53028f5, 0x10bc9c312ccfcaab}},
{{0x702bc5c27cae6d11, 0x44c7699b54a48cab, 0xefbc4056ba492eb2, 0x70d77248d9b6676d}},
{{0xaa8ae84b3ec2a05b, 0x98699ef4ed1781e0, 0x794513e4708e85d1, 0x63755bd3a976f413}}},
{{{0xb55fa03e2ad10853, 0x356f75909ee63569, 0x9ff9f1fdbe69b890, 0x0d8cc1c48bc16f84}},
{{0x3dc7101897f1acb7, 0x5dda7d5ec165bbd8, 0x508e5b9c0fa1020f, 0x2763751737c52a56}},
{{0x029402d36eb419a9, 0xf0b44e7e77b460a5, 0xcfa86230d43c4956, 0x70c2dd8a7ad166e7}}},
{{{0x656194509f6fec0e, 0xee2e7ea946c6518d, 0x9733c1f367e09b5c, 0x2e0fac6363948495}},
{{0x91d4967db8ed7e13, 0x74252f0ad776817a, 0xe40982e00d852564, 0x32b8613816a53ce5}},
{{0x79e7f7bee448cd64, 0x6ac83a67087886d0, 0xf89fd4d9a0e4db2e, 0x4179215c735a4f41}}},
{{{0x8c7094e7d7dced2a, 0x97fb8ac347d39c70, 0xe13be033a906d902, 0x700344a30cd99d76}},
{{0xe4ae33b9286bcd34, 0xb7ef7eb6559dd6dc, 0x278b141fb3d38e1f, 0x31fa85662241c286}},
{{0xaf826c422e3622f4, 0xc12029879833502d, 0x9bc1b7e12b389123, 0x24bb2312a9952489}}},
{{{0xb1a8ed1732de67c3, 0x3cb49418461b4948, 0x8ebd434376cfbcd2, 0x0fee3e871e188008}},
{{0x41f80c2af5f85c6b, 0x687284c304fa6794, 0x8945df99a3ba1bad, 0x0d1d2af9ffeb5d16}},
{{0xa9da8aa132621edf, 0x30b822a159226579, 0x4004197ba79ac193, 0x16acd79718531d76}}},
{{{0x72df72af2d9b1d3d, 0x63462a36a432245a, 0x3ecea07916b39637, 0x123e0ef6b9302309}},
{{0xc959c6c57887b6ad, 0x94e19ead5f90feba, 0x16e24e62a342f504, 0x164ed34b18161700}},
{{0x487ed94c192fe69a, 0x61ae2cea3a911513, 0x877bf6d3b9a4de27, 0x78da0fc61073f3eb}}},
{{{0x5bf15d28e52bc66a, 0x2c47e31870f01a8e, 0x2419afbc06c28bdd, 0x2d25deeb256b173a}},
{{0xa29f80f1680c3a94, 0x71f77e151ae9e7e6, 0x1100f15848017973, 0x054aa4b316b38ddd}},
{{0xdfc8468d19267cb8, 0x0b28789c66e54daf, 0x2aeb1d2a666eec17, 0x134610a6ab7da760}}},
{{{0x51138ec78df6b0fe, 0x5397da89e575f51b, 0x09207a1d717af1b9, 0x2102fdba2b20d650}},
{{0xcd2a65e777d1f515, 0x548991878faa60f1, 0xb1b73bbcdabc06e5, 0x654878cba97cc9fb}},
{{0x969ee405055ce6a1, 0x36bca7681251ad29, 0x3a1af517aa7da415, 0x0ad725db29ecb2ba}}},
{{{0xdc4267b1834e2457, 0xb67544b570ce1bc5, 0x1af07a0bf7d15ed7, 0x4aefcffb71a03650}},
{{0xfec7bc0c9b056f85, 0x537d5268e7f5ffd7, 0x77afc6624312aefa, 0x4f675f5302399fd9}},
{{0xc32d36360415171e, 0xcd2bef118998483b, 0x870a6eadd0945110, 0x0bccbb72a2a86561}}},
{{{0x185e962feab1a9c8, 0x86e7e63565147dcd, 0xb092e031bb5b6df2, 0x4024f0ab59d6b73e}},
{{0x186d5e4c50fe1296, 0xe0397b82fee89f7e, 0x3bc7f6c5507031b0, 0x6678fd69108f37c2}},
{{0x1586fa31636863c2, 0x07f68c48572d33f2, 0x4f73cc9f789eaefc, 0x2d42e2108ead4701}}},
{{{0x97f5131594dfd29b, 0x6155985d313f4c6a, 0xeba13f0708455010, 0x676b2608b8d2d322}},
{{0x21717b0d0f537593, 0x914e690b131e064c, 0x1bb687ae752ae09f, 0x420bf3a79b423c6e}},
{{0x8138ba651c5b2b47, 0x8671b6ec311b1b80, 0x7bff0cb1bc3135b0, 0x745d2ffa9c0cf1e0}}},
{{{0xbf525a1e2bc9c8bd, 0xea5b260826479d81, 0xd511c70edf0155db, 0x1ae23ceb960cf5d0}},
{{0x6036df5721d34e6a, 0xb1db8827997bb3d0, 0xd3c209c3c8756afa, 0x06e15be54c1dc839}},
{{0x5b725d871932994a, 0x32351cb5ceb1dab0, 0x7dc41549dab7ca05, 0x58ded861278ec1f7}}},
{{{0xd8173793f266c55c, 0xc8c976c5cc454e49, 0x5ce382f8bc26c3a8, 0x2ff39de85485f6f9}},
{{0x2dfb5ba8b6c2c9a8, 0x48eeef8ef52c598c, 0x33809107f12d1573, 0x08ba696b531d5bd8}},
{{0x77ed3eeec3efc57a, 0x04e05517d4ff4811, 0xea3d7a3ff1a671cb, 0x120633b4947cfe54}}},
{{{0x0b94987891610042, 0x4ee7b13cecebfae8, 0x70be739594f0a4c0, 0x35d30a99b4d59185}},
{{0x82bd31474912100a, 0xde237b6d7e6fbe06, 0xe11e761911ea79c6, 0x07433be3cb393bde}},
{{0xff7944c05ce997f4, 0x575d3de4b05c51a3, 0x583381fd5a76847c, 0x2d873ede7af6da9f}}},
{{{0x157a316443373409, 0xfab8b7eef4aa81d9, 0xb093fee6f5a64806, 0x2e773654707fa7b6}},
{{0xaa6202e14e5df981, 0xa20d59175015e1f5, 0x18a275d3bae21d6c, 0x0543618a01600253}},
{{0x0deabdf4974c23c1, 0xaa6f0a259dce4693, 0x04202cb8a29aba2c, 0x4b1443362d07960d}}},
{{{0xccc4b7c7b66e1f7a, 0x44157e25f50c2f7e, 0x3ef06dfc713eaf1c, 0x582f446752da63f7}},
{{0x967c54e91c529ccb, 0x30f6269264c635fb, 0x2747aff478121965, 0x17038418eaf66f5c}},
{{0xc6317bd320324ce4, 0xa81042e8a4488bc4, 0xb21ef18b4e5a1364, 0x0c2a1c4bcda28dc9}}},
{{{0xd24dc7d06f1f0447, 0xb2269e3edb87c059, 0xd15b0272fbb2d28f, 0x7c558bd1c6f64877}},
{{0xedc4814869bd6945, 0x0d6d907dbe1c8d22, 0xc63bd212d55cc5ab, 0x5a6a9b30a314dc83}},
{{0xd0ec1524d396463d, 0x12bb628ac35a24f0, 0xa50c3a791cbc5fa4, 0x0404a5ca0afbafc3}}},
{{{0x8c1f40070aa743d6, 0xccbad0cb5b265ee8, 0x574b046b668fd2de, 0x46395bfdcadd9633}},
{{0x62bc9e1b2a416fd1, 0xb5c6f728e350598b, 0x04343fd83d5d6967, 0x39527516e7f8ee98}},
{{0x117fdb2d1a5d9a9c, 0x9c7745bcd1005c2a, 0xefd4bef154d56fea, 0x76579a29e822d016}}},
{{{0x45b68e7e49c02a17, 0x23cd51a2bca9a37f, 0x3ed65f11ec224c1b, 0x43a384dc9e05bdb1}},
{{0x333cb51352b434f2, 0xd832284993de80e1, 0xb5512887750d35ce, 0x02c514bb2a2777c1}},
{{0x684bd5da8bf1b645, 0xfb8bd37ef6b54b53, 0x313916d7a9b0d253, 0x1160920961548059}}},
{{{0xb44d166929dacfaa, 0xda529f4c8413598f, 0xe9ef63ca453d5559, 0x351e125bc5698e0b}},
{{0x7a385616369b4dcd, 0x75c02ca7655c3563, 0x7dc21bf9d4f18021, 0x2f637d7491e6e042}},
{{0xd4b49b461af67bbe, 0xd603037ac8ab8961, 0x71dee19ff9a699fb, 0x7f182d06e7ce2a9a}}},
{{{0x7a7c8e64ab0168ec, 0xcb5a4a5515edc543, 0x095519d347cd0eda, 0x67d4ac8c343e93b0}},
{{0x09454b728e217522, 0xaa58e8f4d484b8d8, 0xd358254d7f46903c, 0x44acc043241c5217}},
{{0x1c7d6bbb4f7a5777, 0x8b35fed4918313e1, 0x4adca1c6c96b4684, 0x556d1c8312ad71bd}}},
{{{0x17ef40e30c8d3982, 0x31f7073e15a3fa34, 0x4f21f3cb0773646e, 0x746c6c6d1d824eff}},
{{0x81f06756b11be821, 0x0faff82310a3f3dd, 0xf8b2d0556a99465d, 0x097abe38cc8c7f05}},
{{0x0c49c9877ea52da4, 0x4c4369559bdc1d43, 0x022c3809f7ccebd2, 0x577e14a34bee84bd}}},
{{{0xf0e268ac61a73b0a, 0xf2fafa103791a5f5, 0xc1e13e826b6d00e9, 0x60fa7ee96fd78f42}},
{{0x94fecebebd4dd72b, 0xf46a4fda060f2211, 0x124a5977c0c8d1ff, 0x705304b8fb009295}},
{{0xb63d1d354d296ec6, 0xf3c3053e5fad31d8, 0x670b958cb4bd42ec, 0x21398e0ca16353fd}}},
{{{0x89f5058a382b33f3, 0x5ae2ba0bad48c0b4, 0x8f93b503a53db36e, 0x5aa3ed9d95a232e6}},
{{0x2798aaf9b4b75601, 0x5eac72135c8dad72, 0xd2ceaa6161b7a023, 0x1bbfb284e98f7d4e}},
{{0x656777e9c7d96561, 0xcb2b125472c78036, 0x65053299d9506eee, 0x4a07e14e5e8957cc}}},
{{{0x4ee412cb980df999, 0xa315d76f3c6ec771, 0xbba5edde925c77fd, 0x3f0bac391d313402}},
{{0x240b58cdc477a49b, 0xfd38dade6447f017, 0x19928d32a7c86aad, 0x50af7aed84afa081}},
{{0x6e4fde0115f65be5, 0x29982621216109b2, 0x780205810badd6d9, 0x1921a316baebd006}}},
{{{0x89422f7edfb870fc, 0x2c296beb4f76b3bd, 0x0738f1d436c24df7, 0x6458df41e273aeb0}},
{{0xd75aad9ad9f3c18b, 0x566a0eef60b1c19c, 0x3e9a0bac255c0ed9, 0x7b049deca062c7f5}},
{{0xdccbe37a35444483, 0x758879330fedbe93, 0x786004c312c5dd87, 0x6093dccbc2950e64}}},
{{{0x1ff39a8585e0706d, 0x36d0a5d8b3e73933, 0x43b9f2e1718f453b, 0x57d1ea084827a97c}},
{{0x6bdeeebe6084034b, 0x3199c2b6780fb854, 0x973376abb62d0695, 0x6e3180c98b647d90}},
{{0xee7ab6e7a128b071, 0xa4c1596d93a88baa, 0xf7b4de82b2216130, 0x363e999ddd97bd18}}},
{{{0x96a843c135ee1fc4, 0x976eb35508e4c8cf, 0xb42f6801b58cd330, 0x48ee9b78693a052b}},
{{0x2f1848dce24baec6, 0x769b7255babcaf60, 0x90cb3c6e3cefe931, 0x231f979bc6f9b355}},
{{0x5c31de4bcc2af3c6, 0xb04bb030fe208d1f, 0xb78d7009c14fb466, 0x079bfa9b08792413}}},
{{{0xe3903a51da300df4, 0x843964233da95ab0, 0xed3cf12d0b356480, 0x038c77f684817194}},
{{0xf3c9ed80a2d54245, 0x0aa08b7877f63952, 0xd76dac63d1085475, 0x1ef4fb159470636b}},
{{0x854e5ee65b167bec, 0x59590a4296d0cdc2, 0x72b2df3498102199, 0x575ee92a4a0bff56}}},
{{{0xd4c080908a182fcf, 0x30e170c299489dbd, 0x05babd5752f733de, 0x43d4e7112cd3fd00}},
{{0x5d46bc450aa4d801, 0xc3af1227a533b9d8, 0x389e3b262b8906c2, 0x200a1e7e382f581b}},
{{0x518db967eaf93ac5, 0x71bc989b056652c0, 0xfe2b85d9567197f5, 0x050eca52651e4e38}}},
{{{0xc3431ade453f0c9c, 0xe9f5045eff703b9b, 0xfcd97ac9ed847b3d, 0x4b0ee6c21c58f4c6}},
{{0x97ac397660e668ea, 0x9b19bbfe153ab497, 0x4cb179b534eca79f, 0x6151c09fa131ae57}},
{{0x3af55c0dfdf05d96, 0xdd262ee02ab4ee7a, 0x11b2bb8712171709, 0x1fef24fa800f030b}}},
{{{0x37d653fb1aa73196, 0x0f9495303fd76418, 0xad200b09fb3a17b2, 0x544d49292fc8613e}},
{{0x22d2aff530976b86, 0x8d90b806c2d24604, 0xdca1896c4de5bae5, 0x28005fe6c8340c17}},
{{0x6aefba9f34528688, 0x5c1bff9425107da1, 0xf75bbbcd66d94b36, 0x72e472930f316dfa}}},
{{{0x2695208c9781084f, 0xb1502a0b23450ee1, 0xfd9daea603efde02, 0x5a9d2e8c2733a34c}},
{{0x07f3f635d32a7627, 0x7aaa4d865f6566f0, 0x3c85e79728d04450, 0x1fee7f000fe06438}},
{{0x765305da03dbf7e5, 0xa4daf2491434cdbd, 0x7b4ad5cdd24a88ec, 0x00f94051ee040543}}},
{{{0x8d356b23c3d330b2, 0xf21c8b9bb0471b06, 0xb36c316c6e42b83c, 0x07d79c7e8beab10d}},
{{0xd7ef93bb07af9753, 0x583ed0cf3db766a7, 0xce6998bf6e0b1ec5, 0x47b7ffd25dd40452}},
{{0x87fbfb9cbc08dd12, 0x8a066b3ae1eec29b, 0x0d57242bdb1fc1bf, 0x1c3520a35ea64bb6}}},
{{{0x80d253a6bccba34a, 0x3e61c3a13838219b, 0x90c3b6019882e396, 0x1c3d05775d0ee66f}},
{{0xcda86f40216bc059, 0x1fbb231d12bcd87e, 0xb4956a9e17c70990, 0x38750c3b66d12e55}},
{{0x692ef1409422e51a, 0xcbc0c73c2b5df671, 0x21014fe7744ce029, 0x0621e2c7d330487c}}},
{{{0xaf9860cc8259838d, 0x90ea48c1c69f9adc, 0x6526483765581e30, 0x0007d6097bd3a5bc}},
{{0xb7ae1796b0dbf0f3, 0x54dfafb9e17ce196, 0x25923071e9aaa3b4, 0x5d8e589ca1002e9d}},
{{0xc0bf1d950842a94b, 0xb2d3c363588f2e3e, 0x0a961438bb51e2ef, 0x1583d7783c1cbf86}}},
{{{0xeceea2ef5da27ae1, 0x597c3a1455670174, 0xc9a62a126609167a, 0x252a5f2e81ed8f70}},
{{0x90034704cc9d28c7, 0x1d1b679ef72cc58f, 0x16e12b5fbe5b8726, 0x4958064e83c5580a}},
{{0x0d2894265066e80d, 0xfcc3f785307c8c6b, 0x1b53da780c1112fd, 0x079c170bd843b388}}},
{{{0x0506ece464fa6fff, 0xbee3431e6205e523, 0x3579422451b8ea42, 0x6dec05e34ac9fb00}},
{{0xcdd6cd50c0d5d056, 0x9af7686dbb03573b, 0x3ca6723ff3c3ef48, 0x6768c0d7317b8acc}},
{{0x94b625e5f155c1b3, 0x417bf3a7997b7b91, 0xc22cbddc6d6b2600, 0x51445e14ddcd52f4}}},
{{{0x57502b4b3b144951, 0x8e67ff6b444bbcb3, 0xb8bd6927166385db, 0x13186f31e39295c8}},
{{0x893147ab2bbea455, 0x8c53a24f92079129, 0x4b49f948be30f7a7, 0x12e990086e4fd43d}},
{{0xf10c96b37fdfbb2e, 0x9f9a935e121ceaf9, 0xdf1136c43a5b983f, 0x77b2e3f05d3e99af}}},
{{{0x296fa9c59c2ec4de, 0xbc8b61bf4f84f3cb, 0x1c7706d917a8f908, 0x63b795fc7ad3255d}},
{{0xd598639c12ddb0a4, 0xa5d19f30c024866b, 0xd17c2f0358fce460, 0x07a195152e095e8a}},
{{0xa8368f02389e5fc8, 0x90433b02cf8de43b, 0xafa1fd5dc5412643, 0x3e8fe83d032f0137}}},
{{{0x2f8b15b90570a294, 0x94f2427067084549, 0xde1c5ae161bbfd84, 0x75ba3b797fac4007}},
{{0x08704c8de8efd13c, 0xdfc51a8e33e03731, 0xa59d5da51260cde3, 0x22d60899a6258c86}},
{{0x6239dbc070cdd196, 0x60fe8a8b6c7d8a9a, 0xb38847bceb401260, 0x0904d07b87779e5e}}},
{{{0xb4ce1fd4ddba919c, 0xcf31db3ec74c8daa, 0x2c63cc63ad86cc51, 0x43e2143fbc1dde07}},
{{0xf4322d6648f940b9, 0x06952f0cbd2d0c39, 0x167697ada081f931, 0x6240aacebaf72a6c}},
{{0xf834749c5ba295a0, 0xd6947c5bca37d25a, 0x66f13ba7e7c9316a, 0x56bdaf238db40cac}}},
{{{0x362ab9e3f53533eb, 0x338568d56eb93d40, 0x9e0e14521d5a5572, 0x1d24a86d83741318}},
{{0x1310d36cc19d3bb2, 0x062a6bb7622386b9, 0x7c9b8591d7a14f5c, 0x03aa31507e1e5754}},
{{0xf4ec7648ffd4ce1f, 0xe045eaf054ac8c1c, 0x88d225821d09357c, 0x43b261dc9aeb4859}}},
{{{0xe55b1e1988bb79bb, 0xa09ed07dc17a359d, 0xb02c2ee2603dea33, 0x326055cf5b276bc2}},
{{0x19513d8b6c951364, 0x94fe7126000bf47b, 0x028d10ddd54f9567, 0x02b4d5e242940964}},
{{0xb4a155cb28d18df2, 0xeacc4646186ce508, 0xc49cf4936c824389, 0x27a6c809ae5d3410}}},
{{{0x8ba6ebcd1f0db188, 0x37d3d73a675a5be8, 0xf22edfa315f5585a, 0x2cb67174ff60a17e}},
{{0xcd2c270ac43d6954, 0xdd4a3e576a66cab2, 0x79fa592469d7036c, 0x221503603d8c2599}},
{{0x59eecdf9390be1d0, 0xa9422044728ce3f1, 0x82891c667a94f0f4, 0x7b1df4b73890f436}}},
{{{0xe492f2e0b3b2a224, 0x7c6c9e062b551160, 0x15eb8fe20d7f7b0e, 0x61fcef2658fc5992}},
{{0x5f2e221807f8f58c, 0xe3555c9fd49409d4, 0xb2aaa88d1fb6a630, 0x68698245d352e03d}},
{{0xdbb15d852a18187a, 0xf3e4aad386ddacd7, 0x44bae2810ff6c482, 0x46cf4c473daf01cf}}},
{{{0x426525ed9ec4e5f9, 0x0e5eda0116903303, 0x72b1a7f2cbe5cadc, 0x29387bcd14eb5f40}},
{{0x213c6ea7f1498140, 0x7c1e7ef8392b4854, 0x2488c38c5629ceba, 0x1065aae50d8cc5bb}},
{{0x1c2c4525df200d57, 0x5c3b2dd6bfca674a, 0x0a07e7b1e1834030, 0x69a198e64f1ce716}}},
{{{0x9062b2e0d91a78bc, 0x47c9889cc8509667, 0x9df54a66405070b8, 0x7369e6a92493a1bf}},
{{0xe1014434dcc5caed, 0x47ed5d963c84fb33, 0x70019576ed86a0e7, 0x25b2697bd267f9e4}},
{{0x9d673ffb13986864, 0x3ca5fbd9415dc7b8, 0xe04ecc3bdf273b5e, 0x1420683db54e4cd2}}},
{{{0xb478bd1e249dd197, 0x620c35005e58c102, 0xfb02d32fccbaac5c, 0x60b63bebf508a72d}},
{{0x34eebb6fc1cc5ad0, 0x6a1b0ce99646ac8b, 0xd3b0da49a66bde53, 0x31e83b4161d081c1}},
{{0x97e8c7129e062b4f, 0x49e48f4f29320ad8, 0x5bece14b6f18683f, 0x55cf1eb62d550317}}},
{{{0x5879101065c23d58, 0x8b9d086d5094819c, 0xe2402fa912c55fa7, 0x669a6564570891d4}},
{{0x3076b5e37df58c52, 0xd73ab9dde799cc36, 0xbd831ce34913ee20, 0x1a56fbaa62ba0133}},
{{0x943e6b505c9dc9ec, 0x302557bba77c371a, 0x9873ae5641347651, 0x13c4836799c58a5c}}},
{{{0x423a5d465ab3e1b9, 0xfc13c187c7f13f61, 0x19f83664ecb5b9b6, 0x66f80c93a637b607}},
{{0xc4dcfb6a5d8bd080, 0xdeebc4ec571a4842, 0xd4b2e883b8e55365, 0x50bdc87dc8e5b827}},
{{0x606d37836edfe111, 0x32353e15f011abd9, 0x64b03ac325b73b96, 0x1dd56444725fd5ae}}},
{{{0x8fa47ff83362127d, 0xbc9f6ac471cd7c15, 0x6e71454349220c8b, 0x0e645912219f732e}},
{{0xc297e60008bac89a, 0x7d4cea11eae1c3e0, 0xf3e38be19fe7977c, 0x3a3a450f63a305cd}},
{{0x078f2f31d8394627, 0x389d3183de94a510, 0xd1e36c6d17996f80, 0x318c8d9393a9a87b}}},
{{{0xf2745d032afffe19, 0x0c9f3c497f24db66, 0xbc98d3e3ba8598ef, 0x224c7c679a1d5314}},
{{0x5d669e29ab1dd398, 0xfc921658342d9e3b, 0x55851dfdf35973cd, 0x509a41c325950af6}},
{{0xbdc06edca6f925e9, 0x793ef3f4641b1f33, 0x82ec12809d833e89, 0x05bff02328a11389}}},
{{{0x3632137023cae00b, 0x544acf0ad1accf59, 0x96741049d21a1c88, 0x780b8cc3fa2a44a7}},
{{0x6881a0dd0dc512e4, 0x4fe70dc844a5fafe, 0x1f748e6b8f4a5240, 0x576277cdee01a3ea}},
{{0x1ef38abc234f305f, 0x9a577fbd1405de08, 0x5e82a51434e62a0d, 0x5ff418726271b7a1}}},
{{{0x398e080c1789db9d, 0xa7602025f3e778f5, 0xfa98894c06bd035d, 0x106a03dc25a966be}},
{{0xe5db47e813b69540, 0xf35d2a3b432610e1, 0xac1f26e938781276, 0x29d4db8ca0a0cb69}},
{{0xd9ad0aaf333353d0, 0x38669da5acd309e5, 0x3c57658ac888f7f0, 0x4ab38a51052cbefa}}},
{{{0xda7c2b256768d593, 0x98c1c0574422ca13, 0xf1a80bd5ca0ace1d, 0x29cdd1adc088a690}},
{{0xd6cfd1ef5fddc09c, 0xe82b3efdf7575dce, 0x25d56b5d201634c2, 0x3041c6bb04ed2b9b}},
{{0x0ff2f2f9d956e148, 0xade797759f356b2e, 0x1a4698bb5f6c025c, 0x104bbd6814049a7b}}},
{{{0x51f0fd3168f1ed67, 0x2c811dcdd86f3bc2, 0x44dc5c4304d2f2de, 0x5be8cc57092a7149}},
{{0xa95d9a5fd67ff163, 0xe92be69d4cc75681, 0xb7f8024cde20f257, 0x204f2a20fb072df5}},
{{0xc8143b3d30ebb079, 0x7589155abd652e30, 0x653c3c318f6d5c31, 0x2570fb17c279161f}}},
{{{0x3efa367f2cb61575, 0xf5f96f761cd6026c, 0xe8c7142a65b52562, 0x3dcb65ea53030acd}},
{{0x192ea9550bb8245a, 0xc8e6fba88f9050d1, 0x7986ea2d88a4c935, 0x241c5f91de018668}},
{{0x28d8172940de6caa, 0x8fbf2cf022d9733a, 0x16d7fcdd235b01d1, 0x08420edd5fcdf0e5}}},
{{{0xcdff20ab8362fa4a, 0x57e118d4e21a3e6e, 0xe3179617fc39e62b, 0x0d9a53efbc1769fd}},
{{0x0358c34e04f410ce, 0xb6135b5a276e0685, 0x5d9670c7ebb91521, 0x04d654f321db889c}},
{{0x5e7dc116ddbdb5d5, 0x2954deb68da5dd2d, 0x1cb608173334a292, 0x4a7a4f2618991ad7}}},
{{{0xf4a718025fb15f95, 0x3df65f346b5c1b8f, 0xcdfcf08500e01112, 0x11b50c4cddd31848}},
{{0x24c3b291af372a4b, 0x93da8270718147f2, 0xdd84856486899ef2, 0x4a96314223e0ee33}},
{{0xa6e8274408a4ffd6, 0x738e177e9c1576d9, 0x773348b63d02b3f2, 0x4f4bce4dce6bcc51}}},
{{{0xa71fce5ae2242584, 0x26ea725692f58a9e, 0xd21a09d71cea3cf4, 0x73fcdd14b71c01e6}},
{{0x30e2616ec49d0b6f, 0xe456718fcaec2317, 0x48eb409bf26b4fa6, 0x3042cee561595f37}},
{{0x427e7079449bac41, 0x855ae36dbce2310a, 0x4cae76215f841a7c, 0x389e740c9a9ce1d6}}},
{{{0x64fcb3ae34dcb9ce, 0x97500323e348d0ad, 0x45b3f07d62c6381b, 0x61545379465a6788}},
{{0xc9bd78f6570eac28, 0xe55b0b3227919ce1, 0x65fc3eaba19b91ed, 0x25c425e5d6263690}},
{{0x3f3e06a6f1d7de6e, 0x3ef976278e062308, 0x8c14f6264e8a6c77, 0x6539a08915484759}}},
{{{0xe9d21f74c3d2f773, 0xc150544125c46845, 0x624e5ce8f9b99e33, 0x11c5e4aac5cd186c}},
{{0xddc4dbd414bb4a19, 0x19b2bc3c98424f8e, 0x48a89fd736ca7169, 0x0f65320ef019bd90}},
{{0xd486d1b1cafde0c6, 0x4f3fe6e3163b5181, 0x59a8af0dfaf2939a, 0x4cabc7bdec33072a}}},
{{{0x239e9624089c0a2e, 0xc748c4c03afe4738, 0x17dbed2a764fa12a, 0x639b93f0321c8582}},
{{0xc08f788f3f78d289, 0xfe30a72ca1404d9f, 0xf2778bfccf65cc9d, 0x7ee498165acb2021}},
{{0x7bd508e39111a1c3, 0x2b2b90d480907489, 0xe7d2aec2ae72fd19, 0x0edf493c85b602a6}}},
{{{0xaecc8158599b5a68, 0xea574f0febade20e, 0x4fe41d7422b67f07, 0x403b92e3019d4fb4}},
{{0x6767c4d284764113, 0xa090403ff7f5f835, 0x1c8fcffacae6bede, 0x04c00c54d1dfa369}},
{{0x4dc22f818b465cf8, 0x71a0f35a1480eff8, 0xaee8bfad04c7d657, 0x355bb12ab26176f4}}},
{{{0xa71e64cc7493bbf4, 0xe5bd84d9eca3b0c3, 0x0a6bc50cfa05e785, 0x0f9b8132182ec312}},
{{0xa301dac75a8c7318, 0xed90039db3ceaa11, 0x6f077cbf3bae3f2d, 0x7518eaf8e052ad8e}},
{{0xa48859c41b7f6c32, 0x0f2d60bcf4383298, 0x1815a929c9b1d1d9, 0x47c3871bbb1755c4}}},
{{{0x5144539771ec4f48, 0xf805b17dc98c5d6e, 0xf762c11a47c3c66b, 0x00b89b85764699dc}},
{{0xfbe65d50c85066b0, 0x62ecc4b0b3a299b0, 0xe53754ea441ae8e0, 0x08fea02ce8d48d5f}},
{{0x824ddd7668deead0, 0xc86445204b685d23, 0xb514cfcd5d89d665, 0x473829a74f75d537}}},
{{{0x82d2da754679c418, 0xe63bd7d8b2618df0, 0x355eef24ac47eb0a, 0x2078684c4833c6b4}},
{{0x23d9533aad3902c9, 0x64c2ddceef03588f, 0x15257390cfe12fb4, 0x6c668b4d44e4d390}},
{{0x3b48cf217a78820c, 0xf76a0ab281273e97, 0xa96c65a78c8eed7b, 0x7411a6054f8a433f}}},
{{{0x4d659d32b99dc86d, 0x044cdc75603af115, 0xb34c712cdcc2e488, 0x7c136574fb8134ff}},
{{0x579ae53d18b175b4, 0x68713159f392a102, 0x8455ecba1eef35f5, 0x1ec9a872458c398f}},
{{0xb8e6a4d400a2509b, 0x9b81d7020bc882b4, 0x57e7cc9bf1957561, 0x3add88a5c7cd6460}}},
{{{0xab895770b635dcf2, 0x02dfef6cf66c1fbc, 0x85530268beb6d187, 0x249929fccc879e74}},
{{0x85c298d459393046, 0x8f7e35985ff659ec, 0x1d2ca22af2f66e3a, 0x61ba1131a406a720}},
{{0xa3d0a0f116959029, 0x023b6b6cba7ebd89, 0x7bf15a3e26783307, 0x5620310cbbd8ece7}}},
{{{0x528993434934d643, 0xb9dbf806a51222f5, 0x8f6d878fc3f41c22, 0x37676a2a4d9d9730}},
{{0x6646b5f477e285d6, 0x40e8ff676c8f6193, 0xa6ec7311abb594dd, 0x7ec846f3658cec4d}},
{{0x9b5e8f3f1da22ec7, 0x130f1d776c01cd13, 0x214c8fcfa2989fb8, 0x6daaf723399b9dd5}}},
{{{0x5f3a7562eb3dbe47, 0xf7ea38548ebda0b8, 0x00c3e53145747299, 0x1304e9e71627d551}},
{{0x583b04bfacad8ea2, 0x29b743e8148be884, 0x2b1e583b0810c5db, 0x2b5449e58eb3bbaa}},
{{0x789814d26adc9cfe, 0x3c1bab3f8b48dd0b, 0xda0fe1fff979c60a, 0x4468de2d7c2dd693}}},
{{{0x51bb355e9419469e, 0x33e6dc4c23ddc754, 0x93a5b6d6447f9962, 0x6cce7c6ffb44bd63}},
{{0x4b9ad8c6f86307ce, 0x21113531435d0c28, 0xd4a866c5657a772c, 0x5da6427e63247352}},
{{0x1a94c688deac22ca, 0xb9066ef7bbae1ff8, 0x88ad8c388d59580f, 0x58f29abfe79f2ca8}}},
{{{0xe90ecfab8de73e68, 0x54036f9f377e76a5, 0xf0495b0bbe015982, 0x577629c4a7f41e36}},
{{0x4b5a64bf710ecdf6, 0xb14ce538462c293c, 0x3643d056d50b3ab9, 0x6af93724185b4870}},
{{0x3220024509c6a888, 0xd2e036134b558973, 0x83e236233c33289f, 0x701f25bb0caec18f}}},
{{{0xc3a8b0f8e4616ced, 0xf700660e9e25a87d, 0x61e3061ff4bca59c, 0x2e0c92bfbdc40be9}},
{{0x9d18f6d97cbec113, 0x844a06e674bfdbe4, 0x20f5b522ac4e60d6, 0x720a5bc050955e51}},
{{0x0c3f09439b805a35, 0xe84e8b376242abfc, 0x691417f35c229346, 0x0e9b9cbb144ef0ec}}},
{{{0xfbbad48ffb5720ad, 0xee81916bdbf90d0e, 0xd4813152635543bf, 0x221104eb3f337bd8}},
{{0x8dee9bd55db1beee, 0xc9c3ab370a723fb9, 0x44a8f1bf1c68d791, 0x366d44191cfd3cde}},
{{0x9e3c1743f2bc8c14, 0x2eda26fcb5856c3b, 0xccb82f0e68a7fb97, 0x4167a4e6bc593244}}},
{{{0x643b9d2876f62700, 0x5d1d9d400e7668eb, 0x1b4b430321fc0684, 0x7938bb7e2255246a}},
{{0xc2be2665f8ce8fee, 0xe967ff14e880d62c, 0xf12e6e7e2f364eee, 0x34b33370cb7ed2f6}},
{{0xcdc591ee8681d6cc, 0xce02109ced85a753, 0xed7485c158808883, 0x1176fc6e2dfe65e4}}},
{{{0xb4af6cd05b9c619b, 0x2ddfc9f4b2a58480, 0x3d4fa502ebe94dc4, 0x08fc3a4c677d5f34}},
{{0xdb90e28949770eb8, 0x98fbcc2aacf440a3, 0x21354ffeded7879b, 0x1f6a3e54f26906b6}},
{{0x60a4c199d30734ea, 0x40c085b631165cd6, 0xe2333e23f7598295, 0x4f2fad0116b900d1}}},
{{{0x44beb24194ae4e54, 0x5f541c511857ef6c, 0xa61e6b2d368d0498, 0x445484a4972ef7ab}},
{{0x962cd91db73bb638, 0xe60577aafc129c08, 0x6f619b39f3b61689, 0x3451995f2944ee81}},
{{0x9152fcd09fea7d7c, 0x4a816c94b0935cf6, 0x258e9aaa47285c40, 0x10b89ca6042893b7}}},
{{{0x3d5947499718289c, 0x12ebf8c524533f26, 0x0262bfcb14c3ef15, 0x20b878d577b7518e}},
{{0x753941be5a45f06e, 0xd07caeed6d9c5f65, 0x11776b9c72ff51b6, 0x17d2d1d9ef0d4da9}},
{{0x27f2af18073f3e6a, 0xfd3fe519d7521069, 0x22e3b72c3ca60022, 0x72214f63cc65c6a7}}},
{{{0xb4e37f405307a693, 0xaba714d72f336795, 0xd6fbd0a773761099, 0x5fdf48c58171cbc9}},
{{0x1d9db7b9f43b29c9, 0xd605824a4f518f75, 0xf2c072bd312f9dc4, 0x1f24ac855a1545b0}},
{{0x24d608328e9505aa, 0x4748c1d10c1420ee, 0xc7ffe45c06fb25a2, 0x00ba739e2ae395e6}}},
{{{0x592e98de5c8790d6, 0xe5bfb7d345c2a2df, 0x115a3b60f9b49922, 0x03283a3e67ad78f3}},
{{0xae4426f5ea88bb26, 0x360679d984973bfb, 0x5c9f030c26694e50, 0x72297de7d518d226}},
{{0x48241dc7be0cb939, 0x32f19b4d8b633080, 0xd3dfc90d02289308, 0x05e1296846271945}}},
{{{0xba82eeb32d9c495a, 0xceefc8fcf12bb97c, 0xb02dabae93b5d1e0, 0x39c00c9c13698d9b}},
{{0xadbfbbc8242c4550, 0xbcc80cecd03081d9, 0x843566a6f5c8df92, 0x78cf25d38258ce4c}},
{{0x15ae6b8e31489d68, 0xaa851cab9c2bf087, 0xc9a75a97f04efa05, 0x006b52076b3ff832}}},
{{{0x29e0cfe19d95781c, 0xb681df18966310e2, 0x57df39d370516b39, 0x4d57e3443bc76122}},
{{0xf5cb7e16b9ce082d, 0x3407f14c417abc29, 0xd4b36bce2bf4a7ab, 0x7de2e9561a9f75ce}},
{{0xde70d4f4b6a55ecb, 0x4801527f5d85db99, 0xdbc9c440d3ee9a81, 0x6b2a90af1a6029ed}}},
{{{0x6923f4fc9ae61e97, 0x5735281de03f5fd1, 0xa764ae43e6edd12d, 0x5fd8f4e9d12d3e4a}},
{{0x77ebf3245bb2d80a, 0xd8301b472fb9079b, 0xc647e6f24cee7333, 0x465812c8276c2109}},
{{0x4d43beb22a1062d9, 0x7065fb753831dc16, 0x180d4a7bde2968d7, 0x05b32c2b1cb16790}}},
{{{0xc8c05eccd24da8fd, 0xa1cf1aac05dfef83, 0xdbbeeff27df9cd61, 0x3b5556a37b471e99}},
{{0xf7fca42c7ad58195, 0x3214286e4333f3cc, 0xb6c29d0d340b979d, 0x31771a48567307e1}},
{{0x32b0c524e14dd482, 0xedb351541a2ba4b6, 0xa3d16048282b5af3, 0x4fc079d27a7336eb}}},
{{{0x51c938b089bf2f7f, 0x2497bd6502dfe9a7, 0xffffc09c7880e453, 0x124567cecaf98e92}},
{{0xdc348b440c86c50d, 0x1337cbc9cc94e651, 0x6422f74d643e3cb9, 0x241170c2bae3cd08}},
{{0x3ff9ab860ac473b4, 0xf0911dee0113e435, 0x4ae75060ebc6c4af, 0x3f8612966c87000d}}},
{{{0x559a0cc9782a0dde, 0x551dcdb2ea718385, 0x7f62865b31ef238c, 0x504aa7767973613d}},
{{0x9c18fcfa36048d13, 0x29159db373899ddd, 0xdc9f350b9f92d0aa, 0x26f57eee878a19d4}},
{{0x0cab2cd55687efb1, 0x5180d162247af17b, 0x85c15a344f5a2467, 0x4041943d9dba3069}}},
{{{0xc3c0eeba43ebcc96, 0x8d749c9c26ea9caf, 0xd9fa95ee1c77ccc6, 0x1420a1d97684340f}},
{{0x4b217743a26caadd, 0x47a6b424648ab7ce, 0xcb1d4f7a03fbc9e3, 0x12d931429800d019}},
{{0x00c67799d337594f, 0x5e3c5140b23aa47b, 0x44182854e35ff395, 0x1b4f92314359a012}}},
{{{0x3e5c109d89150951, 0x39cefa912de9696a, 0x20eae43f975f3020, 0x239b572a7f132dae}},
{{0x33cf3030a49866b1, 0x251f73d2215f4859, 0xab82aa4051def4f6, 0x5ff191d56f9a23f6}},
{{0x819ed433ac2d9068, 0x2883ab795fc98523, 0xef4572805593eb3d, 0x020c526a758f36cb}}},
{{{0x779834f89ed8dbbc, 0xc8f2aaf9dc7ca46c, 0xa9524cdca3e1b074, 0x02aacc4615313877}},
{{0xe931ef59f042cc89, 0x2c589c9d8e124bb6, 0xadc8e18aaec75997, 0x452cfe0a5602c50c}},
{{0x86a0f7a0647877df, 0xbbc464270e607c9f, 0xab17ea25f1fb11c9, 0x4cfb7d7b304b877b}}},
{{{0x72b43d6cb89b75fe, 0x54c694d99c6adc80, 0xb8c3aa373ee34c9f, 0x14b4622b39075364}},
{{0xe28699c29789ef12, 0x2b6ecd71df57190d, 0xc343c857ecc970d0, 0x5b1d4cbc434d3ac5}},
{{0xb6fb2615cc0a9f26, 0x3a4f0e2bb88dcce5, 0x1301498b3369a705, 0x2f98f71258592dd1}}},
{{{0x0c94a74cb50f9e56, 0x5b1ff4a98e8e1320, 0x9a2acc2182300f67, 0x3a6ae249d806aaf9}},
{{0x2e12ae444f54a701, 0xfcfe3ef0a9cbd7de, 0xcebf890d75835de0, 0x1d8062e9e7614554}},
{{0x657ada85a9907c5a, 0x1a0ea8b591b90f62, 0x8d0e1dfbdf34b4e9, 0x298b8ce8aef25ff3}}},
{{{0x2a927953eff70cb2, 0x4b89c92a79157076, 0x9418457a30a7cf6a, 0x34b8a8404d5ce485}},
{{0x837a72ea0a2165de, 0x3fab07b40bcf79f6, 0x521636c77738ae70, 0x6ba6271803a7d7dc}},
{{0xc26eecb583693335, 0xd5a813df63b5fefd, 0xa293aa9aa4b22573, 0x71d62bdd465e1c6a}}},
{{{0x6533cc28d378df80, 0xf6db43790a0fa4b4, 0xe3645ff9f701da5a, 0x74d5f317f3172ba4}},
{{0xcd2db5dab1f75ef5, 0xd77f95cf16b065f5, 0x14571fea3f49f085, 0x1c333621262b2b3d}},
{{0xa86fe55467d9ca81, 0x398b7c752b298c37, 0xda6d0892e3ac623b, 0x4aebcc4547e9d98c}}},
{{{0x12f0071b276d01c9, 0xe7b8bac586c48c70, 0x5308129b71d6fba9, 0x5d88fbf95a3db792}},
{{0x0b408d9e7354b610, 0x806b32535ba85b6e, 0xdbe63a034a58a207, 0x173bd9ddc9a1df2c}},
{{0x2b500f1efe5872df, 0x58d6582ed43918c1, 0xe6ed278ec9673ae0, 0x06e1cd13b19ea319}}},
{{{0x40d0ad516f166f23, 0x118e32931fab6abe, 0x3fe35e14a04d088e, 0x3080603526e16266}},
{{0x472baf629e5b0353, 0x3baa0b90278d0447, 0x0c785f469643bf27, 0x7f3a6a1a8d837b13}},
{{0xf7e644395d3d800b, 0x95a8d555c901edf6, 0x68cd7830592c6339, 0x30d0fded2e51307e}}},
{{{0xe0594d1af21233b3, 0x1bdbe78ef0cc4d9c, 0x6965187f8f499a77, 0x0a9214202c099868}},
{{0x9cb4971e68b84750, 0xa09572296664bbcf, 0x5c8de72672fa412b, 0x4615084351c589d9}},
{{0xbc9019c0aeb9a02e, 0x55c7110d16034cae, 0x0e6df501659932ec, 0x3bca0d2895ca5dfe}}},
{{{0x40f031bc3c5d62a4, 0x19fc8b3ecff07a60, 0x98183da2130fb545, 0x5631deddae8f13cd}},
{{0x9c688eb69ecc01bf, 0xf0bc83ada644896f, 0xca2d955f5f7a9fe2, 0x4ea8b4038df28241}},
{{0x2aed460af1cad202, 0x46305305a48cee83, 0x9121774549f11a5f, 0x24ce0930542ca463}}},
{{{0x1fe890f5fd06c106, 0xb5c468355d8810f2, 0x827808fe6e8caf3e, 0x41d4e3c28a06d74b}},
{{0x3fcfa155fdf30b85, 0xd2f7168e36372ea4, 0xb2e064de6492f844, 0x549928a7324f4280}},
{{0xf26e32a763ee1a2e, 0xae91e4b7d25ffdea, 0xbc3bd33bd17f4d69, 0x491b66dec0dcff6a}}},
{{{0x98f5b13dc7ea32a7, 0xe3d5f8cc7e16db98, 0xac0abf52cbf8d947, 0x08f338d0c85ee4ac}},
{{0x75f04a8ed0da64a1, 0xed222caf67e2284b, 0x8234a3791f7b7ba4, 0x4cf6b8b0b7018b67}},
{{0xc383a821991a73bd, 0xab27bc01df320c7a, 0xc13d331b84777063, 0x530d4a82eb078a99}}},
{{{0x004c3630e1f94825, 0x7e2d78268cab535a, 0xc7482323cc84ff8b, 0x65ea753f101770b9}},
{{0x6d6973456c9abf9e, 0x257fb2fc4900a880, 0x2bacf412c8cfb850, 0x0db3e7e00cbfbd5b}},
{{0x3d66fc3ee2096363, 0x81d62c7f61b5cb6b, 0x0fbe044213443b1a, 0x02a4ec1921e1a1db}}},
{{{0x5ce6259a3b24b8a2, 0xb8577acc45afa0b8, 0xcccbe6e88ba07037, 0x3d143c51127809bf}},
{{0xf5c86162f1cf795f, 0x118c861926ee57f2, 0x172124851c063578, 0x36d12b5dec067fcf}},
{{0x126d279179154557, 0xd5e48f5cfc783a0a, 0x36bdb6e8df179bac, 0x2ef517885ba82859}}},
{{{0x4637974e8c58aedc, 0xb9ef22fbabf041a4, 0xe185d956e980718a, 0x2f1b78fab143a8a6}},
{{0x96eebffb305b2f51, 0xd3f938ad889596b8, 0xf0f52dc746d5dd25, 0x57968290bb3a0095}},
{{0xf71ab8430a20e101, 0xf393658d24f0ec47, 0xcf7509a86ee2eed1, 0x7dc43e35dc2aa3e1}}},
{{{0x85966665887dd9c3, 0xc90f9b314bb05355, 0xc6e08df8ef2079b1, 0x7ef72016758cc12f}},
{{0x5a782a5c273e9718, 0x3576c6995e4efd94, 0x0f2ed8051f237d3e, 0x044fb81d82d50a99}},
{{0xc1df18c5a907e3d9, 0x57b3371dce4c6359, 0xca704534b201bb49, 0x7f79823f9c30dd2e}}},
{{{0x8334d239a3b513e8, 0xc13670d4b91fa8d8, 0x12b54136f590bd33, 0x0a4e0373d784d9b4}},
{{0x6a9c1ff068f587ba, 0x0827894e0050c8de, 0x3cbf99557ded5be7, 0x64a9b0431c06d6f0}},
{{0x2eb3d6a15b7d2919, 0xb0b4f6a0d53a8235, 0x7156ce4389a45d47, 0x071a7d0ace18346c}}},
{{{0xd3072daac887ba0b, 0x01262905bfa562ee, 0xcf543002c0ef768b, 0x2c3bcc7146ea7e9c}},
{{0xcc0c355220e14431, 0x0d65950709b15141, 0x9af5621b209d5f36, 0x7c69bcf7617755d3}},
{{0x07f0d7eb04e8295f, 0x10db18252f50f37d, 0xe951a9a3171798d7, 0x6f5a9a7322aca51d}}},
{{{0x8ba1000c2f41c6c5, 0xc49f79c10cfefb9b, 0x4efa47703cc51c9f, 0x494e21a2e147afca}},
{{0xe729d4eba3d944be, 0x8d9e09408078af9e, 0x4525567a47869c03, 0x02ab9680ee8d3b24}},
{{0xefa48a85dde50d9a, 0x219a224e0fb9a249, 0xfa091f1dd91ef6d9, 0x6b5d76cbea46bb34}}},
{{{0x8857556cec0cd994, 0x6472dc6f5cd01dba, 0xaf0169148f42b477, 0x0ae333f685277354}},
{{0xe0f941171e782522, 0xf1e6ae74036936d3, 0x408b3ea2d0fcc746, 0x16fb869c03dd313e}},
{{0x288e199733b60962, 0x24fc72b4d8abe133, 0x4811f7ed0991d03e, 0x3f81e38b8f70d075}}},
{{{0x7f910fcc7ed9affe, 0x545cb8a12465874b, 0xa8397ed24b0c4704, 0x50510fc104f50993}},
{{0x0adb7f355f17c824, 0x74b923c3d74299a4, 0xd57c3e8bcbf8eaf7, 0x0ad3e2d34cdedc3d}},
{{0x6f0c0fc5336e249d, 0x745ede19c331cfd9, 0xf2d6fd0009eefe1c, 0x127c158bf0fa1ebe}}},
{{{0xf6197c422e9879a2, 0xa44addd452ca3647, 0x9b413fc14b4eaccb, 0x354ef87d07ef4f68}},
{{0xdea28fc4ae51b974, 0x1d9973d3744dfe96, 0x6240680b873848a8, 0x4ed82479d167df95}},
{{0xfee3b52260c5d975, 0x50352efceb41b0b8, 0x8808ac30a9f6653c, 0x302d92d20539236d}}},
{{{0x7813c1a2bca4283d, 0xed62f091a1863dd9, 0xaec7bcb8c268fa86, 0x10e5d3b76f1cae4c}},
{{0x2dbc6fb6e4e0f177, 0x04e1bf29a4bd6a93, 0x5e1966d4787af6e8, 0x0edc5f5eb426d060}},
{{0x5453bfd653da8e67, 0xe9dc1eec24a9f641, 0xbf87263b03578a23, 0x45b46c51361cba72}}},
{{{0xa9402abf314f7fa1, 0xe257f1dc8e8cf450, 0x1dbbd54b23a8be84, 0x2177bfa36dcb713b}},
{{0xce9d4ddd8a7fe3e4, 0xab13645676620e30, 0x4b594f7bb30e9958, 0x5c1c0aef321229df}},
{{0x37081bbcfa79db8f, 0x6048811ec25f59b3, 0x087a76659c832487, 0x4ae619387d8ab5bb}}},
{{{0x8ddbf6aa5344a32e, 0x7d88eab4b41b4078, 0x5eb0eb974a130d60, 0x1a00d91b17bf3e03}},
{{0x61117e44985bfb83, 0xfce0462a71963136, 0x83ac3448d425904b, 0x75685abe5ba43d64}},
{{0x6e960933eb61f2b2, 0x543d0fa8c9ff4952, 0xdf7275107af66569, 0x135529b623b0e6aa}}},
{{{0x18f0dbd7add1d518, 0x979f7888cfc11f11, 0x8732e1f07114759b, 0x79b5b81a65ca3a01}},
{{0xf5c716bce22e83fe, 0xb42beb19e80985c1, 0xec9da63714254aae, 0x5972ea051590a613}},
{{0x0fd4ac20dc8f7811, 0x9a9ad294ac4d4fa8, 0xc01b2d64b3360434, 0x4f7e9c95905f3bdb}}},
{{{0x62674bbc5781302e, 0xd8520f3989addc0f, 0x8c2999ae53fbd9c6, 0x31993ad92e638e4c}},
{{0x71c8443d355299fe, 0x8bcd3b1cdbebead7, 0x8092499ef1a49466, 0x1942eec4a144adc8}},
{{0x7dac5319ae234992, 0x2c1b3d910cea3e92, 0x553ce494253c1122, 0x2a0a65314ef9ca75}}},
{{{0x2db7937ff7f927c2, 0xdb741f0617d0a635, 0x5982f3a21155af76, 0x4cf6e218647c2ded}},
{{0xcf361acd3c1c793a, 0x2f9ebcac5a35bc3b, 0x60e860e9a8cda6ab, 0x055dc39b6dea1a13}},
{{0xb119227cc28d5bb6, 0x07e24ebc774dffab, 0xa83c78cee4a32c89, 0x121a307710aa24b6}}},
{{{0xe4db5d5e9f034a97, 0xe153fc093034bc2d, 0x460546919551d3b1, 0x333fc76c7a40e52d}},
{{0xd659713ec77483c9, 0x88bfe077b82b96af, 0x289e28231097bcd3, 0x527bb94a6ced3a9b}},
{{0x563d992a995b482e, 0x3405d07c6e383801, 0x485035de2f64d8e5, 0x6b89069b20a7a9f7}}},
{{{0x812aa0416270220d, 0x995a89faf9245b4e, 0xffadc4ce5072ef05, 0x23bc2103aa73eb73}},
{{0x4082fa8cb5c7db77, 0x068686f8c734c155, 0x29e6c8d9f6e7a57e, 0x0473d308a7639bcf}},
{{0xcaee792603589e05, 0x2b4b421246dcc492, 0x02a1ef74e601a94f, 0x102f73bfde04341a}}},
{{{0xeb18b9ab7f5745c6, 0x023a8aee5787c690, 0xb72712da2df7afa9, 0x36597d25ea5c013d}},
{{0xa2b4dae0b5511c9a, 0x7ac860292bffff06, 0x981f375df5504234, 0x3f6bd725da4ea12d}},
{{0x734d8d7b106058ac, 0xd940579e6fc6905f, 0x6466f8f99202932d, 0x7b7ecc19da60d6d0}}},
{{{0x78c2373c695c690d, 0xdd252e660642906e, 0x951d44444ae12bd2, 0x4235ad7601743956}},
{{0x6dae4a51a77cfa9b, 0x82263654e7a38650, 0x09bbffcd8f2d82db, 0x03bedc661bf5caba}},
{{0x6258cb0d078975f5, 0x492942549189f298, 0xa0cab423e2e36ee4, 0x0e7ce2b0cdf066a1}}},
{{{0xc494643ac48c85a3, 0xfd361df43c6139ad, 0x09db17dd3ae94d48, 0x666e0a5d8fb4674a}},
{{0xfea6fedfd94b70f9, 0xf130c051c1fcba2d, 0x4882d47e7f2fab89, 0x615256138aeceeb5}},
{{0x2abbf64e4870cb0d, 0xcd65bcf0aa458b6b, 0x9abe4eba75e8985d, 0x7f0bc810d514dee4}}},
{{{0xb9006ba426f4136f, 0x8d67369e57e03035, 0xcbc8dfd94f463c28, 0x0d1f8dbcf8eedbf5}},
{{0x83ac9dad737213a0, 0x9ff6f8ba2ef72e98, 0x311e2edd43ec6957, 0x1d3a907ddec5ab75}},
{{0xba1693313ed081dc, 0x29329fad851b3480, 0x0128013c030321cb, 0x00011b44a31bfde3}}},
{{{0x3fdfa06c3fc66c0c, 0x5d40e38e4dd60dd2, 0x7ae38b38268e4d71, 0x3ac48d916e8357e1}},
{{0x16561f696a0aa75c, 0xc1bf725c5852bd6a, 0x11a8dd7f9a7966ad, 0x63d988a2d2851026}},
{{0x00120753afbd232e, 0xe92bceb8fdd8f683, 0xf81669b384e72b91, 0x33fad52b2368a066}}},
{{{0x540649c6c5e41e16, 0x0af86430333f7735, 0xb2acfcd2f305e746, 0x16c0f429a256dca7}},
{{0x8d2cc8d0c422cfe8, 0x072b4f7b05a13acb, 0xa3feb6e6ecf6a56f, 0x3cc355ccb90a71e2}},
{{0xe9b69443903e9131, 0xb8a494cb7a5637ce, 0xc87cd1a4baba9244, 0x631eaf426bae7568}}},
{{{0xb3e90410da66fe9f, 0x85dd4b526c16e5a6, 0xbc3d97611ef9bf83, 0x5599648b1ea919b5}},
{{0x47d975b9a3700de8, 0x7280c5fbe2f80552, 0x53658f2732e45de1, 0x431f2c7f665f80b5}},
{{0xd6026344858f7b19, 0x14ab352fa1ea514a, 0x8900441a2090a9d7, 0x7b04715f91253b26}}},
{{{0x83edbd28acf6ae43, 0x86357c8b7d5c7ab4, 0xc0404769b7eb2c44, 0x59b37bf5c2f6583f}},
{{0xb376c280c4e6bac6, 0x970ed3dd6d1d9b0b, 0xb09a9558450bf944, 0x48d0acfa57cde223}},
{{0xb60f26e47dabe671, 0xf1d1a197622f3a37, 0x4208ce7ee9960394, 0x16234191336d3bdb}}},
{{{0xb9e499def6267ff6, 0x7772ca7b742c0843, 0x23a0153fe9a4f2b1, 0x2cdfdfecd5d05006}},
{{0xdd499cd61ff38640, 0x29cd9bc3063625a0, 0x51e2d8023dd73dc3, 0x4a25707a203b9231}},
{{0x2ab7668a53f6ed6a, 0x304242581dd170a1, 0x4000144c3ae20161, 0x5721896d248e49fc}}},
{{{0x0b6e5517fd181bae, 0x9022629f2bb963b4, 0x5509bce932064625, 0x578edd74f63c13da}},
{{0x285d5091a1d0da4e, 0x4baa6fa7b5fe3e08, 0x63e5177ce19393b3, 0x03c935afc4b030fd}},
{{0x997276c6492b0c3d, 0x47ccc2c4dfe205fc, 0xdcd29b84dd623a3c, 0x3ec2ab590288c7a2}}},
{{{0xa1a0d27be4d87bb9, 0xa98b4deb61391aed, 0x99a0ddd073cb9b83, 0x2dd5c25a200fcace}},
{{0xa7213a09ae32d1cb, 0x0f2b87df40f5c2d5, 0x0baea4c6e81eab29, 0x0e1bf66c6adbac5e}},
{{0xe2abd5e9792c887e, 0x1a020018cb926d5d, 0xbfba69cdbaae5f1e, 0x730548b35ae88f5f}}},
{{{0xc43551a3cba8b8ee, 0x65a26f1db2115f16, 0x760f4f52ab8c3850, 0x3043443b411db8ca}},
{{0x805b094ba1d6e334, 0xbf3ef17709353f19, 0x423f06cb0622702b, 0x585a2277d87845dd}},
{{0xa18a5f8233d48962, 0x6698c4b5ec78257f, 0xa78e6fa5373e41ff, 0x7656278950ef981f}}},
{{{0x38c3cf59d51fc8c0, 0x9bedd2fd0506b6f2, 0x26bf109fab570e8f, 0x3f4160a8c1b846a6}},
{{0xe17073a3ea86cf9d, 0x3a8cfbb707155fdc, 0x4853e7fc31838a8e, 0x28bbf484b613f616}},
{{0xf2612f5c6f136c7c, 0xafead107f6dd11be, 0x527e9ad213de6f33, 0x1e79cb358188f75d}}},
{{{0x013436c3eef7e3f1, 0x828b6a7ffe9e10f8, 0x7ff908e5bcf9defc, 0x65d7951b3a3b3831}},
{{0x77e953d8f5e08181, 0x84a50c44299dded9, 0xdc6c2d0c864525e5, 0x478ab52d39d1f2f4}},
{{0x66a6a4d39252d159, 0xe5dde1bc871ac807, 0xb82c6b40a6c1c96f, 0x16d87a411a212214}}},
{{{0xb3bd7e5a42066215, 0x879be3cd0c5a24c1, 0x57c05db1d6f994b7, 0x28f87c8165f38ca6}},
{{0xfba4d5e2d54e0583, 0xe21fafd72ebd99fa, 0x497ac2736ee9778f, 0x1f990b577a5a6dde}},
{{0xa3344ead1be8f7d6, 0x7d1e50ebacea798f, 0x77c6569e520de052, 0x45882fe1534d6d3e}}},
{{{0x6669345d757983d6, 0x62b6ed1117aa11a6, 0x7ddd1857985e128f, 0x688fe5b8f626f6dd}},
{{0xd8ac9929943c6fe4, 0xb5f9f161a38392a2, 0x2699db13bec89af3, 0x7dcf843ce405f074}},
{{0x6c90d6484a4732c0, 0xd52143fdca563299, 0xb3be28c3915dc6e1, 0x6739687e7327191b}}},
{{{0xef782014385675a6, 0xa2649f30aafda9e8, 0x4cd1eb505cdfa8cb, 0x46115aba1d4dc0b3}},
{{0xa66dcc9dc80c1ac0, 0x97a05cf41b38a436, 0xa7ebf3be95dbd7c6, 0x7da0b8f68d7e7dab}},
{{0xd40f1953c3b5da76, 0x1dac6f7321119e9b, 0x03cc6021feb25960, 0x5a5f887e83674b4b}}},
{{{0x8f6301cf70a13d11, 0xcfceb815350dd0c4, 0xf70297d4a4bca47e, 0x3669b656e44d1434}},
{{0x9e9628d3a0a643b9, 0xb5c3cb00e6c32064, 0x9b5302897c2dec32, 0x43e37ae2d5d1c70c}},
{{0x387e3f06eda6e133, 0x67301d5199a13ac0, 0xbd5ad8f836263811, 0x6a21e6cd4fd5e9be}}},
{{{0xf1c6170a3046e65f, 0x58712a2a00d23524, 0x69dbbd3c8c82b755, 0x586bf9f1a195ff57}},
{{0xef4129126699b2e3, 0x71d30847708d1301, 0x325432d01182b0bd, 0x45371b07001e8b36}},
{{0xa6db088d5ef8790b, 0x5278f0dc610937e5, 0xac0349d261a16eb8, 0x0eafb03790e52179}}},
{{{0x960555c13748042f, 0x219a41e6820baa11, 0x1c81f73873486d0c, 0x309acc675a02c661}},
{{0x5140805e0f75ae1d, 0xec02fbe32662cc30, 0x2cebdf1eea92396d, 0x44ae3344c5435bb3}},
{{0x9cf289b9bba543ee, 0xf3760e9d5ac97142, 0x1d82e5c64f9360aa, 0x62d5221b7f94678f}}},
{{{0x524c299c18d0936d, 0xc86bb56c8a0c1a0c, 0xa375052edb4a8631, 0x5c0efde4bc754562}},
{{0x7585d4263af77a3c, 0xdfae7b11fee9144d, 0xa506708059f7193d, 0x14f29a5383922037}},
{{0xdf717edc25b2d7f5, 0x21f970db99b53040, 0xda9234b7c3ed4c62, 0x5e72365c7bee093e}}},
{{{0x575bfc074571217f, 0x3779675d0694d95b, 0x9a0a37bbf4191e33, 0x77f1104c47b4eabc}},
{{0x7d9339062f08b33e, 0x5b9659e5df9f32be, 0xacff3dad1f9ebdfd, 0x70b20555cb7349b7}},
{{0xbe5113c555112c4c, 0x6688423a9a881fcd, 0x446677855e503b47, 0x0e34398f4a06404a}}},
{{{0xb67d22d93ecebde8, 0x09b3e84127822f07, 0x743fa61fb05b6d8d, 0x5e5405368a362372}},
{{0x18930b093e4b1928, 0x7de3e10e73f3f640, 0xf43217da73395d6f, 0x6f8aded6ca379c3e}},
{{0xe340123dfdb7b29a, 0x487b97e1a21ab291, 0xf9967d02fde6949e, 0x780de72ec8d3de97}}},
{{{0x0ae28545089ae7bc, 0x388ddecf1c7f4d06, 0x38ac15510a4811b8, 0x0eb28bf671928ce4}},
{{0x671feaf300f42772, 0x8f72eb2a2a8c41aa, 0x29a17fd797373292, 0x1defc6ad32b587a6}},
{{0xaf5bbe1aef5195a7, 0x148c1277917b15ed, 0x2991f7fb7ae5da2e, 0x467d201bf8dd2867}}},
{{{0x95fe919a74ef4fad, 0x3a827becf6a308a2, 0x964e01d309a47b01, 0x71c43c4f5ba3c797}},
{{0xbc1ef4bd567ae7a9, 0x3f624cb2d64498bd, 0xe41064d22c1f4ec8, 0x2ef9c5a5ba384001}},
{{0xb6fd6df6fa9e74cd, 0xf18278bce4af267a, 0x8255b3d0f1ef990e, 0x5a758ca390c5f293}}},
{{{0xa2b72710d9462495, 0x3aa8c6d2d57d5003, 0xe3d400bfa0b487ca, 0x2dbae244b3eb72ec}},
{{0x8ce0918b1d61dc94, 0x8ded36469a813066, 0xd4e6a829afe8aad3, 0x0a738027f639d43f}},
{{0x980f4a2f57ffe1cc, 0x00670d0de1839843, 0x105c3f4a49fb15fd, 0x2698ca635126a69c}}},
{{{0xe765318832b0ba78, 0x381831f7925cff8b, 0x08a81b91a0291fcc, 0x1fb43dcc49caeb07}},
{{0x2e3d702f5e3dd90e, 0x9e3f0918e4d25386, 0x5e773ef6024da96a, 0x3c004b0c4afa3332}},
{{0x9aa946ac06f4b82b, 0x1ca284a5a806c4f3, 0x3ed3265fc6cd4787, 0x6b43fd01cd1fd217}}},
{{{0xc7a75d4b4697c544, 0x15fdf848df0fffbf, 0x2868b9ebaa46785a, 0x5a68d7105b52f714}},
{{0xb5c742583e760ef3, 0x75dc52b9ee0ab990, 0xbf1427c2072b923f, 0x73420b2d6ff0d9f0}},
{{0xaf2cf6cb9e851e06, 0x8f593913c62238c4, 0xda8ab89699fbf373, 0x3db5632fea34bc9e}}},
{{{0xf46eee2bf75dd9d8, 0x0d17b1f6396759a5, 0x1bf2d131499e7273, 0x04321adf49d75f13}},
{{0x2e4990b1829825d5, 0xedeaeb873e9a8991, 0xeef03d394c704af8, 0x59197ea495df2b0e}},
{{0x04e16019e4e55aae, 0xe77b437a7e2f92e9, 0xc7ce2dc16f159aa4, 0x45eafdc1f4d70cc0}}},
{{{0x698401858045d72b, 0x4c22faa2cf2f0651, 0x941a36656b222dc6, 0x5a5eebc80362dade}},
{{0xb60e4624cfccb1ed, 0x59dbc292bd5c0395, 0x31a09d1ddc0481c9, 0x3f73ceea5d56d940}},
{{0xb7a7bfd10a4e8dc6, 0xbe57007e44c9b339, 0x60c1207f1557aefa, 0x26058891266218db}}},
{{{0x59f704a68360ff04, 0xc3d93fde7661e6f4, 0x831b2a7312873551, 0x54ad0c2e4e615d57}},
{{0x4c818e3cc676e542, 0x5e422c9303ceccad, 0xec07cccab4129f08, 0x0dedfa10b24443b8}},
{{0xee3b67d5b82b522a, 0x36f163469fa5c1eb, 0xa5b4d2f26ec19fd3, 0x62ecb2baa77a9408}}},
{{{0xe5ed795261152b3d, 0x4962357d0eddd7d1, 0x7482c8d0b96b4c71, 0x2e59f919a966d8be}},
{{0x92072836afb62874, 0x5fcd5e8579e104a5, 0x5aad01adc630a14a, 0x61913d5075663f98}},
{{0x0dc62d361a3231da, 0xfa47583294200270, 0x02d801513f9594ce, 0x3ddbc2a131c05d5c}}},
{{{0x9adc0ff9ce5ec54b, 0x039c2a6b8c2f130d, 0x028007c7f0f89515, 0x78968314ac04b36b}},
{{0xf3aa57a22796bb14, 0x883abab79b07da21, 0xe54be21831a0391c, 0x5ee7fb38d83205f9}},
{{0x538dfdcb41446a8e, 0xa5acfda9434937f9, 0x46af908d263c8c78, 0x61d0633c9bca0d09}}},
{{{0x63744935ffdb2566, 0xc5bd6b89780b68bb, 0x6f1b3280553eec03, 0x6e965fd847aed7f5}},
{{0xada328bcf8fc73df, 0xee84695da6f037fc, 0x637fb4db38c2a909, 0x5b23ac2df8067bdc}},
{{0x9ad2b953ee80527b, 0xe88f19aafade6d8d, 0x0e711704150e82cf, 0x79b9bbb9dd95dedc}}},
{{{0xebb355406a3126c2, 0xd26383a868c8c393, 0x6c0c6429e5b97a82, 0x5065f158c9fd2147}},
{{0xd1997dae8e9f7374, 0xa032a2f8cfbb0816, 0xcd6cba126d445f0a, 0x1ba811460accb834}},
{{0x708169fb0c429954, 0xe14600acd76ecf67, 0x2eaab98a70e645ba, 0x3981f39e58a4faf2}}},
{{{0x18fb8a7559230a93, 0x1d168f6960e6f45d, 0x3a85a94514a93cb5, 0x38dc083705acd0fd}},
{{0xc845dfa56de66fde, 0xe152a5002c40483a, 0xe9d2e163c7b4f632, 0x30f4452edcbc1b65}},
{{0x856d2782c5759740, 0xfa134569f99cbecc, 0x8844fc73c0ea4e71, 0x632d9a1a593f2469}}},
{{{0xf6bb6b15b807cba6, 0x1823c7dfbc54f0d7, 0xbb1d97036e29670b, 0x0b24f48847ed4a57}},
{{0xbf09fd11ed0c84a7, 0x63f071810d9f693a, 0x21908c2d57cf8779, 0x3a5a7df28af64ba2}},
{{0xdcdad4be511beac7, 0xa4538075ed26ccf2, 0xe19cff9f005f9a65, 0x34fcf74475481f63}}},
{{{0xc197e04c789767ca, 0xb8714dcb38d9467d, 0x55de888283f95fa8, 0x3d3bdc164dfa63f7}},
{{0xa5bb1dab78cfaa98, 0x5ceda267190b72f2, 0x9309c9110a92608e, 0x0119a3042fb374b0}},
{{0x67a2d89ce8c2177d, 0x669da5f66895d0c1, 0xf56598e5b282a2b0, 0x56c088f1ede20a73}}},
{{{0x336d3d1110a86e17, 0xd7f388320b75b2fa, 0xf915337625072988, 0x09674c6b99108b87}},
{{0x581b5fac24f38f02, 0xa90be9febae30cbd, 0x9a2169028acf92f0, 0x038b7ea48359038f}},
{{0x9f4ef82199316ff8, 0x2f49d282eaa78d4f, 0x0971a5ab5aef3174, 0x6e5e31025969eb65}}},
{{{0xb16c62f587e593fb, 0x4999eddeca5d3e71, 0xb491c1e014cc3e6d, 0x08f5114789a8dba8}},
{{0x3304fb0e63066222, 0xfb35068987acba3f, 0xbd1924778c1061a3, 0x3058ad43d1838620}},
{{0x323c0ffde57663d0, 0x05c3df38a22ea610, 0xbdc78abdac994f9a, 0x26549fa4efe3dc99}}},
{{{0x741d5a461e6bf9d6, 0x2305b3fc7777a581, 0xd45574a26474d3d9, 0x1926e1dc6401e0ff}},
{{0xdb468549af3f666e, 0xd77fcf04f14a0ea5, 0x3df23ff7a4ba0c47, 0x3a10dfe132ce3c85}},
{{0xe07f4e8aea17cea0, 0x2fd515463a1fc1fd, 0x175322fd31f2c0f1, 0x1fa1d01d861e5d15}}},
{{{0xcc8055947d599832, 0x1e4656da37f15520, 0x99f6f7744e059320, 0x773563bc6a75cf33}},
{{0x38dcac00d1df94ab, 0x2e712bddd1080de9, 0x7f13e93efdd5e262, 0x73fced18ee9a01e5}},
{{0x06b1e90863139cb3, 0xa493da67c5a03ecd, 0x8d77cec8ad638932, 0x1f426b701b864f44}}},
{{{0xefc9264c41911c01, 0xf1a3b7b817a22c25, 0x5875da6bf30f1447, 0x4e1af5271d31b090}},
{{0xf17e35c891a12552, 0xb76b8153575e9c76, 0xfa83406f0d9b723e, 0x0b76bb1b3fa7e438}},
{{0x08b8c1f97f92939b, 0xbe6771cbd444ab6e, 0x22e5646399bb8017, 0x7b6dd61eb772a955}}},
{{{0xb7adc1e850f33d92, 0x7998fa4f608cd5cf, 0xad962dbd8dfc5bdb, 0x703e9bceaf1d2f4f}},
{{0x5730abf9ab01d2c7, 0x16fb76dc40143b18, 0x866cbe65a0cbb281, 0x53fa9b659bff6afe}},
{{0x6c14c8e994885455, 0x843a5d6665aed4e5, 0x181bb73ebcd65af1, 0x398d93e5c4c61f50}}},
{{{0x1c4bd16733e248f3, 0xbd9e128715bf0a5f, 0xd43f8cf0a10b0376, 0x53b09b5ddf191b13}},
{{0xc3877c60d2e7e3f2, 0x3b34aaa030828bb1, 0x283e26e7739ef138, 0x699c9c9002c30577}},
{{0xf306a7235946f1cc, 0x921718b5cce5d97d, 0x28cdd24781b4e975, 0x51caf30c6fcdd907}}},
{{{0xa60ba7427674e00a, 0x630e8570a17a7bf3, 0x3758563dcf3324cc, 0x5504aa292383fdaa}},
{{0x737af99a18ac54c7, 0x903378dcc51cb30f, 0x2b89bc334ce10cc7, 0x12ae29c189f8e99a}},
{{0xa99ec0cb1f0d01cf, 0x0dd1efcc3a34f7ae, 0x55ca7521d09c4e22, 0x5fd14fe958eba5ea}}},
{{{0xb5dc2ddf2845ab2c, 0x069491b10a7fe993, 0x4daaf3d64002e346, 0x093ff26e586474d1}},
{{0x3c42fe5ebf93cb8e, 0xbedfa85136d4565f, 0xe0f0859e884220e8, 0x7dd73f960725d128}},
{{0xb10d24fe68059829, 0x75730672dbaf23e5, 0x1367253ab457ac29, 0x2f59bcbc86b470a4}}},
{{{0x83847d429917135f, 0xad1b911f567d03d7, 0x7e7748d9be77aad1, 0x5458b42e2e51af4a}},
{{0x7041d560b691c301, 0x85201b3fadd7e71e, 0x16c2e16311335585, 0x2aa55e3d010828b1}},
{{0xed5192e60c07444f, 0x42c54e2d74421d10, 0x352b4c82fdb5c864, 0x13e9004a8a768664}}},
{{{0x739d8845832fcedb, 0xfa38d6c9ae6bf863, 0x32bc0dcab74ffef7, 0x73937e8814bce45e}},
{{0xbb2e00c9193b877f, 0xece3a890e0dc506b, 0xecf3b7c036de649f, 0x5f46040898de9e1a}},
{{0xb9037116297bf48d, 0xa9d13b22d4f06834, 0xe19715574696bdc6, 0x2cf8a4e891d5e835}}},
{{{0x6d93fd8707110f67, 0xdd4c09d37c38b549, 0x7cb16a4cc2736a86, 0x2049bd6e58252a09}},
{{0x2cb5487e17d06ba2, 0x24d2381c3950196b, 0xd7659c8185978a30, 0x7a6f7f2891d6a4f6}},
{{0x7d09fd8d6a9aef49, 0xf0ee60be5b3db90b, 0x4c21b52c519ebfd4, 0x6011aadfc545941d}}},
{{{0x5f67926dcf95f83c, 0x7c7e856171289071, 0xd6a1e7f3998f7a5b, 0x6fc5cc1b0b62f9e0}},
{{0x63ded0c802cbf890, 0xfbd098ca0dff6aaa, 0x624d0afdb9b6ed99, 0x69ce18b779340b1e}},
{{0xd1ef5528b29879cb, 0xdd1aae3cd47e9092, 0x127e0442189f2352, 0x15596b3ae57101f1}}},
{{{0x462739d23f9179a2, 0xff83123197d6ddcf, 0x1307deb553f2148a, 0x0d2237687b5f4dda}},
{{0x09ff31167e5124ca, 0x0be4158bd9c745df, 0x292b7d227ef556e5, 0x3aa4e241afb6d138}},
{{0x2cc138bf2a3305f5, 0x48583f8fa2e926c3, 0x083ab1a25549d2eb, 0x32fcaa6e4687a36c}}},
{{{0x7bc56e8dc57d9af5, 0x3e0bd2ed9df0bdf2, 0xaac014de22efe4a3, 0x4627e9cefebd6a5c}},
{{0x3207a4732787ccdf, 0x17e31908f213e3f8, 0xd5b2ecd7f60d964e, 0x746f6336c2600be9}},
{{0x3f4af345ab6c971c, 0xe288eb729943731f, 0x33596a8a0344186d, 0x7b4917007ed66293}}},
{{{0x2d85fb5cab84b064, 0x497810d289f3bc14, 0x476adc447b15ce0c, 0x122ba376f844fd7b}},
{{0x54341b28dd53a2dd, 0xaa17905bdf42fc3f, 0x0ff592d94dd2f8f4, 0x1d03620fe08cd37d}},
{{0xc20232cda2b4e554, 0x9ed0fd42115d187f, 0x2eabb4be7dd479d9, 0x02c70bf52b68ec4c}}},
{{{0xa287ec4b5d0b2fbb, 0x415c5790074882ca, 0xe044a61ec1d0815c, 0x26334f0a409ef5e0}},
{{0xace532bf458d72e1, 0x5be768e07cb73cb5, 0x56cf7d94ee8bbde7, 0x6b0697e3feb43a03}},
{{0xb6c8f04adf62a3c0, 0x3ef000ef076da45d, 0x9c9cb95849f0d2a9, 0x1cc37f43441b2fae}}},
{{{0x508f565a5cc7324f, 0xd061c4c0e506a922, 0xfb18abdb5c45ac19, 0x6c6809c10380314a}},
{{0xd76656f1c9ceaeb9, 0x1c5b15f818e5656a, 0x26e72832844c2334, 0x3a346f772f196838}},
{{0xd2d55112e2da6ac8, 0xe9bd0331b1e851ed, 0x960746dd8ec67262, 0x05911b9f6ef7c5d0}}},
{{{0xc1339983f5df0ebb, 0xc0f3758f512c4cac, 0x2cf1130a0bb398e1, 0x6b3cecf9aa270c62}},
{{0x5349acf3512eeaef, 0x20c141d31cc1cb49, 0x24180c07a99a688d, 0x555ef9d1c64b2d17}},
{{0x36a770ba3b73bd08, 0x624aef08a3afbf0c, 0x5737ff98b40946f2, 0x675f4de13381749d}}},
{{{0x0e2c52036b1782fc, 0x64816c816cad83b4, 0xd0dcbdd96964073e, 0x13d99df70164c520}},
{{0xa12ff6d93bdab31d, 0x0725d80f9d652dfe, 0x019c4ff39abe9487, 0x60f450b882cd3c43}},
{{0x014b5ec321e5c0ca, 0x4fcb69c9d719bfa2, 0x4e5f1c18750023a0, 0x1c06de9e55edac80}}},
{{{0x990f7ad6a33ec4e2, 0x6608f938be2ee08e, 0x9ca143c563284515, 0x4cf38a1fec2db60d}},
{{0xffd52b40ff6d69aa, 0x34530b18dc4049bb, 0x5e4a5c2fa34d9897, 0x78096f8e7d32ba2d}},
{{0xa0aaaa650dfa5ce7, 0xf9c49e2a48b5478c, 0x4f09cc7d7003725b, 0x373cad3a26091abe}}},
{{{0xb294634d82c9f57c, 0x1fcbfde124934536, 0x9e9c4db3418cdb5a, 0x0040f3d9454419fc}},
{{0xf1bea8fb89ddbbad, 0x3bcb2cbc61aeaecb, 0x8f58a7bb1f9b8d9d, 0x21547eda5112a686}},
{{0xdefde939fd5986d3, 0xf4272c89510a380c, 0xb72ba407bb3119b9, 0x63550a334a254df4}}},
{{{0x6507d6edb569cf37, 0x178429b00ca52ee1, 0xea7c0090eb6bd65d, 0x3eea62c7daf78f51}},
{{0x9bba584572547b49, 0xf305c6fae2c408e0, 0x60e8fa69c734f18d, 0x39a92bafaa7d767a}},
{{0x9d24c713e693274e, 0x5f63857768dbd375, 0x70525560eb8ab39a, 0x68436a0665c9c4cd}}},
{{{0xbc0235e8202f3f27, 0xc75c00e264f975b0, 0x91a4e9d5a38c2416, 0x17b6e7f68ab789f9}},
{{0x1e56d317e820107c, 0xc5266844840ae965, 0xc1e0a1c6320ffc7a, 0x5373669c91611472}},
{{0x5d2814ab9a0e5257, 0x908f2084c9cab3fc, 0xafcaf5885b2d1eca, 0x1cb4b5a678f87d11}}},
{{{0xb664c06b394afc6c, 0x0c88de2498da5fb1, 0x4f8d03164bcad834, 0x330bca78de7434a2}},
{{0x6b74aa62a2a007e7, 0xf311e0b0f071c7b1, 0x5707e438000be223, 0x2dc0fd2d82ef6eac}},
{{0x982eff841119744e, 0xf9695e962b074724, 0xc58ac14fbfc953fb, 0x3c31be1b369f1cf5}}},
{{{0xb0f4864d08948aee, 0x07dc19ee91ba1c6f, 0x7975cdaea6aca158, 0x330b61134262d4bb}},
{{0xc168bc93f9cb4272, 0xaeb8711fc7cedb98, 0x7f0e52aa34ac8d7a, 0x41cec1097e7d55bb}},
{{0xf79619d7a26d808a, 0xbb1fd49e1d9e156d, 0x73d7c36cdba1df27, 0x26b44cd91f28777d}}},
{{{0x51f048478f387475, 0xb25dbcf49cbecb3c, 0x9aab1244d99f2055, 0x2c709e6c1c10a5d6}},
{{0xe1b7f29362730383, 0x4b5279ffebca8a2c, 0xdafc778abfd41314, 0x7deb10149c72610f}},
{{0xcb62af6a8766ee7a, 0x66cbec045553cd0e, 0x588001380f0be4b5, 0x08e68e9ff62ce2ea}}},
{{{0x34ad500a4bc130ad, 0x8d38db493d0bd49c, 0xa25c3d98500a89be, 0x2f1f3f87eeba3b09}},
{{0x2f2d09d50ab8f2f9, 0xacb9218dc55923df, 0x4a8f342673766cb9, 0x4cb13bd738f719f5}},
{{0xf7848c75e515b64a, 0xa59501badb4a9038, 0xc20d313f3f751b50, 0x19a1e353c0ae2ee8}}},
{{{0x7d1c7560bafa05c3, 0xb3e1a0a0c6e55e61, 0xe3529718c0d66473, 0x41546b11c20c3486}},
{{0xb42172cdd596bdbd, 0x93e0454398eefc40, 0x9fb15347b44109b5, 0x736bd3990266ae34}},
{{0x85532d509334b3b4, 0x46fd114b60816573, 0xcc5f5f30425c8375, 0x412295a2b87fab5c}}},
{{{0x19c99b88f57ed6e9, 0x5393cb266df8c825, 0x5cee3213b30ad273, 0x14e153ebb52d2e34}},
{{0x2e655261e293eac6, 0x845a92032133acdb, 0x460975cb7900996b, 0x0760bb8d195add80}},
{{0x413e1a17cde6818a, 0x57156da9ed69a084, 0x2cbf268f46caccb1, 0x6b34be9bc33ac5f2}}},
{{{0xf3df2f643a78c0b2, 0x4c3e971ef22e027c, 0xec7d1c5e49c1b5a3, 0x2012c18f0922dd2d}},
{{0x11fc69656571f2d3, 0xc6c9e845530e737a, 0xe33ae7a2d4fe5035, 0x01b9c7b62e6dd30b}},
{{0x880b55e55ac89d29, 0x1483241f45a0a763, 0x3d36efdfc2e76c1f, 0x08af5b784e4bade8}}},
{{{0x283499dc881f2533, 0x9d0525da779323b6, 0x897addfb673441f4, 0x32b79d71163a168d}},
{{0xe27314d289cc2c4b, 0x4be4bd11a287178d, 0x18d528d6fa3364ce, 0x6423c1d5afd9826e}},
{{0xcc85f8d9edfcb36a, 0x22bcc28f3746e5f9, 0xe49de338f9e5d3cd, 0x480a5efbc13e2dcc}}},
{{{0x0b51e70b01622071, 0x06b505cf8b1dafc5, 0x2c6bb061ef5aabcd, 0x47aa27600cb7bf31}},
{{0xb6614ce442ce221f, 0x6e199dcc4c053928, 0x663fb4a4dc1cbe03, 0x24b31d47691c8e06}},
{{0x2a541eedc015f8c3, 0x11a4fe7e7c693f7c, 0xf0af66134ea278d6, 0x545b585d14dda094}}},
{{{0x67bf275ea0d43a0f, 0xade68e34089beebe, 0x4289134cd479e72e, 0x0f62f9c332ba5454}},
{{0x6204e4d0e3b321e1, 0x3baa637a28ff1e95, 0x0b0ccffd5b99bd9e, 0x4d22dc3e64c8d071}},
{{0xfcb46589d63b5f39, 0x5cae6a3f57cbcf61, 0xfebac2d2953afa05, 0x1c0fa01a36371436}}},
{{{0xd2c604b622943dff, 0xbc8cbece44cfb3a0, 0x5d254ff397808678, 0x0fa3614f3b1ca6bf}},
{{0x69082b0e8c936a50, 0xf9c9a035c1dac5b6, 0x6fb73e54c4dfb634, 0x4005419b1d2bc140}},
{{0xa003febdb9be82f0, 0x2089c1af3a44ac90, 0xf8499f911954fa8e, 0x1fba218aef40ab42}}},
{{{0xab549448fac8f53e, 0x81f6e89a7ba63741, 0x74fd6c7d6c2b5e01, 0x392e3acaa8c86e42}},
{{0x4f3e57043e7b0194, 0xa81d3eee08daaf7f, 0xc839c6ab99dcdef1, 0x6c535d13ff7761d5}},
{{0x4cbd34e93e8a35af, 0x2e0781445887e816, 0x19319c76f29ab0ab, 0x25e17fe4d50ac13b}}},
{{{0x0a289bd71e04f676, 0x208e1c52d6420f95, 0x5186d8b034691fab, 0x255751442a9fb351}},
{{0x915f7ff576f121a7, 0xc34a32272fcd87e3, 0xccba2fde4d1be526, 0x6bba828f8969899b}},
{{0xe2d1bc6690fe3901, 0x4cb54a18a0997ad5, 0x971d6914af8460d4, 0x559d504f7f6b7be4}}},
{{{0xa7738378b3eb54d5, 0x1d69d366a5553c7c, 0x0a26cf62f92800ba, 0x01ab12d5807e3217}},
{{0x9c4891e7f6d266fd, 0x0744a19b0307781b, 0x88388f1d6061e23b, 0x123ea6a3354bd50e}},
{{0x118d189041e32d96, 0xb9ede3c2d8315848, 0x1eab4271d83245d9, 0x4a3961e2c918a154}}},
{{{0x71dc3be0f8e6bba0, 0xd6cef8347effe30a, 0xa992425fe13a476a, 0x2cd6bce3fb1db763}},
{{0x0327d644f3233f1e, 0x499a260e34fcf016, 0x83b5a716f2dab979, 0x68aceead9bd4111f}},
{{0x38b4c90ef3d7c210, 0x308e6e24b7ad040c, 0x3860d9f1b7e73e23, 0x595760d5b508f597}}},
{{{0x6129bfe104aa6397, 0x8f960008a4a7fccb, 0x3f8bc0897d909458, 0x709fa43edcb291a9}},
{{0x882acbebfd022790, 0x89af3305c4115760, 0x65f492e37d3473f4, 0x2cb2c5df54515a2b}},
{{0xeb0a5d8c63fd2aca, 0xd22bc1662e694eff, 0x2723f36ef8cbb03a, 0x70f029ecf0c8131f}}},
{{{0x461307b32eed3e33, 0xae042f33a45581e7, 0xc94449d3195f0366, 0x0b7d5d8a6c314858}},
{{0x2a6aafaa5e10b0b9, 0x78f0a370ef041aa9, 0x773efb77aa3ad61f, 0x44eca5a2a74bd9e1}},
{{0x25d448327b95d543, 0x70d38300a3340f1d, 0xde1c531c60e1c52b, 0x272224512c7de9e4}}},
{{{0x1abc92af49c5342e, 0xffeed811b2e6fad0, 0xefa28c8dfcc84e29, 0x11b5df18a44cc543}},
{{0xbf7bbb8a42a975fc, 0x8c5c397796ada358, 0xe27fc76fcdedaa48, 0x19735fd7f6bc20a6}},
{{0xe3ab90d042c84266, 0xeb848e0f7f19547e, 0x2503a1d065a497b9, 0x0fef911191df895f}}}

View File

@ -0,0 +1,96 @@
{{{0x9d103905d740913e, 0xfd399f05d140beb3, 0xa5c18434688f8a09, 0x44fd2f9298f81267}},
{{0x2fbc93c6f58c3b85, 0xcf932dc6fb8c0e19, 0x270b4898643d42c2, 0x07cf9d3a33d4ba65}},
{{0xabc91205877aaa68, 0x26d9e823ccaac49e, 0x5a1b7dcbdd43598c, 0x6f117b689f0c65a8}}},
{{{0x56611fe8a4fcd265, 0x3bd353fde5c1ba7d, 0x8131f31a214bd6bd, 0x2ab91587555bda62}},
{{0xaf25b0a84cee9730, 0x025a8430e8864b8a, 0xc11b50029f016732, 0x7a164e1b9a80f8f4}},
{{0x14ae933f0dd0d889, 0x589423221c35da62, 0xd170e5458cf2db4c, 0x5a2826af12b9b4c6}}},
{{{0x7f9182c3a447d6ba, 0xd50014d14b2729b7, 0xe33cf11cb864a087, 0x154a7e73eb1b55f3}},
{{0xa212bc4408a5bb33, 0x8d5048c3c75eed02, 0xdd1beb0c5abfec44, 0x2945ccf146e206eb}},
{{0xbcbbdbf1812a8285, 0x270e0807d0bdd1fc, 0xb41b670b1bbda72d, 0x43aabe696b3bb69a}}},
{{{0xba6f2c9aaa3221b1, 0x6ca021533bba23a7, 0x9dea764f92192c3a, 0x1d6edd5d2e5317e0}},
{{0x6b1a5cd0944ea3bf, 0x7470353ab39dc0d2, 0x71b2528228542e49, 0x461bea69283c927e}},
{{0xf1836dc801b8b3a2, 0xb3035f47053ea49a, 0x529c41ba5877adf3, 0x7a9fbb1c6a0f90a7}}},
{{{0xf36e217e039d8064, 0x98a081b6f520419b, 0x96cbc608e75eb044, 0x49c05a51fadc9c8f}},
{{0x9b2e678aa6a8632f, 0xa6509e6f51bc46c5, 0xceb233c9c686f5b5, 0x34b9ed338add7f59}},
{{0x06b4e8bf9045af1b, 0xe2ff83e8a719d22f, 0xaaf6fc2993d4cf16, 0x73c172021b008b06}}},
{{{0x315f5b0249864348, 0x3ed6b36977088381, 0xa3a075556a8deb95, 0x18ab598029d5c77f}},
{{0x2fbf00848a802ade, 0xe5d9fecf02302e27, 0x113e847117703406, 0x4275aae2546d8faf}},
{{0xd82b2cc5fd6089e9, 0x031eb4a13282e4a4, 0x44311199b51a8622, 0x3dc65522b53df948}}},
{{{0x506f013b327fbf93, 0xaefcebc99b776f6b, 0x9d12b232aaad5968, 0x0267882d176024a7}},
{{0xbf70c222a2007f6d, 0xbf84b39ab5bcdedb, 0x537a0e12fb07ba07, 0x234fd7eec346f241}},
{{0x5360a119732ea378, 0x2437e6b1df8dd471, 0xa2ef37f891a7e533, 0x497ba6fdaa097863}}},
{{{0x040bcd86468ccf0b, 0xd3829ba42a9910d6, 0x7508300807b25192, 0x43b5cd4218d05ebf}},
{{0x24cecc0313cfeaa0, 0x8648c28d189c246d, 0x2dbdbdfac1f2d4d0, 0x61e22917f12de72b}},
{{0x5d9a762f9bd0b516, 0xeb38af4e373fdeee, 0x032e5a7d93d64270, 0x511d61210ae4d842}}},
{{{0x081386484420de87, 0x8a1cf016b592edb4, 0x39fa4e2729942d25, 0x71a7fe6fe2482810}},
{{0x92c676ef950e9d81, 0xa54620cdc0d7044f, 0xaa9b36646f8f1248, 0x6d325924ddb855e3}},
{{0x6c7182b8a5c8c854, 0x33fd1479fe5f2a03, 0x72cf591883778d0c, 0x4746c4b6559eeaa9}}},
{{{0x348546c864741147, 0x7d35aedd0efcc849, 0xff939a760672a332, 0x219663497db5e6d6}},
{{0xd3777b3c6dc69a2b, 0xdefab2276f89f617, 0x45651cf7b53a16b5, 0x5c9a51de34fe9fb7}},
{{0xf510f1cf79f10e67, 0xffdddaa1e658515b, 0x09c3a71710142277, 0x4804503c608223bb}}},
{{{0x3b6821d23a36d175, 0xbbb40aa7e99b9e32, 0x5d9e5ce420838a47, 0x771e098858de4c5e}},
{{0xc4249ed02ca37fc7, 0xa059a0e3a615acab, 0x88a96ed7c96e0e23, 0x553398a51650696d}},
{{0x9a12f5d278451edf, 0x3ada5d7985899ccb, 0x477f4a2d9fa59508, 0x5a5ed1d68ff5a611}}},
{{{0xbae5e0c558527359, 0x392e5c19cadb9d7e, 0x28653c1eda1cabe9, 0x019b60135fefdc44}},
{{0x1195122afe150e83, 0xcf209a257e4b35d8, 0x7387f8291e711e20, 0x44acb897d8bf92f0}},
{{0x1e6068145e134b83, 0xc4f5e64f24304c16, 0x506e88a8fc1a3ed7, 0x150c49fde6ad2f92}}},
{{{0xb849863c9cdca868, 0xc83f44dbb8714ad0, 0xfe3ee3560c36168d, 0x78a6d7791e05fbc1}},
{{0x8e7bf29509471138, 0x5d6fef394f75a651, 0x10af79c425a708ad, 0x6b2b5a075bb99922}},
{{0x58bf704b47a0b976, 0xa601b355741748d5, 0xaa2b1fb1d542f590, 0x725c7ffc4ad55d00}}},
{{{0x91802bf71cd098c0, 0xfe416ca4ed5e6366, 0xdf585d714902994c, 0x4cd54625f855fae7}},
{{0xe4426715d1cf99b2, 0x7352d51102a20d34, 0x23d1157b8b12109f, 0x794cc9277cb1f3a3}},
{{0x4af6c426c2ac5053, 0xbc9aedad32f67258, 0x2ad032f10a311021, 0x7008357b6fcc8e85}}},
{{{0xd01b9fbb82584a34, 0x47ab6463d2b4792b, 0xb631639c48536202, 0x13a92a3669d6d428}},
{{0x0b88672738773f01, 0xb8ccc8fa95fbccfb, 0x8d2dd5a3b9ad29b6, 0x06ef7e9851ad0f6a}},
{{0xca93771cc0577de5, 0x7540e41e5035dc5c, 0x24680f01d802e071, 0x3c296ddf8a2af86a}}},
{{{0xfceb4d2ebb1f2541, 0xb89510c740adb91f, 0xfc71a37dd0a1ad05, 0x0a892c700747717b}},
{{0xaead15f9d914a713, 0xa92f7bf98c8ff912, 0xaff823179f53d730, 0x7a99d393490c77ba}},
{{0x8f52ed2436bda3e8, 0x77a8c84157e80794, 0xa5a96563262f9ce0, 0x286762d28302f7d2}}},
{{{0x7c558e2bce2ef5bd, 0xe4986cb46747bc63, 0x154a179f3bbb89b8, 0x7686f2a3d6f1767a}},
{{0x4e7836093ce35b25, 0x82e1181db26baa97, 0x0cc192d3cbc7b83f, 0x32f1da046a9d9d3a}},
{{0xaa8d12a66d597c6a, 0x8f11930304d3852b, 0x3f91dc73c209b022, 0x561305f8a9ad28a6}}},
{{{0x6722cc28e7b0c0d5, 0x709de9bbdb075c53, 0xcaf68da7d7010a61, 0x030a1aef2c57cc6c}},
{{0x100c978dec92aed1, 0xca43d5434d6d73e5, 0x83131b22d847ba48, 0x00aaec53e35d4d2c}},
{{0x7bb1f773003ad2aa, 0x0b3f29802b216608, 0x7821dc86520ed23e, 0x20be9c1c24065480}}},
{{{0x20e0e44ae2025e60, 0xb03b3b2fcbdcb938, 0x105d639cf95a0d1c, 0x69764c545067e311}},
{{0xe15387d8249673a6, 0x5943bc2df546e493, 0x1c7f9a81c36f63b5, 0x750ab3361f0ac1de}},
{{0x1e8a3283a2f81037, 0x6f2eda23bd7fcbf1, 0xb72fd15bac2e2563, 0x54f96b3fb7075040}}},
{{{0x177dafc616b11ecd, 0x89764b9cfa576479, 0xb7a8a110e6ece785, 0x78e6839fbe85dbf0}},
{{0x0fadf20429669279, 0x3adda2047d7d724a, 0x6f3d94828c5760f1, 0x3d7fe9c52bb7539e}},
{{0x70332df737b8856b, 0x75d05d43041a178a, 0x320ff74aa0e59e22, 0x70f268f350088242}}},
{{{0x2324112070dcf355, 0x380cc97ee7fce117, 0xb31ddeed3552b698, 0x404e56c039b8c4b9}},
{{0x66864583b1805f47, 0xf535c5d160dd7c19, 0xe9874eb71e4cb006, 0x7c0d345cfad889d9}},
{{0x591f1f4b8c78338a, 0xa0366ab167e0b5e1, 0x5cbc4152b45f3d44, 0x20d754762aaec777}}},
{{{0x9d74feb135b9f543, 0x84b37df1de8c956c, 0xe9322b0757138ba9, 0x38b8ada8790b4ce1}},
{{0x5e8fc36fc73bb758, 0xace543a5363cbb9a, 0xa9934a7d903bc922, 0x2b8f1e46f3ceec62}},
{{0xb5c04a9cdf51f95d, 0x2b3952aecb1fdeac, 0x1d106d8b328b66da, 0x049aeb32ceba1953}}},
{{{0xd7767d3c63dcfe7e, 0x209c594897856e40, 0xb6676861e14f7c13, 0x51c665e0c8d625fc}},
{{0xaa507d0b75fc7931, 0x0fef924b7a6725d3, 0x1d82542b396b3930, 0x795ee17530f674fc}},
{{0x254a5b0a52ecbd81, 0x5d411f6ee034afe7, 0xe6a24d0dcaee4a31, 0x6cd19bf49dc54477}}},
{{{0x7e87619052179ca3, 0x571d0a060b2c9f85, 0x80a2baa88499711e, 0x7520f3db40b2e638}},
{{0x1ffe612165afc386, 0x082a2a88b8d51b10, 0x76f6627e20990baa, 0x5e01b3a7429e43e7}},
{{0x3db50be3d39357a1, 0x967b6cdd599e94a5, 0x1a309a64df311e6e, 0x71092c9ccef3c986}}},
{{{0x53d8523f0364918c, 0xa2b404f43fab6b1c, 0x080b4a9e6681e5a4, 0x0ea15b03d0257ba7}},
{{0x856bd8ac74051dcf, 0x03f6a40855b7aa1e, 0x3a4ae7cbc9743ceb, 0x4173a5bb7137abde}},
{{0x17c56e31f0f9218a, 0x5a696e2b1afc4708, 0xf7931668f4b2f176, 0x5fc565614a4e3a67}}},
{{{0x136e570dc46d7ae5, 0x0fd0aacc54f8dc8f, 0x59549f03310dad86, 0x62711c414c454aa1}},
{{0x4892e1e67790988e, 0x01d5950f1c5cd722, 0xe3b0819ae5923eed, 0x3214c7409d46651b}},
{{0x1329827406651770, 0x3ba4a0668a279436, 0xd9b6b8ec185d223c, 0x5bea94073ecb833c}}},
{{{0x641dbf0912c89be4, 0xacf38b317d6e579c, 0xabfe9e02f697b065, 0x3aacd5c148f61eec}},
{{0xb470ce63f343d2f8, 0x0067ba8f0543e8f1, 0x35da51a1a2117b6f, 0x4ad0785944f1bd2f}},
{{0x858e3b34c3318301, 0xdc99c04707316826, 0x34085b2ed39da88c, 0x3aff0cb1d902853d}}},
{{{0x87c5c7eb3a20405e, 0x8ee311efedad56c9, 0x29252e48ad29d5f9, 0x110e7e86f4cd251d}},
{{0x9226430bf4c53505, 0x68e49c13261f2283, 0x09ef33788fd327c6, 0x2ccf9f732bd99e7f}},
{{0x57c0d89ed603f5e4, 0x12888628f0b0200c, 0x53172709a02e3bb7, 0x05c557e0b9693a37}}},
{{{0xd8f9ce311fc97e6f, 0x7a3f263011f9fdae, 0xe15b7ea08bed25dd, 0x6e154c178fe9875a}},
{{0xf776bbb089c20eb0, 0x61f85bf6fa0fd85c, 0xb6b93f4e634421fb, 0x289fef0841861205}},
{{0xcf616336fed69abf, 0x9b16e4e78335c94f, 0x13789765753a7fe7, 0x6afbf642a95ca319}}},
{{{0x7da8de0c62f5d2c1, 0x98fc3da4b00e7b9a, 0x7deb6ada0dad70e0, 0x0db4b851b95038c4}},
{{0x5de55070f913a8cc, 0x7d1d167b2b0cf561, 0xda2956b690ead489, 0x12c093cedb801ed9}},
{{0xfc147f9308b8190f, 0x06969da0a11ae310, 0xcee75572dac7d7fd, 0x33aa8799c6635ce6}}},
{{{0xaf0ff51ebd085cf2, 0x78f51a8967d33f1f, 0x6ec2bfe15060033c, 0x233c6f29e8e21a86}},
{{0x8348f588fc156cb1, 0x6da2ba9b1a0a6d27, 0xe2262d5c87ca5ab6, 0x212cd0c1c8d589a6}},
{{0xd2f4d5107f18c781, 0x122ecdf2527e9d28, 0xa70a862a3d3d3341, 0x1db7778911914ce3}}},
{{{0xddf352397c6bc26f, 0x7a97e2cc53d50113, 0x7c74f43abf79a330, 0x31ad97ad26e2adfc}},
{{0xb3394769dd701ab6, 0xe2b8ded419cf8da5, 0x15df4161fd2ac852, 0x7ae2ca8a017d24be}},
{{0xb7e817ed0920b962, 0x1e8518cc3f19da9d, 0xe491c14f25560a64, 0x1ed1fc53a6622c83}}}

File diff suppressed because it is too large

View File

@ -0,0 +1,8 @@
#include "ge25519.h"
void ge25519_double(ge25519_p3 *r, const ge25519_p3 *p)
{
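/* r = 2*p, computed via the p1p1 intermediate representation */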
ge25519_p1p1 grp1p1;
ge25519_dbl_p1p1(&grp1p1, (ge25519_p2 *)p);
ge25519_p1p1_to_p3(r, &grp1p1);
}

View File

@ -0,0 +1,102 @@
#include "fe25519.h"
#include "sc25519.h"
#include "ge25519.h"
#define S1_SWINDOWSIZE 5
#define PRE1_SIZE (1<<(S1_SWINDOWSIZE-2))
#define S2_SWINDOWSIZE 7
#define PRE2_SIZE (1<<(S2_SWINDOWSIZE-2))
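/* A sliding window of width w uses the odd multiples 1*P, 3*P, ..., (2^(w-1)-1)*P,
   i.e. 2^(w-2) table entries: 8 in the runtime table pre1, 32 in the precomputed
   base-point table pre2. */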
ge25519_niels pre2[PRE2_SIZE] = {
#include "ge25519_base_slide_multiples.data"
};
static const fe25519 ec2d = {{0xEBD69B9426B2F146, 0x00E0149A8283B156, 0x198E80F2EEF3D130, 0xA406D9DC56DFFCE7}};
static void setneutral(ge25519 *r)
{
fe25519_setint(&r->x,0);
fe25519_setint(&r->y,1);
fe25519_setint(&r->z,1);
fe25519_setint(&r->t,0);
}
/* computes [s1]p1 + [s2]B, where B is the ed25519 base point (table pre2) */
void ge25519_double_scalarmult_vartime(ge25519_p3 *r, const ge25519_p3 *p1, const sc25519 *s1, const sc25519 *s2)
{
signed char slide1[256], slide2[256];
ge25519_pniels pre1[PRE1_SIZE], neg;
ge25519_p3 d1;
ge25519_p1p1 t;
ge25519_niels nneg;
fe25519 d;
int i;
sc25519_slide(slide1, s1, S1_SWINDOWSIZE);
sc25519_slide(slide2, s2, S2_SWINDOWSIZE);
/* precomputation */
pre1[0] = *(ge25519_pniels *)p1;
ge25519_dbl_p1p1(&t,(ge25519_p2 *)pre1); ge25519_p1p1_to_p3(&d1, &t);
/* Convert pre1[0] to projective Niels representation */
d = pre1[0].ysubx;
fe25519_sub(&pre1[0].ysubx, &pre1[0].xaddy, &pre1[0].ysubx);
fe25519_add(&pre1[0].xaddy, &pre1[0].xaddy, &d);
fe25519_mul(&pre1[0].t2d, &pre1[0].t2d, &ec2d);
for(i=0;i<PRE1_SIZE-1;i++)
{
ge25519_pnielsadd_p1p1(&t, &d1, &pre1[i]); ge25519_p1p1_to_p3((ge25519_p3 *)&pre1[i+1], &t);
/* Convert pre1[i+1] to projective Niels representation */
d = pre1[i+1].ysubx;
fe25519_sub(&pre1[i+1].ysubx, &pre1[i+1].xaddy, &pre1[i+1].ysubx);
fe25519_add(&pre1[i+1].xaddy, &pre1[i+1].xaddy, &d);
fe25519_mul(&pre1[i+1].t2d, &pre1[i+1].t2d, &ec2d);
}
setneutral(r);
for (i = 255;i >= 0;--i) {
if (slide1[i] || slide2[i]) goto firstbit;
}
for(;i>=0;i--)
{
firstbit:
ge25519_dbl_p1p1(&t, (ge25519_p2 *)r);
if(slide1[i]>0)
{
ge25519_p1p1_to_p3(r, &t);
ge25519_pnielsadd_p1p1(&t, r, &pre1[slide1[i]/2]);
}
else if(slide1[i]<0)
{
ge25519_p1p1_to_p3(r, &t);
neg = pre1[-slide1[i]/2];
d = neg.ysubx;
neg.ysubx = neg.xaddy;
neg.xaddy = d;
fe25519_neg(&neg.t2d, &neg.t2d);
ge25519_pnielsadd_p1p1(&t, r, &neg);
}
if(slide2[i]>0)
{
ge25519_p1p1_to_p3(r, &t);
ge25519_nielsadd_p1p1(&t, r, &pre2[slide2[i]/2]);
}
else if(slide2[i]<0)
{
ge25519_p1p1_to_p3(r, &t);
nneg = pre2[-slide2[i]/2];
d = nneg.ysubx;
nneg.ysubx = nneg.xaddy;
nneg.xaddy = d;
fe25519_neg(&nneg.t2d, &nneg.t2d);
ge25519_nielsadd_p1p1(&t, r, &nneg);
}
ge25519_p1p1_to_p2((ge25519_p2 *)r, &t);
}
}
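For context, this routine is the core of single-signature verification: with the public key unpacked in negated form and h = H(R,A,m), checking R = [s]B - [h]A becomes one double scalar multiplication followed by a pack-and-compare. Below is a minimal sketch of that flow, assuming this package's helpers (get_hram, sc25519_from64bytes, sc25519_from32bytes, crypto_verify_32) and a caller-provided scratch buffer of smlen bytes, which get_hram uses as working space; verify_sketch and scratch are hypothetical names, and this is illustrative rather than the package's open.c verbatim:
static int verify_sketch(unsigned char *scratch, /* hypothetical; >= smlen bytes */
                         const unsigned char *sm, unsigned long long smlen,
                         const unsigned char *pk)
{
  ge25519_p3 a, rcheck;
  sc25519 h, s;
  unsigned char hram[64], rbuf[32]; /* 64 = crypto_hash_sha512_BYTES */

  if (smlen < 64) return -1;
  if (ge25519_unpackneg_vartime(&a, pk)) return -1;       /* a = -A */
  get_hram(hram, sm, pk, scratch, smlen);                 /* hram = H(R,A,m) */
  sc25519_from64bytes(&h, hram);
  sc25519_from32bytes(&s, sm + 32);
  ge25519_double_scalarmult_vartime(&rcheck, &a, &h, &s); /* [h](-A) + [s]B */
  ge25519_pack(rbuf, &rcheck);
  return crypto_verify_32(rbuf, sm);                      /* 0 iff equal to R */
}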

View File

@ -0,0 +1,9 @@
#include "fe25519.h"
#include "ge25519.h"
int ge25519_isneutral_vartime(const ge25519_p3 *p)
{
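/* The neutral element is (X:Y:Z:T) = (0:1:1:0), so x must be 0 and y must equal z */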
if(!fe25519_iszero_vartime(&p->x)) return 0;
if(!fe25519_iseq_vartime(&p->y, &p->z)) return 0;
return 1;
}

View File

@ -0,0 +1,102 @@
#include "fe25519.h"
#include "sc25519.h"
#include "ge25519.h"
#include "index_heap.h"
static void setneutral(ge25519 *r)
{
fe25519_setint(&r->x,0);
fe25519_setint(&r->y,1);
fe25519_setint(&r->z,1);
fe25519_setint(&r->t,0);
}
static void ge25519_scalarmult_vartime_2limbs(ge25519 *r, ge25519 *p, sc25519 *s)
{
if (s->v[1] == 0 && s->v[0] == 1) /* This will happen most of the time after Bos-Coster */
*r = *p;
else if (s->v[1] == 0 && s->v[0] == 0) /* Can only happen if all scalars entering Bos-Coster were 0 */
setneutral(r);
else
{
ge25519 d;
unsigned long long mask = (1ULL << 63);
int i = 1;
while(!(mask & s->v[1]) && mask != 0)
mask >>= 1;
if(mask == 0)
{
mask = (1ULL << 63);
i = 0;
while(!(mask & s->v[0]) && mask != 0)
mask >>= 1;
}
d = *p;
mask >>= 1;
for(;mask != 0;mask >>= 1)
{
ge25519_double(&d,&d);
if(s->v[i] & mask)
ge25519_add(&d,&d,p);
}
if(i==1)
{
mask = (1ULL << 63);
for(;mask != 0;mask >>= 1)
{
ge25519_double(&d,&d);
if(s->v[0] & mask)
ge25519_add(&d,&d,p);
}
}
*r = d;
}
}
/* caller's responsibility to ensure npoints >= 5 */
void ge25519_multi_scalarmult_vartime(ge25519_p3 *r, ge25519_p3 *p, sc25519 *s, const unsigned long long npoints)
{
unsigned long long pos[npoints];
unsigned long long hlen=((npoints+1)/2)|1;
unsigned long long max1, max2,i;
heap_init(pos, hlen, s);
for(i=0;;i++)
{
heap_get2max(pos, &max1, &max2, s);
if((s[max1].v[3] == 0) || (sc25519_iszero_vartime(&s[max2]))) break;
sc25519_sub_nored(&s[max1],&s[max1],&s[max2]);
ge25519_add(&p[max2],&p[max2],&p[max1]);
heap_rootreplaced(pos, hlen, s);
}
for(;;i++)
{
heap_get2max(pos, &max1, &max2, s);
if((s[max1].v[2] == 0) || (sc25519_iszero_vartime(&s[max2]))) break;
sc25519_sub_nored(&s[max1],&s[max1],&s[max2]);
ge25519_add(&p[max2],&p[max2],&p[max1]);
heap_rootreplaced_3limbs(pos, hlen, s);
}
/* We know that (npoints-1)/2 scalars are only 128-bit scalars */
heap_extend(pos, hlen, npoints, s);
hlen = npoints;
for(;;i++)
{
heap_get2max(pos, &max1, &max2, s);
if((s[max1].v[1] == 0) || (sc25519_iszero_vartime(&s[max2]))) break;
sc25519_sub_nored(&s[max1],&s[max1],&s[max2]);
ge25519_add(&p[max2],&p[max2],&p[max1]);
heap_rootreplaced_2limbs(pos, hlen, s);
}
for(;;i++)
{
heap_get2max(pos, &max1, &max2, s);
if(sc25519_iszero_vartime(&s[max2])) break;
sc25519_sub_nored(&s[max1],&s[max1],&s[max2]);
ge25519_add(&p[max2],&p[max2],&p[max1]);
heap_rootreplaced_1limb(pos, hlen, s);
}
ge25519_scalarmult_vartime_2limbs(r, &p[max1], &s[max1]);
}
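The loops above implement Bos-Coster multi-scalar multiplication: the heap repeatedly yields the indices of the two largest scalars, s[max1] >= s[max2]; replacing s[max1] with s[max1]-s[max2] and p[max2] with p[max1]+p[max2] leaves the sum of all s_i*P_i unchanged while rapidly shrinking the scalars, and the phased heap routines (heap_rootreplaced, _3limbs, _2limbs, _1limb) drop limbs from the comparison as the scalars get shorter. A toy model of the invariant, with plain integers standing in for curve points (illustrative only, not part of the package):
#include <stdio.h>

int main(void)
{
  /* three "points" (plain integers here) and their scalars */
  unsigned long long s[3] = {23, 18, 7};
  unsigned long long p[3] = {100, 200, 300};
  unsigned long long want = s[0]*p[0] + s[1]*p[1] + s[2]*p[2];
  int a = 0, b, i;

  for (;;) {
    a = 0;                                    /* index of largest scalar */
    for (i = 1; i < 3; i++) if (s[i] > s[a]) a = i;
    b = (a == 0) ? 1 : 0;                     /* index of second largest */
    for (i = 0; i < 3; i++) if (i != a && s[i] > s[b]) b = i;
    if (s[b] == 0) break;                     /* at most one scalar left */
    s[a] -= s[b];                             /* shrink the largest scalar */
    p[b] += p[a];                             /* "point addition" */
  }
  /* all weight has been pushed into a single scalar/point pair */
  printf("%llu == %llu\n", want, s[a]*p[a]);
  return 0;
}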

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,13 @@
#include "fe25519.h"
#include "sc25519.h"
#include "ge25519.h"
void ge25519_pack(unsigned char r[32], const ge25519_p3 *p)
{
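/* Affine encoding: pack y = Y/Z as 32 little-endian bytes, then store the parity of x = X/Z in the top bit of byte 31 */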
fe25519 tx, ty, zi;
fe25519_invert(&zi, &p->z);
fe25519_mul(&tx, &p->x, &zi);
fe25519_mul(&ty, &p->y, &zi);
fe25519_pack(r, &ty);
r[31] ^= fe25519_getparity(&tx) << 7;
}

File diff suppressed because it is too large

View File

@ -0,0 +1,68 @@
#include "fe25519.h"
#include "sc25519.h"
#include "ge25519.h"
/* Multiples of the base point in Niels' representation */
static const ge25519_niels ge25519_base_multiples_niels[] = {
#ifdef SMALLTABLES
#include "ge25519_base_niels_smalltables.data"
#else
#include "ge25519_base_niels.data"
#endif
};
/* d */
static const fe25519 ecd = {{0x75EB4DCA135978A3, 0x00700A4D4141D8AB, 0x8CC740797779E898, 0x52036CEE2B6FFE73}};
void ge25519_scalarmult_base(ge25519_p3 *r, const sc25519 *s)
{
signed char b[64];
int i;
ge25519_niels t;
fe25519 d;
sc25519_window4(b,s);
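/* b[] now holds the signed radix-16 digits of s, i.e. s = sum_i b[i]*16^i; each
   choose_t call below selects the corresponding precomputed multiple of the base
   point from the table */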
#ifdef SMALLTABLES
ge25519_p1p1 tp1p1;
choose_t((ge25519_niels *)r, 0, (signed long long) b[1], ge25519_base_multiples_niels);
fe25519_sub(&d, &r->y, &r->x);
fe25519_add(&r->y, &r->y, &r->x);
r->x = d;
r->t = r->z;
fe25519_setint(&r->z,2);
for(i=3;i<64;i+=2)
{
choose_t(&t, (unsigned long long) i/2, (signed long long) b[i], ge25519_base_multiples_niels);
ge25519_nielsadd2(r, &t);
}
ge25519_dbl_p1p1(&tp1p1,(ge25519_p2 *)r);
ge25519_p1p1_to_p2((ge25519_p2 *)r, &tp1p1);
ge25519_dbl_p1p1(&tp1p1,(ge25519_p2 *)r);
ge25519_p1p1_to_p2((ge25519_p2 *)r, &tp1p1);
ge25519_dbl_p1p1(&tp1p1,(ge25519_p2 *)r);
ge25519_p1p1_to_p2((ge25519_p2 *)r, &tp1p1);
ge25519_dbl_p1p1(&tp1p1,(ge25519_p2 *)r);
ge25519_p1p1_to_p3(r, &tp1p1);
choose_t(&t, (unsigned long long) 0, (signed long long) b[0], ge25519_base_multiples_niels);
fe25519_mul(&t.t2d, &t.t2d, &ecd);
ge25519_nielsadd2(r, &t);
for(i=2;i<64;i+=2)
{
choose_t(&t, (unsigned long long) i/2, (signed long long) b[i], ge25519_base_multiples_niels);
ge25519_nielsadd2(r, &t);
}
#else
choose_t((ge25519_niels *)r, 0, (signed long long) b[0], ge25519_base_multiples_niels);
fe25519_sub(&d, &r->y, &r->x);
fe25519_add(&r->y, &r->y, &r->x);
r->x = d;
r->t = r->z;
fe25519_setint(&r->z,2);
for(i=1;i<64;i++)
{
choose_t(&t, (unsigned long long) i, (signed long long) b[i], ge25519_base_multiples_niels);
ge25519_nielsadd2(r, &t);
}
#endif
}

View File

@ -0,0 +1,60 @@
#include "fe25519.h"
#include "ge25519.h"
/* d */
static const fe25519 ecd = {{0x75EB4DCA135978A3, 0x00700A4D4141D8AB, 0x8CC740797779E898, 0x52036CEE2B6FFE73}};
/* sqrt(-1) */
static const fe25519 sqrtm1 = {{0xC4EE1B274A0EA0B0, 0x2F431806AD2FE478, 0x2B4D00993DFBD7A7, 0x2B8324804FC1DF0B}};
/* return 0 on success, -1 otherwise */
int ge25519_unpackneg_vartime(ge25519_p3 *r, const unsigned char p[32])
{
fe25519 t, chk, num, den, den2, den4, den6;
unsigned char par = p[31] >> 7;
fe25519_setint(&r->z,1);
fe25519_unpack(&r->y, p);
fe25519_square(&num, &r->y); /* num = y^2 */
fe25519_mul(&den, &num, &ecd); /* den = d*y^2 */
fe25519_sub(&num, &num, &r->z); /* num = y^2-1 */
fe25519_add(&den, &r->z, &den); /* den = d*y^2+1 */
/* Computation of sqrt(num/den):
1. computation of num^((p-5)/8)*den^((7p-35)/8) = (num*den^7)^((p-5)/8)
*/
fe25519_square(&den2, &den);
fe25519_square(&den4, &den2);
fe25519_mul(&den6, &den4, &den2);
fe25519_mul(&t, &den6, &num);
fe25519_mul(&t, &t, &den);
fe25519_pow2523(&t, &t);
/* 2. computation of r->x = t * num * den^3
*/
fe25519_mul(&t, &t, &num);
fe25519_mul(&t, &t, &den);
fe25519_mul(&t, &t, &den);
fe25519_mul(&r->x, &t, &den);
/* 3. Check whether sqrt computation gave correct result, multiply by sqrt(-1) if not:
*/
fe25519_square(&chk, &r->x);
fe25519_mul(&chk, &chk, &den);
if (!fe25519_iseq_vartime(&chk, &num))
fe25519_mul(&r->x, &r->x, &sqrtm1);
/* 4. Now we have one of the two square roots, except if input was not a square
*/
fe25519_square(&chk, &r->x);
fe25519_mul(&chk, &chk, &den);
if (!fe25519_iseq_vartime(&chk, &num))
return -1;
/* 5. Choose the desired square root according to parity:
*/
if(fe25519_getparity(&r->x) != (1-par))
fe25519_neg(&r->x, &r->x);
fe25519_mul(&r->t, &r->x, &r->y);
return 0;
}
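The square-root computation above exploits p = 2^255 - 19 ≡ 5 (mod 8): one exponentiation suffices, and fe25519_pow2523 raises to the power (p-5)/8. With u = num and v = den as in the comments, steps 1 and 2 compute

\beta = u\,v^3\,\bigl(u\,v^7\bigr)^{(p-5)/8}, \qquad \beta^2 v = \pm u \quad\text{whenever } u/v \text{ is a square,}

so if the check in step 3 finds \beta^2 v = -u, multiplying by \sqrt{-1} gives a correct root, and if the recheck in step 4 still fails then u/v is not a square and the point is rejected.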

View File

@ -0,0 +1,476 @@
# qhasm: int64 hp
# qhasm: int64 hlen
# qhasm: int64 sp
# qhasm: int64 pp
# qhasm: input hp
# qhasm: input hlen
# qhasm: input sp
# qhasm: int64 prc
# qhasm: int64 plc
# qhasm: int64 pc
# qhasm: int64 d
# qhasm: int64 spp
# qhasm: int64 sprc
# qhasm: int64 spc
# qhasm: int64 c0
# qhasm: int64 c1
# qhasm: int64 c2
# qhasm: int64 c3
# qhasm: int64 t0
# qhasm: int64 t1
# qhasm: int64 t2
# qhasm: int64 t3
# qhasm: int64 p0
# qhasm: int64 p1
# qhasm: int64 p2
# qhasm: int64 p3
# qhasm: int64 caller1
# qhasm: int64 caller2
# qhasm: int64 caller3
# qhasm: int64 caller4
# qhasm: int64 caller5
# qhasm: int64 caller6
# qhasm: int64 caller7
# qhasm: caller caller1
# qhasm: caller caller2
# qhasm: caller caller3
# qhasm: caller caller4
# qhasm: caller caller5
# qhasm: caller caller6
# qhasm: caller caller7
# qhasm: stack64 caller1_stack
# qhasm: stack64 caller2_stack
# qhasm: stack64 caller3_stack
# qhasm: stack64 caller4_stack
# qhasm: stack64 caller5_stack
# qhasm: stack64 caller6_stack
# qhasm: stack64 caller7_stack
# qhasm: enter crypto_sign_ed25519_amd64_64_heap_rootreplaced
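# This routine replaces the root of a binary max-heap and restores heap
# order: hp points to the heap (an array of uint64 indices), hlen is the
# heap length, and sp points to the scalar array (32 bytes, i.e. four
# 64-bit limbs, per scalar); scalars are compared with a full 4-limb
# subtract-with-borrow.  The new root is sifted down along the
# larger-child path all the way to a leaf and only then sifted back up:
# after a Bos-Coster subtraction the root scalar has shrunk, so it
# usually belongs near the bottom and this order saves comparisons.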
.text
.p2align 5
.globl _crypto_sign_ed25519_amd64_64_heap_rootreplaced
.globl crypto_sign_ed25519_amd64_64_heap_rootreplaced
_crypto_sign_ed25519_amd64_64_heap_rootreplaced:
crypto_sign_ed25519_amd64_64_heap_rootreplaced:
mov %rsp,%r11
and $31,%r11
add $64,%r11
sub %r11,%rsp
# qhasm: caller1_stack = caller1
# asm 1: movq <caller1=int64#9,>caller1_stack=stack64#1
# asm 2: movq <caller1=%r11,>caller1_stack=0(%rsp)
movq %r11,0(%rsp)
# qhasm: caller2_stack = caller2
# asm 1: movq <caller2=int64#10,>caller2_stack=stack64#2
# asm 2: movq <caller2=%r12,>caller2_stack=8(%rsp)
movq %r12,8(%rsp)
# qhasm: caller3_stack = caller3
# asm 1: movq <caller3=int64#11,>caller3_stack=stack64#3
# asm 2: movq <caller3=%r13,>caller3_stack=16(%rsp)
movq %r13,16(%rsp)
# qhasm: caller4_stack = caller4
# asm 1: movq <caller4=int64#12,>caller4_stack=stack64#4
# asm 2: movq <caller4=%r14,>caller4_stack=24(%rsp)
movq %r14,24(%rsp)
# qhasm: caller5_stack = caller5
# asm 1: movq <caller5=int64#13,>caller5_stack=stack64#5
# asm 2: movq <caller5=%r15,>caller5_stack=32(%rsp)
movq %r15,32(%rsp)
# qhasm: caller6_stack = caller6
# asm 1: movq <caller6=int64#14,>caller6_stack=stack64#6
# asm 2: movq <caller6=%rbx,>caller6_stack=40(%rsp)
movq %rbx,40(%rsp)
# qhasm: caller7_stack = caller7
# asm 1: movq <caller7=int64#15,>caller7_stack=stack64#7
# asm 2: movq <caller7=%rbp,>caller7_stack=48(%rsp)
movq %rbp,48(%rsp)
# qhasm: pp = 0
# asm 1: mov $0,>pp=int64#4
# asm 2: mov $0,>pp=%rcx
mov $0,%rcx
# qhasm: siftdownloop:
._siftdownloop:
# qhasm: prc = pp
# asm 1: mov <pp=int64#4,>prc=int64#5
# asm 2: mov <pp=%rcx,>prc=%r8
mov %rcx,%r8
# qhasm: prc *= 2
# asm 1: imulq $2,<prc=int64#5,>prc=int64#5
# asm 2: imulq $2,<prc=%r8,>prc=%r8
imulq $2,%r8,%r8
# qhasm: pc = prc
# asm 1: mov <prc=int64#5,>pc=int64#6
# asm 2: mov <prc=%r8,>pc=%r9
mov %r8,%r9
# qhasm: prc += 2
# asm 1: add $2,<prc=int64#5
# asm 2: add $2,<prc=%r8
add $2,%r8
# qhasm: pc += 1
# asm 1: add $1,<pc=int64#6
# asm 2: add $1,<pc=%r9
add $1,%r9
# qhasm: unsigned>? hlen - prc
# asm 1: cmp <prc=int64#5,<hlen=int64#2
# asm 2: cmp <prc=%r8,<hlen=%rsi
cmp %r8,%rsi
# comment:fp stack unchanged by jump
# qhasm: goto siftuploop if !unsigned>
jbe ._siftuploop
# qhasm: sprc = *(uint64 *)(hp + prc * 8)
# asm 1: movq (<hp=int64#1,<prc=int64#5,8),>sprc=int64#7
# asm 2: movq (<hp=%rdi,<prc=%r8,8),>sprc=%rax
movq (%rdi,%r8,8),%rax
# qhasm: sprc <<= 5
# asm 1: shl $5,<sprc=int64#7
# asm 2: shl $5,<sprc=%rax
shl $5,%rax
# qhasm: sprc += sp
# asm 1: add <sp=int64#3,<sprc=int64#7
# asm 2: add <sp=%rdx,<sprc=%rax
add %rdx,%rax
# qhasm: spc = *(uint64 *)(hp + pc * 8)
# asm 1: movq (<hp=int64#1,<pc=int64#6,8),>spc=int64#8
# asm 2: movq (<hp=%rdi,<pc=%r9,8),>spc=%r10
movq (%rdi,%r9,8),%r10
# qhasm: spc <<= 5
# asm 1: shl $5,<spc=int64#8
# asm 2: shl $5,<spc=%r10
shl $5,%r10
# qhasm: spc += sp
# asm 1: add <sp=int64#3,<spc=int64#8
# asm 2: add <sp=%rdx,<spc=%r10
add %rdx,%r10
# qhasm: c0 = *(uint64 *)(spc + 0)
# asm 1: movq 0(<spc=int64#8),>c0=int64#9
# asm 2: movq 0(<spc=%r10),>c0=%r11
movq 0(%r10),%r11
# qhasm: c1 = *(uint64 *)(spc + 8)
# asm 1: movq 8(<spc=int64#8),>c1=int64#10
# asm 2: movq 8(<spc=%r10),>c1=%r12
movq 8(%r10),%r12
# qhasm: c2 = *(uint64 *)(spc + 16)
# asm 1: movq 16(<spc=int64#8),>c2=int64#11
# asm 2: movq 16(<spc=%r10),>c2=%r13
movq 16(%r10),%r13
# qhasm: c3 = *(uint64 *)(spc + 24)
# asm 1: movq 24(<spc=int64#8),>c3=int64#12
# asm 2: movq 24(<spc=%r10),>c3=%r14
movq 24(%r10),%r14
# qhasm: carry? c0 -= *(uint64 *)(sprc + 0)
# asm 1: subq 0(<sprc=int64#7),<c0=int64#9
# asm 2: subq 0(<sprc=%rax),<c0=%r11
subq 0(%rax),%r11
# qhasm: carry? c1 -= *(uint64 *)(sprc + 8) - carry
# asm 1: sbbq 8(<sprc=int64#7),<c1=int64#10
# asm 2: sbbq 8(<sprc=%rax),<c1=%r12
sbbq 8(%rax),%r12
# qhasm: carry? c2 -= *(uint64 *)(sprc + 16) - carry
# asm 1: sbbq 16(<sprc=int64#7),<c2=int64#11
# asm 2: sbbq 16(<sprc=%rax),<c2=%r13
sbbq 16(%rax),%r13
# qhasm: carry? c3 -= *(uint64 *)(sprc + 24) - carry
# asm 1: sbbq 24(<sprc=int64#7),<c3=int64#12
# asm 2: sbbq 24(<sprc=%rax),<c3=%r14
sbbq 24(%rax),%r14
# qhasm: pc = prc if carry
# asm 1: cmovc <prc=int64#5,<pc=int64#6
# asm 2: cmovc <prc=%r8,<pc=%r9
cmovc %r8,%r9
# qhasm: spc = sprc if carry
# asm 1: cmovc <sprc=int64#7,<spc=int64#8
# asm 2: cmovc <sprc=%rax,<spc=%r10
cmovc %rax,%r10
# qhasm: spc -= sp
# asm 1: sub <sp=int64#3,<spc=int64#8
# asm 2: sub <sp=%rdx,<spc=%r10
sub %rdx,%r10
# qhasm: (uint64) spc >>= 5
# asm 1: shr $5,<spc=int64#8
# asm 2: shr $5,<spc=%r10
shr $5,%r10
# qhasm: spp = *(uint64 *)(hp + pp * 8)
# asm 1: movq (<hp=int64#1,<pp=int64#4,8),>spp=int64#5
# asm 2: movq (<hp=%rdi,<pp=%rcx,8),>spp=%r8
movq (%rdi,%rcx,8),%r8
# qhasm: *(uint64 *)(hp + pp * 8) = spc
# asm 1: movq <spc=int64#8,(<hp=int64#1,<pp=int64#4,8)
# asm 2: movq <spc=%r10,(<hp=%rdi,<pp=%rcx,8)
movq %r10,(%rdi,%rcx,8)
# qhasm: *(uint64 *)(hp + pc * 8) = spp
# asm 1: movq <spp=int64#5,(<hp=int64#1,<pc=int64#6,8)
# asm 2: movq <spp=%r8,(<hp=%rdi,<pc=%r9,8)
movq %r8,(%rdi,%r9,8)
# qhasm: pp = pc
# asm 1: mov <pc=int64#6,>pp=int64#4
# asm 2: mov <pc=%r9,>pp=%rcx
mov %r9,%rcx
# comment:fp stack unchanged by jump
# qhasm: goto siftdownloop
jmp ._siftdownloop
# qhasm: siftuploop:
._siftuploop:
# qhasm: pc = pp
# asm 1: mov <pp=int64#4,>pc=int64#2
# asm 2: mov <pp=%rcx,>pc=%rsi
mov %rcx,%rsi
# qhasm: pp -= 1
# asm 1: sub $1,<pp=int64#4
# asm 2: sub $1,<pp=%rcx
sub $1,%rcx
# qhasm: (uint64) pp >>= 1
# asm 1: shr $1,<pp=int64#4
# asm 2: shr $1,<pp=%rcx
shr $1,%rcx
# qhasm: unsigned>? pc - 0
# asm 1: cmp $0,<pc=int64#2
# asm 2: cmp $0,<pc=%rsi
cmp $0,%rsi
# comment:fp stack unchanged by jump
# qhasm: goto end if !unsigned>
jbe ._end
# qhasm: spp = *(uint64 *)(hp + pp * 8)
# asm 1: movq (<hp=int64#1,<pp=int64#4,8),>spp=int64#5
# asm 2: movq (<hp=%rdi,<pp=%rcx,8),>spp=%r8
movq (%rdi,%rcx,8),%r8
# qhasm: spc = *(uint64 *)(hp + pc * 8)
# asm 1: movq (<hp=int64#1,<pc=int64#2,8),>spc=int64#6
# asm 2: movq (<hp=%rdi,<pc=%rsi,8),>spc=%r9
movq (%rdi,%rsi,8),%r9
# qhasm: spp <<= 5
# asm 1: shl $5,<spp=int64#5
# asm 2: shl $5,<spp=%r8
shl $5,%r8
# qhasm: spc <<= 5
# asm 1: shl $5,<spc=int64#6
# asm 2: shl $5,<spc=%r9
shl $5,%r9
# qhasm: spc += sp
# asm 1: add <sp=int64#3,<spc=int64#6
# asm 2: add <sp=%rdx,<spc=%r9
add %rdx,%r9
# qhasm: spp += sp
# asm 1: add <sp=int64#3,<spp=int64#5
# asm 2: add <sp=%rdx,<spp=%r8
add %rdx,%r8
# qhasm: c0 = *(uint64 *)(spc + 0)
# asm 1: movq 0(<spc=int64#6),>c0=int64#7
# asm 2: movq 0(<spc=%r9),>c0=%rax
movq 0(%r9),%rax
# qhasm: c1 = *(uint64 *)(spc + 8)
# asm 1: movq 8(<spc=int64#6),>c1=int64#8
# asm 2: movq 8(<spc=%r9),>c1=%r10
movq 8(%r9),%r10
# qhasm: c2 = *(uint64 *)(spc + 16)
# asm 1: movq 16(<spc=int64#6),>c2=int64#9
# asm 2: movq 16(<spc=%r9),>c2=%r11
movq 16(%r9),%r11
# qhasm: c3 = *(uint64 *)(spc + 24)
# asm 1: movq 24(<spc=int64#6),>c3=int64#10
# asm 2: movq 24(<spc=%r9),>c3=%r12
movq 24(%r9),%r12
# qhasm: carry? c0 -= *(uint64 *)(spp + 0)
# asm 1: subq 0(<spp=int64#5),<c0=int64#7
# asm 2: subq 0(<spp=%r8),<c0=%rax
subq 0(%r8),%rax
# qhasm: carry? c1 -= *(uint64 *)(spp + 8) - carry
# asm 1: sbbq 8(<spp=int64#5),<c1=int64#8
# asm 2: sbbq 8(<spp=%r8),<c1=%r10
sbbq 8(%r8),%r10
# qhasm: carry? c2 -= *(uint64 *)(spp + 16) - carry
# asm 1: sbbq 16(<spp=int64#5),<c2=int64#9
# asm 2: sbbq 16(<spp=%r8),<c2=%r11
sbbq 16(%r8),%r11
# qhasm: carry? c3 -= *(uint64 *)(spp + 24) - carry
# asm 1: sbbq 24(<spp=int64#5),<c3=int64#10
# asm 2: sbbq 24(<spp=%r8),<c3=%r12
sbbq 24(%r8),%r12
# comment:fp stack unchanged by jump
# qhasm: goto end if carry
jc ._end
# qhasm: spc -= sp
# asm 1: sub <sp=int64#3,<spc=int64#6
# asm 2: sub <sp=%rdx,<spc=%r9
sub %rdx,%r9
# qhasm: (uint64) spc >>= 5
# asm 1: shr $5,<spc=int64#6
# asm 2: shr $5,<spc=%r9
shr $5,%r9
# qhasm: spp -= sp
# asm 1: sub <sp=int64#3,<spp=int64#5
# asm 2: sub <sp=%rdx,<spp=%r8
sub %rdx,%r8
# qhasm: (uint64) spp >>= 5
# asm 1: shr $5,<spp=int64#5
# asm 2: shr $5,<spp=%r8
shr $5,%r8
# qhasm: *(uint64 *)(hp + pp * 8) = spc
# asm 1: movq <spc=int64#6,(<hp=int64#1,<pp=int64#4,8)
# asm 2: movq <spc=%r9,(<hp=%rdi,<pp=%rcx,8)
movq %r9,(%rdi,%rcx,8)
# qhasm: *(uint64 *)(hp + pc * 8) = spp
# asm 1: movq <spp=int64#5,(<hp=int64#1,<pc=int64#2,8)
# asm 2: movq <spp=%r8,(<hp=%rdi,<pc=%rsi,8)
movq %r8,(%rdi,%rsi,8)
# comment:fp stack unchanged by jump
# qhasm: goto siftuploop
jmp ._siftuploop
# qhasm: end:
._end:
# qhasm: caller1 = caller1_stack
# asm 1: movq <caller1_stack=stack64#1,>caller1=int64#9
# asm 2: movq <caller1_stack=0(%rsp),>caller1=%r11
movq 0(%rsp),%r11
# qhasm: caller2 = caller2_stack
# asm 1: movq <caller2_stack=stack64#2,>caller2=int64#10
# asm 2: movq <caller2_stack=8(%rsp),>caller2=%r12
movq 8(%rsp),%r12
# qhasm: caller3 = caller3_stack
# asm 1: movq <caller3_stack=stack64#3,>caller3=int64#11
# asm 2: movq <caller3_stack=16(%rsp),>caller3=%r13
movq 16(%rsp),%r13
# qhasm: caller4 = caller4_stack
# asm 1: movq <caller4_stack=stack64#4,>caller4=int64#12
# asm 2: movq <caller4_stack=24(%rsp),>caller4=%r14
movq 24(%rsp),%r14
# qhasm: caller5 = caller5_stack
# asm 1: movq <caller5_stack=stack64#5,>caller5=int64#13
# asm 2: movq <caller5_stack=32(%rsp),>caller5=%r15
movq 32(%rsp),%r15
# qhasm: caller6 = caller6_stack
# asm 1: movq <caller6_stack=stack64#6,>caller6=int64#14
# asm 2: movq <caller6_stack=40(%rsp),>caller6=%rbx
movq 40(%rsp),%rbx
# qhasm: caller7 = caller7_stack
# asm 1: movq <caller7_stack=stack64#7,>caller7=int64#15
# asm 2: movq <caller7_stack=48(%rsp),>caller7=%rbp
movq 48(%rsp),%rbp
# qhasm: leave
add %r11,%rsp
mov %rdi,%rax
mov %rsi,%rdx
ret

View File

@ -0,0 +1,416 @@
# qhasm: int64 hp
# qhasm: int64 hlen
# qhasm: int64 sp
# qhasm: int64 pp
# qhasm: input hp
# qhasm: input hlen
# qhasm: input sp
# qhasm: int64 prc
# qhasm: int64 plc
# qhasm: int64 pc
# qhasm: int64 d
# qhasm: int64 spp
# qhasm: int64 sprc
# qhasm: int64 spc
# qhasm: int64 c0
# qhasm: int64 c1
# qhasm: int64 c2
# qhasm: int64 c3
# qhasm: int64 t0
# qhasm: int64 t1
# qhasm: int64 t2
# qhasm: int64 t3
# qhasm: int64 p0
# qhasm: int64 p1
# qhasm: int64 p2
# qhasm: int64 p3
# qhasm: int64 caller1
# qhasm: int64 caller2
# qhasm: int64 caller3
# qhasm: int64 caller4
# qhasm: int64 caller5
# qhasm: int64 caller6
# qhasm: int64 caller7
# qhasm: caller caller1
# qhasm: caller caller2
# qhasm: caller caller3
# qhasm: caller caller4
# qhasm: caller caller5
# qhasm: caller caller6
# qhasm: caller caller7
# qhasm: stack64 caller1_stack
# qhasm: stack64 caller2_stack
# qhasm: stack64 caller3_stack
# qhasm: stack64 caller4_stack
# qhasm: stack64 caller5_stack
# qhasm: stack64 caller6_stack
# qhasm: stack64 caller7_stack
# qhasm: enter crypto_sign_ed25519_amd64_64_heap_rootreplaced_1limb
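# Same heap-root replacement as crypto_sign_ed25519_amd64_64_heap_rootreplaced,
# but scalars are compared on their least-significant limb only; this is
# valid in the final Bos-Coster phase, once every scalar fits in 64 bits.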
.text
.p2align 5
.globl _crypto_sign_ed25519_amd64_64_heap_rootreplaced_1limb
.globl crypto_sign_ed25519_amd64_64_heap_rootreplaced_1limb
_crypto_sign_ed25519_amd64_64_heap_rootreplaced_1limb:
crypto_sign_ed25519_amd64_64_heap_rootreplaced_1limb:
mov %rsp,%r11
and $31,%r11
add $64,%r11
sub %r11,%rsp
# qhasm: caller1_stack = caller1
# asm 1: movq <caller1=int64#9,>caller1_stack=stack64#1
# asm 2: movq <caller1=%r11,>caller1_stack=0(%rsp)
movq %r11,0(%rsp)
# qhasm: caller2_stack = caller2
# asm 1: movq <caller2=int64#10,>caller2_stack=stack64#2
# asm 2: movq <caller2=%r12,>caller2_stack=8(%rsp)
movq %r12,8(%rsp)
# qhasm: caller3_stack = caller3
# asm 1: movq <caller3=int64#11,>caller3_stack=stack64#3
# asm 2: movq <caller3=%r13,>caller3_stack=16(%rsp)
movq %r13,16(%rsp)
# qhasm: caller4_stack = caller4
# asm 1: movq <caller4=int64#12,>caller4_stack=stack64#4
# asm 2: movq <caller4=%r14,>caller4_stack=24(%rsp)
movq %r14,24(%rsp)
# qhasm: caller5_stack = caller5
# asm 1: movq <caller5=int64#13,>caller5_stack=stack64#5
# asm 2: movq <caller5=%r15,>caller5_stack=32(%rsp)
movq %r15,32(%rsp)
# qhasm: caller6_stack = caller6
# asm 1: movq <caller6=int64#14,>caller6_stack=stack64#6
# asm 2: movq <caller6=%rbx,>caller6_stack=40(%rsp)
movq %rbx,40(%rsp)
# qhasm: caller7_stack = caller7
# asm 1: movq <caller7=int64#15,>caller7_stack=stack64#7
# asm 2: movq <caller7=%rbp,>caller7_stack=48(%rsp)
movq %rbp,48(%rsp)
# qhasm: pp = 0
# asm 1: mov $0,>pp=int64#4
# asm 2: mov $0,>pp=%rcx
mov $0,%rcx
# qhasm: siftdownloop:
._siftdownloop:
# qhasm: prc = pp
# asm 1: mov <pp=int64#4,>prc=int64#5
# asm 2: mov <pp=%rcx,>prc=%r8
mov %rcx,%r8
# qhasm: prc *= 2
# asm 1: imulq $2,<prc=int64#5,>prc=int64#5
# asm 2: imulq $2,<prc=%r8,>prc=%r8
imulq $2,%r8,%r8
# qhasm: pc = prc
# asm 1: mov <prc=int64#5,>pc=int64#6
# asm 2: mov <prc=%r8,>pc=%r9
mov %r8,%r9
# qhasm: prc += 2
# asm 1: add $2,<prc=int64#5
# asm 2: add $2,<prc=%r8
add $2,%r8
# qhasm: pc += 1
# asm 1: add $1,<pc=int64#6
# asm 2: add $1,<pc=%r9
add $1,%r9
# qhasm: unsigned>? hlen - prc
# asm 1: cmp <prc=int64#5,<hlen=int64#2
# asm 2: cmp <prc=%r8,<hlen=%rsi
cmp %r8,%rsi
# comment:fp stack unchanged by jump
# qhasm: goto siftuploop if !unsigned>
jbe ._siftuploop
# qhasm: sprc = *(uint64 *)(hp + prc * 8)
# asm 1: movq (<hp=int64#1,<prc=int64#5,8),>sprc=int64#7
# asm 2: movq (<hp=%rdi,<prc=%r8,8),>sprc=%rax
movq (%rdi,%r8,8),%rax
# qhasm: sprc <<= 5
# asm 1: shl $5,<sprc=int64#7
# asm 2: shl $5,<sprc=%rax
shl $5,%rax
# qhasm: sprc += sp
# asm 1: add <sp=int64#3,<sprc=int64#7
# asm 2: add <sp=%rdx,<sprc=%rax
add %rdx,%rax
# qhasm: spc = *(uint64 *)(hp + pc * 8)
# asm 1: movq (<hp=int64#1,<pc=int64#6,8),>spc=int64#8
# asm 2: movq (<hp=%rdi,<pc=%r9,8),>spc=%r10
movq (%rdi,%r9,8),%r10
# qhasm: spc <<= 5
# asm 1: shl $5,<spc=int64#8
# asm 2: shl $5,<spc=%r10
shl $5,%r10
# qhasm: spc += sp
# asm 1: add <sp=int64#3,<spc=int64#8
# asm 2: add <sp=%rdx,<spc=%r10
add %rdx,%r10
# qhasm: c0 = *(uint64 *)(spc + 0)
# asm 1: movq 0(<spc=int64#8),>c0=int64#9
# asm 2: movq 0(<spc=%r10),>c0=%r11
movq 0(%r10),%r11
# qhasm: carry? c0 -= *(uint64 *)(sprc + 0)
# asm 1: subq 0(<sprc=int64#7),<c0=int64#9
# asm 2: subq 0(<sprc=%rax),<c0=%r11
subq 0(%rax),%r11
# qhasm: pc = prc if carry
# asm 1: cmovc <prc=int64#5,<pc=int64#6
# asm 2: cmovc <prc=%r8,<pc=%r9
cmovc %r8,%r9
# qhasm: spc = sprc if carry
# asm 1: cmovc <sprc=int64#7,<spc=int64#8
# asm 2: cmovc <sprc=%rax,<spc=%r10
cmovc %rax,%r10
# qhasm: spc -= sp
# asm 1: sub <sp=int64#3,<spc=int64#8
# asm 2: sub <sp=%rdx,<spc=%r10
sub %rdx,%r10
# qhasm: (uint64) spc >>= 5
# asm 1: shr $5,<spc=int64#8
# asm 2: shr $5,<spc=%r10
shr $5,%r10
# qhasm: spp = *(uint64 *)(hp + pp * 8)
# asm 1: movq (<hp=int64#1,<pp=int64#4,8),>spp=int64#5
# asm 2: movq (<hp=%rdi,<pp=%rcx,8),>spp=%r8
movq (%rdi,%rcx,8),%r8
# qhasm: *(uint64 *)(hp + pp * 8) = spc
# asm 1: movq <spc=int64#8,(<hp=int64#1,<pp=int64#4,8)
# asm 2: movq <spc=%r10,(<hp=%rdi,<pp=%rcx,8)
movq %r10,(%rdi,%rcx,8)
# qhasm: *(uint64 *)(hp + pc * 8) = spp
# asm 1: movq <spp=int64#5,(<hp=int64#1,<pc=int64#6,8)
# asm 2: movq <spp=%r8,(<hp=%rdi,<pc=%r9,8)
movq %r8,(%rdi,%r9,8)
# qhasm: pp = pc
# asm 1: mov <pc=int64#6,>pp=int64#4
# asm 2: mov <pc=%r9,>pp=%rcx
mov %r9,%rcx
# comment:fp stack unchanged by jump
# qhasm: goto siftdownloop
jmp ._siftdownloop
# qhasm: siftuploop:
._siftuploop:
# qhasm: pc = pp
# asm 1: mov <pp=int64#4,>pc=int64#2
# asm 2: mov <pp=%rcx,>pc=%rsi
mov %rcx,%rsi
# qhasm: pp -= 1
# asm 1: sub $1,<pp=int64#4
# asm 2: sub $1,<pp=%rcx
sub $1,%rcx
# qhasm: (uint64) pp >>= 1
# asm 1: shr $1,<pp=int64#4
# asm 2: shr $1,<pp=%rcx
shr $1,%rcx
# qhasm: unsigned>? pc - 0
# asm 1: cmp $0,<pc=int64#2
# asm 2: cmp $0,<pc=%rsi
cmp $0,%rsi
# comment:fp stack unchanged by jump
# qhasm: goto end if !unsigned>
jbe ._end
# qhasm: spp = *(uint64 *)(hp + pp * 8)
# asm 1: movq (<hp=int64#1,<pp=int64#4,8),>spp=int64#5
# asm 2: movq (<hp=%rdi,<pp=%rcx,8),>spp=%r8
movq (%rdi,%rcx,8),%r8
# qhasm: spc = *(uint64 *)(hp + pc * 8)
# asm 1: movq (<hp=int64#1,<pc=int64#2,8),>spc=int64#6
# asm 2: movq (<hp=%rdi,<pc=%rsi,8),>spc=%r9
movq (%rdi,%rsi,8),%r9
# qhasm: spp <<= 5
# asm 1: shl $5,<spp=int64#5
# asm 2: shl $5,<spp=%r8
shl $5,%r8
# qhasm: spc <<= 5
# asm 1: shl $5,<spc=int64#6
# asm 2: shl $5,<spc=%r9
shl $5,%r9
# qhasm: spc += sp
# asm 1: add <sp=int64#3,<spc=int64#6
# asm 2: add <sp=%rdx,<spc=%r9
add %rdx,%r9
# qhasm: spp += sp
# asm 1: add <sp=int64#3,<spp=int64#5
# asm 2: add <sp=%rdx,<spp=%r8
add %rdx,%r8
# qhasm: c0 = *(uint64 *)(spc + 0)
# asm 1: movq 0(<spc=int64#6),>c0=int64#7
# asm 2: movq 0(<spc=%r9),>c0=%rax
movq 0(%r9),%rax
# qhasm: carry? c0 -= *(uint64 *)(spp + 0)
# asm 1: subq 0(<spp=int64#5),<c0=int64#7
# asm 2: subq 0(<spp=%r8),<c0=%rax
subq 0(%r8),%rax
# comment:fp stack unchanged by jump
# qhasm: goto end if carry
jc ._end
# qhasm: spc -= sp
# asm 1: sub <sp=int64#3,<spc=int64#6
# asm 2: sub <sp=%rdx,<spc=%r9
sub %rdx,%r9
# qhasm: (uint64) spc >>= 5
# asm 1: shr $5,<spc=int64#6
# asm 2: shr $5,<spc=%r9
shr $5,%r9
# qhasm: spp -= sp
# asm 1: sub <sp=int64#3,<spp=int64#5
# asm 2: sub <sp=%rdx,<spp=%r8
sub %rdx,%r8
# qhasm: (uint64) spp >>= 5
# asm 1: shr $5,<spp=int64#5
# asm 2: shr $5,<spp=%r8
shr $5,%r8
# qhasm: *(uint64 *)(hp + pp * 8) = spc
# asm 1: movq <spc=int64#6,(<hp=int64#1,<pp=int64#4,8)
# asm 2: movq <spc=%r9,(<hp=%rdi,<pp=%rcx,8)
movq %r9,(%rdi,%rcx,8)
# qhasm: *(uint64 *)(hp + pc * 8) = spp
# asm 1: movq <spp=int64#5,(<hp=int64#1,<pc=int64#2,8)
# asm 2: movq <spp=%r8,(<hp=%rdi,<pc=%rsi,8)
movq %r8,(%rdi,%rsi,8)
# comment:fp stack unchanged by jump
# qhasm: goto siftuploop
jmp ._siftuploop
# qhasm: end:
._end:
# qhasm: caller1 = caller1_stack
# asm 1: movq <caller1_stack=stack64#1,>caller1=int64#9
# asm 2: movq <caller1_stack=0(%rsp),>caller1=%r11
movq 0(%rsp),%r11
# qhasm: caller2 = caller2_stack
# asm 1: movq <caller2_stack=stack64#2,>caller2=int64#10
# asm 2: movq <caller2_stack=8(%rsp),>caller2=%r12
movq 8(%rsp),%r12
# qhasm: caller3 = caller3_stack
# asm 1: movq <caller3_stack=stack64#3,>caller3=int64#11
# asm 2: movq <caller3_stack=16(%rsp),>caller3=%r13
movq 16(%rsp),%r13
# qhasm: caller4 = caller4_stack
# asm 1: movq <caller4_stack=stack64#4,>caller4=int64#12
# asm 2: movq <caller4_stack=24(%rsp),>caller4=%r14
movq 24(%rsp),%r14
# qhasm: caller5 = caller5_stack
# asm 1: movq <caller5_stack=stack64#5,>caller5=int64#13
# asm 2: movq <caller5_stack=32(%rsp),>caller5=%r15
movq 32(%rsp),%r15
# qhasm: caller6 = caller6_stack
# asm 1: movq <caller6_stack=stack64#6,>caller6=int64#14
# asm 2: movq <caller6_stack=40(%rsp),>caller6=%rbx
movq 40(%rsp),%rbx
# qhasm: caller7 = caller7_stack
# asm 1: movq <caller7_stack=stack64#7,>caller7=int64#15
# asm 2: movq <caller7_stack=48(%rsp),>caller7=%rbp
movq 48(%rsp),%rbp
# qhasm: leave
add %r11,%rsp
mov %rdi,%rax
mov %rsi,%rdx
ret

View File

@ -0,0 +1,436 @@
# qhasm: int64 hp
# qhasm: int64 hlen
# qhasm: int64 sp
# qhasm: int64 pp
# qhasm: input hp
# qhasm: input hlen
# qhasm: input sp
# qhasm: int64 prc
# qhasm: int64 plc
# qhasm: int64 pc
# qhasm: int64 d
# qhasm: int64 spp
# qhasm: int64 sprc
# qhasm: int64 spc
# qhasm: int64 c0
# qhasm: int64 c1
# qhasm: int64 c2
# qhasm: int64 c3
# qhasm: int64 t0
# qhasm: int64 t1
# qhasm: int64 t2
# qhasm: int64 t3
# qhasm: int64 p0
# qhasm: int64 p1
# qhasm: int64 p2
# qhasm: int64 p3
# qhasm: int64 caller1
# qhasm: int64 caller2
# qhasm: int64 caller3
# qhasm: int64 caller4
# qhasm: int64 caller5
# qhasm: int64 caller6
# qhasm: int64 caller7
# qhasm: caller caller1
# qhasm: caller caller2
# qhasm: caller caller3
# qhasm: caller caller4
# qhasm: caller caller5
# qhasm: caller caller6
# qhasm: caller caller7
# qhasm: stack64 caller1_stack
# qhasm: stack64 caller2_stack
# qhasm: stack64 caller3_stack
# qhasm: stack64 caller4_stack
# qhasm: stack64 caller5_stack
# qhasm: stack64 caller6_stack
# qhasm: stack64 caller7_stack
# qhasm: enter crypto_sign_ed25519_amd64_64_heap_rootreplaced_2limbs
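# Same heap-root replacement, but comparing only the two least-significant
# limbs of each scalar; used once all scalars in the Bos-Coster heap fit
# in 128 bits.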
.text
.p2align 5
.globl _crypto_sign_ed25519_amd64_64_heap_rootreplaced_2limbs
.globl crypto_sign_ed25519_amd64_64_heap_rootreplaced_2limbs
_crypto_sign_ed25519_amd64_64_heap_rootreplaced_2limbs:
crypto_sign_ed25519_amd64_64_heap_rootreplaced_2limbs:
mov %rsp,%r11
and $31,%r11
add $64,%r11
sub %r11,%rsp
# qhasm: caller1_stack = caller1
# asm 1: movq <caller1=int64#9,>caller1_stack=stack64#1
# asm 2: movq <caller1=%r11,>caller1_stack=0(%rsp)
movq %r11,0(%rsp)
# qhasm: caller2_stack = caller2
# asm 1: movq <caller2=int64#10,>caller2_stack=stack64#2
# asm 2: movq <caller2=%r12,>caller2_stack=8(%rsp)
movq %r12,8(%rsp)
# qhasm: caller3_stack = caller3
# asm 1: movq <caller3=int64#11,>caller3_stack=stack64#3
# asm 2: movq <caller3=%r13,>caller3_stack=16(%rsp)
movq %r13,16(%rsp)
# qhasm: caller4_stack = caller4
# asm 1: movq <caller4=int64#12,>caller4_stack=stack64#4
# asm 2: movq <caller4=%r14,>caller4_stack=24(%rsp)
movq %r14,24(%rsp)
# qhasm: caller5_stack = caller5
# asm 1: movq <caller5=int64#13,>caller5_stack=stack64#5
# asm 2: movq <caller5=%r15,>caller5_stack=32(%rsp)
movq %r15,32(%rsp)
# qhasm: caller6_stack = caller6
# asm 1: movq <caller6=int64#14,>caller6_stack=stack64#6
# asm 2: movq <caller6=%rbx,>caller6_stack=40(%rsp)
movq %rbx,40(%rsp)
# qhasm: caller7_stack = caller7
# asm 1: movq <caller7=int64#15,>caller7_stack=stack64#7
# asm 2: movq <caller7=%rbp,>caller7_stack=48(%rsp)
movq %rbp,48(%rsp)
# qhasm: pp = 0
# asm 1: mov $0,>pp=int64#4
# asm 2: mov $0,>pp=%rcx
mov $0,%rcx
# qhasm: siftdownloop:
._siftdownloop:
# qhasm: prc = pp
# asm 1: mov <pp=int64#4,>prc=int64#5
# asm 2: mov <pp=%rcx,>prc=%r8
mov %rcx,%r8
# qhasm: prc *= 2
# asm 1: imulq $2,<prc=int64#5,>prc=int64#5
# asm 2: imulq $2,<prc=%r8,>prc=%r8
imulq $2,%r8,%r8
# qhasm: pc = prc
# asm 1: mov <prc=int64#5,>pc=int64#6
# asm 2: mov <prc=%r8,>pc=%r9
mov %r8,%r9
# qhasm: prc += 2
# asm 1: add $2,<prc=int64#5
# asm 2: add $2,<prc=%r8
add $2,%r8
# qhasm: pc += 1
# asm 1: add $1,<pc=int64#6
# asm 2: add $1,<pc=%r9
add $1,%r9
# qhasm: unsigned>? hlen - prc
# asm 1: cmp <prc=int64#5,<hlen=int64#2
# asm 2: cmp <prc=%r8,<hlen=%rsi
cmp %r8,%rsi
# comment:fp stack unchanged by jump
# qhasm: goto siftuploop if !unsigned>
jbe ._siftuploop
# qhasm: sprc = *(uint64 *)(hp + prc * 8)
# asm 1: movq (<hp=int64#1,<prc=int64#5,8),>sprc=int64#7
# asm 2: movq (<hp=%rdi,<prc=%r8,8),>sprc=%rax
movq (%rdi,%r8,8),%rax
# qhasm: sprc <<= 5
# asm 1: shl $5,<sprc=int64#7
# asm 2: shl $5,<sprc=%rax
shl $5,%rax
# qhasm: sprc += sp
# asm 1: add <sp=int64#3,<sprc=int64#7
# asm 2: add <sp=%rdx,<sprc=%rax
add %rdx,%rax
# qhasm: spc = *(uint64 *)(hp + pc * 8)
# asm 1: movq (<hp=int64#1,<pc=int64#6,8),>spc=int64#8
# asm 2: movq (<hp=%rdi,<pc=%r9,8),>spc=%r10
movq (%rdi,%r9,8),%r10
# qhasm: spc <<= 5
# asm 1: shl $5,<spc=int64#8
# asm 2: shl $5,<spc=%r10
shl $5,%r10
# qhasm: spc += sp
# asm 1: add <sp=int64#3,<spc=int64#8
# asm 2: add <sp=%rdx,<spc=%r10
add %rdx,%r10
# qhasm: c0 = *(uint64 *)(spc + 0)
# asm 1: movq 0(<spc=int64#8),>c0=int64#9
# asm 2: movq 0(<spc=%r10),>c0=%r11
movq 0(%r10),%r11
# qhasm: c1 = *(uint64 *)(spc + 8)
# asm 1: movq 8(<spc=int64#8),>c1=int64#10
# asm 2: movq 8(<spc=%r10),>c1=%r12
movq 8(%r10),%r12
# qhasm: carry? c0 -= *(uint64 *)(sprc + 0)
# asm 1: subq 0(<sprc=int64#7),<c0=int64#9
# asm 2: subq 0(<sprc=%rax),<c0=%r11
subq 0(%rax),%r11
# qhasm: carry? c1 -= *(uint64 *)(sprc + 8) - carry
# asm 1: sbbq 8(<sprc=int64#7),<c1=int64#10
# asm 2: sbbq 8(<sprc=%rax),<c1=%r12
sbbq 8(%rax),%r12
# qhasm: pc = prc if carry
# asm 1: cmovc <prc=int64#5,<pc=int64#6
# asm 2: cmovc <prc=%r8,<pc=%r9
cmovc %r8,%r9
# qhasm: spc = sprc if carry
# asm 1: cmovc <sprc=int64#7,<spc=int64#8
# asm 2: cmovc <sprc=%rax,<spc=%r10
cmovc %rax,%r10
# qhasm: spc -= sp
# asm 1: sub <sp=int64#3,<spc=int64#8
# asm 2: sub <sp=%rdx,<spc=%r10
sub %rdx,%r10
# qhasm: (uint64) spc >>= 5
# asm 1: shr $5,<spc=int64#8
# asm 2: shr $5,<spc=%r10
shr $5,%r10
# qhasm: spp = *(uint64 *)(hp + pp * 8)
# asm 1: movq (<hp=int64#1,<pp=int64#4,8),>spp=int64#5
# asm 2: movq (<hp=%rdi,<pp=%rcx,8),>spp=%r8
movq (%rdi,%rcx,8),%r8
# qhasm: *(uint64 *)(hp + pp * 8) = spc
# asm 1: movq <spc=int64#8,(<hp=int64#1,<pp=int64#4,8)
# asm 2: movq <spc=%r10,(<hp=%rdi,<pp=%rcx,8)
movq %r10,(%rdi,%rcx,8)
# qhasm: *(uint64 *)(hp + pc * 8) = spp
# asm 1: movq <spp=int64#5,(<hp=int64#1,<pc=int64#6,8)
# asm 2: movq <spp=%r8,(<hp=%rdi,<pc=%r9,8)
movq %r8,(%rdi,%r9,8)
# qhasm: pp = pc
# asm 1: mov <pc=int64#6,>pp=int64#4
# asm 2: mov <pc=%r9,>pp=%rcx
mov %r9,%rcx
# comment:fp stack unchanged by jump
# qhasm: goto siftdownloop
jmp ._siftdownloop
# qhasm: siftuploop:
._siftuploop:
# qhasm: pc = pp
# asm 1: mov <pp=int64#4,>pc=int64#2
# asm 2: mov <pp=%rcx,>pc=%rsi
mov %rcx,%rsi
# qhasm: pp -= 1
# asm 1: sub $1,<pp=int64#4
# asm 2: sub $1,<pp=%rcx
sub $1,%rcx
# qhasm: (uint64) pp >>= 1
# asm 1: shr $1,<pp=int64#4
# asm 2: shr $1,<pp=%rcx
shr $1,%rcx
# qhasm: unsigned>? pc - 0
# asm 1: cmp $0,<pc=int64#2
# asm 2: cmp $0,<pc=%rsi
cmp $0,%rsi
# comment:fp stack unchanged by jump
# qhasm: goto end if !unsigned>
jbe ._end
# qhasm: spp = *(uint64 *)(hp + pp * 8)
# asm 1: movq (<hp=int64#1,<pp=int64#4,8),>spp=int64#5
# asm 2: movq (<hp=%rdi,<pp=%rcx,8),>spp=%r8
movq (%rdi,%rcx,8),%r8
# qhasm: spc = *(uint64 *)(hp + pc * 8)
# asm 1: movq (<hp=int64#1,<pc=int64#2,8),>spc=int64#6
# asm 2: movq (<hp=%rdi,<pc=%rsi,8),>spc=%r9
movq (%rdi,%rsi,8),%r9
# qhasm: spp <<= 5
# asm 1: shl $5,<spp=int64#5
# asm 2: shl $5,<spp=%r8
shl $5,%r8
# qhasm: spc <<= 5
# asm 1: shl $5,<spc=int64#6
# asm 2: shl $5,<spc=%r9
shl $5,%r9
# qhasm: spc += sp
# asm 1: add <sp=int64#3,<spc=int64#6
# asm 2: add <sp=%rdx,<spc=%r9
add %rdx,%r9
# qhasm: spp += sp
# asm 1: add <sp=int64#3,<spp=int64#5
# asm 2: add <sp=%rdx,<spp=%r8
add %rdx,%r8
# qhasm: c0 = *(uint64 *)(spc + 0)
# asm 1: movq 0(<spc=int64#6),>c0=int64#7
# asm 2: movq 0(<spc=%r9),>c0=%rax
movq 0(%r9),%rax
# qhasm: c1 = *(uint64 *)(spc + 8)
# asm 1: movq 8(<spc=int64#6),>c1=int64#8
# asm 2: movq 8(<spc=%r9),>c1=%r10
movq 8(%r9),%r10
# qhasm: carry? c0 -= *(uint64 *)(spp + 0)
# asm 1: subq 0(<spp=int64#5),<c0=int64#7
# asm 2: subq 0(<spp=%r8),<c0=%rax
subq 0(%r8),%rax
# qhasm: carry? c1 -= *(uint64 *)(spp + 8) - carry
# asm 1: sbbq 8(<spp=int64#5),<c1=int64#8
# asm 2: sbbq 8(<spp=%r8),<c1=%r10
sbbq 8(%r8),%r10
# comment:fp stack unchanged by jump
# qhasm: goto end if carry
jc ._end
# qhasm: spc -= sp
# asm 1: sub <sp=int64#3,<spc=int64#6
# asm 2: sub <sp=%rdx,<spc=%r9
sub %rdx,%r9
# qhasm: (uint64) spc >>= 5
# asm 1: shr $5,<spc=int64#6
# asm 2: shr $5,<spc=%r9
shr $5,%r9
# qhasm: spp -= sp
# asm 1: sub <sp=int64#3,<spp=int64#5
# asm 2: sub <sp=%rdx,<spp=%r8
sub %rdx,%r8
# qhasm: (uint64) spp >>= 5
# asm 1: shr $5,<spp=int64#5
# asm 2: shr $5,<spp=%r8
shr $5,%r8
# qhasm: *(uint64 *)(hp + pp * 8) = spc
# asm 1: movq <spc=int64#6,(<hp=int64#1,<pp=int64#4,8)
# asm 2: movq <spc=%r9,(<hp=%rdi,<pp=%rcx,8)
movq %r9,(%rdi,%rcx,8)
# qhasm: *(uint64 *)(hp + pc * 8) = spp
# asm 1: movq <spp=int64#5,(<hp=int64#1,<pc=int64#2,8)
# asm 2: movq <spp=%r8,(<hp=%rdi,<pc=%rsi,8)
movq %r8,(%rdi,%rsi,8)
# comment:fp stack unchanged by jump
# qhasm: goto siftuploop
jmp ._siftuploop
# qhasm: end:
._end:
# qhasm: caller1 = caller1_stack
# asm 1: movq <caller1_stack=stack64#1,>caller1=int64#9
# asm 2: movq <caller1_stack=0(%rsp),>caller1=%r11
movq 0(%rsp),%r11
# qhasm: caller2 = caller2_stack
# asm 1: movq <caller2_stack=stack64#2,>caller2=int64#10
# asm 2: movq <caller2_stack=8(%rsp),>caller2=%r12
movq 8(%rsp),%r12
# qhasm: caller3 = caller3_stack
# asm 1: movq <caller3_stack=stack64#3,>caller3=int64#11
# asm 2: movq <caller3_stack=16(%rsp),>caller3=%r13
movq 16(%rsp),%r13
# qhasm: caller4 = caller4_stack
# asm 1: movq <caller4_stack=stack64#4,>caller4=int64#12
# asm 2: movq <caller4_stack=24(%rsp),>caller4=%r14
movq 24(%rsp),%r14
# qhasm: caller5 = caller5_stack
# asm 1: movq <caller5_stack=stack64#5,>caller5=int64#13
# asm 2: movq <caller5_stack=32(%rsp),>caller5=%r15
movq 32(%rsp),%r15
# qhasm: caller6 = caller6_stack
# asm 1: movq <caller6_stack=stack64#6,>caller6=int64#14
# asm 2: movq <caller6_stack=40(%rsp),>caller6=%rbx
movq 40(%rsp),%rbx
# qhasm: caller7 = caller7_stack
# asm 1: movq <caller7_stack=stack64#7,>caller7=int64#15
# asm 2: movq <caller7_stack=48(%rsp),>caller7=%rbp
movq 48(%rsp),%rbp
# qhasm: leave
add %r11,%rsp
mov %rdi,%rax
mov %rsi,%rdx
ret


@ -0,0 +1,456 @@
# qhasm: int64 hp
# qhasm: int64 hlen
# qhasm: int64 sp
# qhasm: int64 pp
# qhasm: input hp
# qhasm: input hlen
# qhasm: input sp
# qhasm: int64 prc
# qhasm: int64 plc
# qhasm: int64 pc
# qhasm: int64 d
# qhasm: int64 spp
# qhasm: int64 sprc
# qhasm: int64 spc
# qhasm: int64 c0
# qhasm: int64 c1
# qhasm: int64 c2
# qhasm: int64 c3
# qhasm: int64 t0
# qhasm: int64 t1
# qhasm: int64 t2
# qhasm: int64 t3
# qhasm: int64 p0
# qhasm: int64 p1
# qhasm: int64 p2
# qhasm: int64 p3
# qhasm: int64 caller1
# qhasm: int64 caller2
# qhasm: int64 caller3
# qhasm: int64 caller4
# qhasm: int64 caller5
# qhasm: int64 caller6
# qhasm: int64 caller7
# qhasm: caller caller1
# qhasm: caller caller2
# qhasm: caller caller3
# qhasm: caller caller4
# qhasm: caller caller5
# qhasm: caller caller6
# qhasm: caller caller7
# qhasm: stack64 caller1_stack
# qhasm: stack64 caller2_stack
# qhasm: stack64 caller3_stack
# qhasm: stack64 caller4_stack
# qhasm: stack64 caller5_stack
# qhasm: stack64 caller6_stack
# qhasm: stack64 caller7_stack
# qhasm: enter crypto_sign_ed25519_amd64_64_heap_rootreplaced_3limbs
.text
.p2align 5
.globl _crypto_sign_ed25519_amd64_64_heap_rootreplaced_3limbs
.globl crypto_sign_ed25519_amd64_64_heap_rootreplaced_3limbs
_crypto_sign_ed25519_amd64_64_heap_rootreplaced_3limbs:
crypto_sign_ed25519_amd64_64_heap_rootreplaced_3limbs:
mov %rsp,%r11
and $31,%r11
add $64,%r11
sub %r11,%rsp
# qhasm: caller1_stack = caller1
# asm 1: movq <caller1=int64#9,>caller1_stack=stack64#1
# asm 2: movq <caller1=%r11,>caller1_stack=0(%rsp)
movq %r11,0(%rsp)
# qhasm: caller2_stack = caller2
# asm 1: movq <caller2=int64#10,>caller2_stack=stack64#2
# asm 2: movq <caller2=%r12,>caller2_stack=8(%rsp)
movq %r12,8(%rsp)
# qhasm: caller3_stack = caller3
# asm 1: movq <caller3=int64#11,>caller3_stack=stack64#3
# asm 2: movq <caller3=%r13,>caller3_stack=16(%rsp)
movq %r13,16(%rsp)
# qhasm: caller4_stack = caller4
# asm 1: movq <caller4=int64#12,>caller4_stack=stack64#4
# asm 2: movq <caller4=%r14,>caller4_stack=24(%rsp)
movq %r14,24(%rsp)
# qhasm: caller5_stack = caller5
# asm 1: movq <caller5=int64#13,>caller5_stack=stack64#5
# asm 2: movq <caller5=%r15,>caller5_stack=32(%rsp)
movq %r15,32(%rsp)
# qhasm: caller6_stack = caller6
# asm 1: movq <caller6=int64#14,>caller6_stack=stack64#6
# asm 2: movq <caller6=%rbx,>caller6_stack=40(%rsp)
movq %rbx,40(%rsp)
# qhasm: caller7_stack = caller7
# asm 1: movq <caller7=int64#15,>caller7_stack=stack64#7
# asm 2: movq <caller7=%rbp,>caller7_stack=48(%rsp)
movq %rbp,48(%rsp)
# qhasm: pp = 0
# asm 1: mov $0,>pp=int64#4
# asm 2: mov $0,>pp=%rcx
mov $0,%rcx
# qhasm: siftdownloop:
._siftdownloop:
# qhasm: prc = pp
# asm 1: mov <pp=int64#4,>prc=int64#5
# asm 2: mov <pp=%rcx,>prc=%r8
mov %rcx,%r8
# qhasm: prc *= 2
# asm 1: imulq $2,<prc=int64#5,>prc=int64#5
# asm 2: imulq $2,<prc=%r8,>prc=%r8
imulq $2,%r8,%r8
# qhasm: pc = prc
# asm 1: mov <prc=int64#5,>pc=int64#6
# asm 2: mov <prc=%r8,>pc=%r9
mov %r8,%r9
# qhasm: prc += 2
# asm 1: add $2,<prc=int64#5
# asm 2: add $2,<prc=%r8
add $2,%r8
# qhasm: pc += 1
# asm 1: add $1,<pc=int64#6
# asm 2: add $1,<pc=%r9
add $1,%r9
# qhasm: unsigned>? hlen - prc
# asm 1: cmp <prc=int64#5,<hlen=int64#2
# asm 2: cmp <prc=%r8,<hlen=%rsi
cmp %r8,%rsi
# comment:fp stack unchanged by jump
# qhasm: goto siftuploop if !unsigned>
jbe ._siftuploop
# qhasm: sprc = *(uint64 *)(hp + prc * 8)
# asm 1: movq (<hp=int64#1,<prc=int64#5,8),>sprc=int64#7
# asm 2: movq (<hp=%rdi,<prc=%r8,8),>sprc=%rax
movq (%rdi,%r8,8),%rax
# qhasm: sprc <<= 5
# asm 1: shl $5,<sprc=int64#7
# asm 2: shl $5,<sprc=%rax
shl $5,%rax
# qhasm: sprc += sp
# asm 1: add <sp=int64#3,<sprc=int64#7
# asm 2: add <sp=%rdx,<sprc=%rax
add %rdx,%rax
# qhasm: spc = *(uint64 *)(hp + pc * 8)
# asm 1: movq (<hp=int64#1,<pc=int64#6,8),>spc=int64#8
# asm 2: movq (<hp=%rdi,<pc=%r9,8),>spc=%r10
movq (%rdi,%r9,8),%r10
# qhasm: spc <<= 5
# asm 1: shl $5,<spc=int64#8
# asm 2: shl $5,<spc=%r10
shl $5,%r10
# qhasm: spc += sp
# asm 1: add <sp=int64#3,<spc=int64#8
# asm 2: add <sp=%rdx,<spc=%r10
add %rdx,%r10
# qhasm: c0 = *(uint64 *)(spc + 0)
# asm 1: movq 0(<spc=int64#8),>c0=int64#9
# asm 2: movq 0(<spc=%r10),>c0=%r11
movq 0(%r10),%r11
# qhasm: c1 = *(uint64 *)(spc + 8)
# asm 1: movq 8(<spc=int64#8),>c1=int64#10
# asm 2: movq 8(<spc=%r10),>c1=%r12
movq 8(%r10),%r12
# qhasm: c2 = *(uint64 *)(spc + 16)
# asm 1: movq 16(<spc=int64#8),>c2=int64#11
# asm 2: movq 16(<spc=%r10),>c2=%r13
movq 16(%r10),%r13
# qhasm: carry? c0 -= *(uint64 *)(sprc + 0)
# asm 1: subq 0(<sprc=int64#7),<c0=int64#9
# asm 2: subq 0(<sprc=%rax),<c0=%r11
subq 0(%rax),%r11
# qhasm: carry? c1 -= *(uint64 *)(sprc + 8) - carry
# asm 1: sbbq 8(<sprc=int64#7),<c1=int64#10
# asm 2: sbbq 8(<sprc=%rax),<c1=%r12
sbbq 8(%rax),%r12
# qhasm: carry? c2 -= *(uint64 *)(sprc + 16) - carry
# asm 1: sbbq 16(<sprc=int64#7),<c2=int64#11
# asm 2: sbbq 16(<sprc=%rax),<c2=%r13
sbbq 16(%rax),%r13
# qhasm: pc = prc if carry
# asm 1: cmovc <prc=int64#5,<pc=int64#6
# asm 2: cmovc <prc=%r8,<pc=%r9
cmovc %r8,%r9
# qhasm: spc = sprc if carry
# asm 1: cmovc <sprc=int64#7,<spc=int64#8
# asm 2: cmovc <sprc=%rax,<spc=%r10
cmovc %rax,%r10
# qhasm: spc -= sp
# asm 1: sub <sp=int64#3,<spc=int64#8
# asm 2: sub <sp=%rdx,<spc=%r10
sub %rdx,%r10
# qhasm: (uint64) spc >>= 5
# asm 1: shr $5,<spc=int64#8
# asm 2: shr $5,<spc=%r10
shr $5,%r10
# qhasm: spp = *(uint64 *)(hp + pp * 8)
# asm 1: movq (<hp=int64#1,<pp=int64#4,8),>spp=int64#5
# asm 2: movq (<hp=%rdi,<pp=%rcx,8),>spp=%r8
movq (%rdi,%rcx,8),%r8
# qhasm: *(uint64 *)(hp + pp * 8) = spc
# asm 1: movq <spc=int64#8,(<hp=int64#1,<pp=int64#4,8)
# asm 2: movq <spc=%r10,(<hp=%rdi,<pp=%rcx,8)
movq %r10,(%rdi,%rcx,8)
# qhasm: *(uint64 *)(hp + pc * 8) = spp
# asm 1: movq <spp=int64#5,(<hp=int64#1,<pc=int64#6,8)
# asm 2: movq <spp=%r8,(<hp=%rdi,<pc=%r9,8)
movq %r8,(%rdi,%r9,8)
# qhasm: pp = pc
# asm 1: mov <pc=int64#6,>pp=int64#4
# asm 2: mov <pc=%r9,>pp=%rcx
mov %r9,%rcx
# comment:fp stack unchanged by jump
# qhasm: goto siftdownloop
jmp ._siftdownloop
# qhasm: siftuploop:
._siftuploop:
# qhasm: pc = pp
# asm 1: mov <pp=int64#4,>pc=int64#2
# asm 2: mov <pp=%rcx,>pc=%rsi
mov %rcx,%rsi
# qhasm: pp -= 1
# asm 1: sub $1,<pp=int64#4
# asm 2: sub $1,<pp=%rcx
sub $1,%rcx
# qhasm: (uint64) pp >>= 1
# asm 1: shr $1,<pp=int64#4
# asm 2: shr $1,<pp=%rcx
shr $1,%rcx
# qhasm: unsigned>? pc - 0
# asm 1: cmp $0,<pc=int64#2
# asm 2: cmp $0,<pc=%rsi
cmp $0,%rsi
# comment:fp stack unchanged by jump
# qhasm: goto end if !unsigned>
jbe ._end
# qhasm: spp = *(uint64 *)(hp + pp * 8)
# asm 1: movq (<hp=int64#1,<pp=int64#4,8),>spp=int64#5
# asm 2: movq (<hp=%rdi,<pp=%rcx,8),>spp=%r8
movq (%rdi,%rcx,8),%r8
# qhasm: spc = *(uint64 *)(hp + pc * 8)
# asm 1: movq (<hp=int64#1,<pc=int64#2,8),>spc=int64#6
# asm 2: movq (<hp=%rdi,<pc=%rsi,8),>spc=%r9
movq (%rdi,%rsi,8),%r9
# qhasm: spp <<= 5
# asm 1: shl $5,<spp=int64#5
# asm 2: shl $5,<spp=%r8
shl $5,%r8
# qhasm: spc <<= 5
# asm 1: shl $5,<spc=int64#6
# asm 2: shl $5,<spc=%r9
shl $5,%r9
# qhasm: spc += sp
# asm 1: add <sp=int64#3,<spc=int64#6
# asm 2: add <sp=%rdx,<spc=%r9
add %rdx,%r9
# qhasm: spp += sp
# asm 1: add <sp=int64#3,<spp=int64#5
# asm 2: add <sp=%rdx,<spp=%r8
add %rdx,%r8
# qhasm: c0 = *(uint64 *)(spc + 0)
# asm 1: movq 0(<spc=int64#6),>c0=int64#7
# asm 2: movq 0(<spc=%r9),>c0=%rax
movq 0(%r9),%rax
# qhasm: c1 = *(uint64 *)(spc + 8)
# asm 1: movq 8(<spc=int64#6),>c1=int64#8
# asm 2: movq 8(<spc=%r9),>c1=%r10
movq 8(%r9),%r10
# qhasm: c2 = *(uint64 *)(spc + 16)
# asm 1: movq 16(<spc=int64#6),>c2=int64#9
# asm 2: movq 16(<spc=%r9),>c2=%r11
movq 16(%r9),%r11
# qhasm: carry? c0 -= *(uint64 *)(spp + 0)
# asm 1: subq 0(<spp=int64#5),<c0=int64#7
# asm 2: subq 0(<spp=%r8),<c0=%rax
subq 0(%r8),%rax
# qhasm: carry? c1 -= *(uint64 *)(spp + 8) - carry
# asm 1: sbbq 8(<spp=int64#5),<c1=int64#8
# asm 2: sbbq 8(<spp=%r8),<c1=%r10
sbbq 8(%r8),%r10
# qhasm: carry? c2 -= *(uint64 *)(spp + 16) - carry
# asm 1: sbbq 16(<spp=int64#5),<c2=int64#9
# asm 2: sbbq 16(<spp=%r8),<c2=%r11
sbbq 16(%r8),%r11
# comment:fp stack unchanged by jump
# qhasm: goto end if carry
jc ._end
# qhasm: spc -= sp
# asm 1: sub <sp=int64#3,<spc=int64#6
# asm 2: sub <sp=%rdx,<spc=%r9
sub %rdx,%r9
# qhasm: (uint64) spc >>= 5
# asm 1: shr $5,<spc=int64#6
# asm 2: shr $5,<spc=%r9
shr $5,%r9
# qhasm: spp -= sp
# asm 1: sub <sp=int64#3,<spp=int64#5
# asm 2: sub <sp=%rdx,<spp=%r8
sub %rdx,%r8
# qhasm: (uint64) spp >>= 5
# asm 1: shr $5,<spp=int64#5
# asm 2: shr $5,<spp=%r8
shr $5,%r8
# qhasm: *(uint64 *)(hp + pp * 8) = spc
# asm 1: movq <spc=int64#6,(<hp=int64#1,<pp=int64#4,8)
# asm 2: movq <spc=%r9,(<hp=%rdi,<pp=%rcx,8)
movq %r9,(%rdi,%rcx,8)
# qhasm: *(uint64 *)(hp + pc * 8) = spp
# asm 1: movq <spp=int64#5,(<hp=int64#1,<pc=int64#2,8)
# asm 2: movq <spp=%r8,(<hp=%rdi,<pc=%rsi,8)
movq %r8,(%rdi,%rsi,8)
# comment:fp stack unchanged by jump
# qhasm: goto siftuploop
jmp ._siftuploop
# qhasm: end:
._end:
# qhasm: caller1 = caller1_stack
# asm 1: movq <caller1_stack=stack64#1,>caller1=int64#9
# asm 2: movq <caller1_stack=0(%rsp),>caller1=%r11
movq 0(%rsp),%r11
# qhasm: caller2 = caller2_stack
# asm 1: movq <caller2_stack=stack64#2,>caller2=int64#10
# asm 2: movq <caller2_stack=8(%rsp),>caller2=%r12
movq 8(%rsp),%r12
# qhasm: caller3 = caller3_stack
# asm 1: movq <caller3_stack=stack64#3,>caller3=int64#11
# asm 2: movq <caller3_stack=16(%rsp),>caller3=%r13
movq 16(%rsp),%r13
# qhasm: caller4 = caller4_stack
# asm 1: movq <caller4_stack=stack64#4,>caller4=int64#12
# asm 2: movq <caller4_stack=24(%rsp),>caller4=%r14
movq 24(%rsp),%r14
# qhasm: caller5 = caller5_stack
# asm 1: movq <caller5_stack=stack64#5,>caller5=int64#13
# asm 2: movq <caller5_stack=32(%rsp),>caller5=%r15
movq 32(%rsp),%r15
# qhasm: caller6 = caller6_stack
# asm 1: movq <caller6_stack=stack64#6,>caller6=int64#14
# asm 2: movq <caller6_stack=40(%rsp),>caller6=%rbx
movq 40(%rsp),%rbx
# qhasm: caller7 = caller7_stack
# asm 1: movq <caller7_stack=stack64#7,>caller7=int64#15
# asm 2: movq <caller7_stack=48(%rsp),>caller7=%rbp
movq 48(%rsp),%rbp
# qhasm: leave
add %r11,%rsp
mov %rdi,%rax
mov %rsi,%rdx
ret


@ -0,0 +1,16 @@
/*#include "crypto_hash_sha512.h"*/
#include "hram.h"
extern void ZT_sha512internal(void *digest,const void *data,unsigned int len);
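/* Build hram = SHA-512(R || A || M): the first 32 bytes of sm hold R, pk holds the
   public key A, and the rest of sm is the message. The concatenation is assembled
   in the caller-provided playground buffer before hashing. */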
void get_hram(unsigned char *hram, const unsigned char *sm, const unsigned char *pk, unsigned char *playground, unsigned long long smlen)
{
unsigned long long i;
for (i = 0;i < 32;++i) playground[i] = sm[i];
for (i = 32;i < 64;++i) playground[i] = pk[i-32];
for (i = 64;i < smlen;++i) playground[i] = sm[i];
/*crypto_hash_sha512(hram,playground,smlen);*/
ZT_sha512internal(hram,playground,smlen);
}


@ -0,0 +1,8 @@
#ifndef HRAM_H
#define HRAM_H
#define get_hram crypto_sign_ed25519_amd64_64_get_hram
extern void get_hram(unsigned char *hram, const unsigned char *sm, const unsigned char *pk, unsigned char *playground, unsigned long long smlen);
#endif


@ -0,0 +1,5 @@
Daniel J. Bernstein
Niels Duif
Tanja Lange
lead: Peter Schwabe
Bo-Yin Yang


@ -0,0 +1,58 @@
#include "sc25519.h"
#include "index_heap.h"
/* caller's responsibility to ensure hlen>=3 */
void heap_init(unsigned long long *h, unsigned long long hlen, sc25519 *scalars)
{
h[0] = 0;
unsigned long long i=1;
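/* heap_push increments *hlen, so passing &i lets it double as the loop counter. */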
while(i<hlen)
heap_push(h, &i, i, scalars);
}
void heap_extend(unsigned long long *h, unsigned long long oldlen, unsigned long long newlen, sc25519 *scalars)
{
unsigned long long i=oldlen;
while(i<newlen)
heap_push(h, &i, i, scalars);
}
void heap_push(unsigned long long *h, unsigned long long *hlen, unsigned long long elem, sc25519 *scalars)
{
/* Move up towards the root */
/* XXX: Check size of hlen, whether cast to signed value is ok */
signed long long pos = *hlen;
signed long long ppos = (pos-1)/2;
unsigned long long t;
h[*hlen] = elem;
while(pos > 0)
{
/* if(sc25519_lt_vartime(&scalars[h[ppos]], &scalars[h[pos]])) */
if(sc25519_lt(&scalars[h[ppos]], &scalars[h[pos]]))
{
t = h[ppos];
h[ppos] = h[pos];
h[pos] = t;
pos = ppos;
ppos = (pos-1)/2;
}
else break;
}
(*hlen)++;
}
/* Put the largest value in the heap in max1, the second largest in max2 */
void heap_get2max(unsigned long long *h, unsigned long long *max1, unsigned long long *max2, sc25519 *scalars)
{
*max1 = h[0];
*max2 = h[1];
if(sc25519_lt(&scalars[h[1]],&scalars[h[2]]))
*max2 = h[2];
}
/* After the root has been replaced, restore heap property */
/* extern void heap_rootreplaced(unsigned long long *h, unsigned long long hlen, sc25519 *scalars);
*/
/* extern void heap_rootreplaced_shortscalars(unsigned long long *h, unsigned long long hlen, sc25519 *scalars);
*/


@ -0,0 +1,31 @@
#ifndef INDEX_HEAP_H
#define INDEX_HEAP_H
#include "sc25519.h"
#define heap_init crypto_sign_ed25519_amd64_64_heap_init
#define heap_extend crypto_sign_ed25519_amd64_64_heap_extend
#define heap_pop crypto_sign_ed25519_amd64_64_heap_pop
#define heap_push crypto_sign_ed25519_amd64_64_heap_push
#define heap_get2max crypto_sign_ed25519_amd64_64_heap_get2max
#define heap_rootreplaced crypto_sign_ed25519_amd64_64_heap_rootreplaced
#define heap_rootreplaced_3limbs crypto_sign_ed25519_amd64_64_heap_rootreplaced_3limbs
#define heap_rootreplaced_2limbs crypto_sign_ed25519_amd64_64_heap_rootreplaced_2limbs
#define heap_rootreplaced_1limb crypto_sign_ed25519_amd64_64_heap_rootreplaced_1limb
void heap_init(unsigned long long *h, unsigned long long hlen, sc25519 *scalars);
void heap_extend(unsigned long long *h, unsigned long long oldlen, unsigned long long newlen, sc25519 *scalars);
unsigned long long heap_pop(unsigned long long *h, unsigned long long *hlen, sc25519 *scalars);
void heap_push(unsigned long long *h, unsigned long long *hlen, unsigned long long elem, sc25519 *scalars);
void heap_get2max(unsigned long long *h, unsigned long long *max1, unsigned long long *max2, sc25519 *scalars);
void heap_rootreplaced(unsigned long long *h, unsigned long long hlen, sc25519 *scalars);
void heap_rootreplaced_3limbs(unsigned long long *h, unsigned long long hlen, sc25519 *scalars);
void heap_rootreplaced_2limbs(unsigned long long *h, unsigned long long hlen, sc25519 *scalars);
void heap_rootreplaced_1limb(unsigned long long *h, unsigned long long hlen, sc25519 *scalars);
#endif


@ -0,0 +1,25 @@
#include <string.h>
#include "crypto_sign.h"
#include "crypto_hash_sha512.h"
#include "randombytes.h"
#include "ge25519.h"
int crypto_sign_keypair(unsigned char *pk,unsigned char *sk)
{
unsigned char az[64];
sc25519 scsk;
ge25519 gepk;
randombytes(sk,32);
crypto_hash_sha512(az,sk,32);
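/* Standard Ed25519 clamping of the secret scalar: clear the low 3 bits,
   clear bit 255, set bit 254. */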
az[0] &= 248;
az[31] &= 127;
az[31] |= 64;
sc25519_from32bytes(&scsk,az);
ge25519_scalarmult_base(&gepk, &scsk);
ge25519_pack(pk, &gepk);
memmove(sk + 32,pk,32);
return 0;
}


@ -0,0 +1,49 @@
#include <string.h>
#include "crypto_sign.h"
#include "crypto_verify_32.h"
#include "crypto_hash_sha512.h"
#include "ge25519.h"
int crypto_sign_open(
unsigned char *m,unsigned long long *mlen,
const unsigned char *sm,unsigned long long smlen,
const unsigned char *pk
)
{
unsigned char pkcopy[32];
unsigned char rcopy[32];
unsigned char hram[64];
unsigned char rcheck[32];
ge25519 get1, get2;
sc25519 schram, scs;
if (smlen < 64) goto badsig;
if (sm[63] & 224) goto badsig;
if (ge25519_unpackneg_vartime(&get1,pk)) goto badsig;
memmove(pkcopy,pk,32);
memmove(rcopy,sm,32);
sc25519_from32bytes(&scs, sm+32);
memmove(m,sm,smlen);
memmove(m + 32,pkcopy,32);
crypto_hash_sha512(hram,m,smlen);
sc25519_from64bytes(&schram, hram);
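/* get1 holds -A (from unpackneg), so rcheck packs H(R,A,m)*(-A) + S*B, which
   equals R exactly when S*B = R + H(R,A,m)*A, i.e. when the signature is valid. */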
ge25519_double_scalarmult_vartime(&get2, &get1, &schram, &scs);
ge25519_pack(rcheck, &get2);
if (crypto_verify_32(rcopy,rcheck) == 0) {
memmove(m,m + 64,smlen - 64);
memset(m + smlen - 64,0,64);
*mlen = smlen - 64;
return 0;
}
badsig:
*mlen = (unsigned long long) -1;
memset(m,0,smlen);
return -1;
}


@ -0,0 +1,66 @@
#ifndef SC25519_H
#define SC25519_H
#define sc25519 crypto_sign_ed25519_amd64_64_sc25519
#define shortsc25519 crypto_sign_ed25519_amd64_64_shortsc25519
#define sc25519_from32bytes crypto_sign_ed25519_amd64_64_sc25519_from32bytes
#define shortsc25519_from16bytes crypto_sign_ed25519_amd64_64_shortsc25519_from16bytes
#define sc25519_from64bytes crypto_sign_ed25519_amd64_64_sc25519_from64bytes
#define sc25519_from_shortsc crypto_sign_ed25519_amd64_64_sc25519_from_shortsc
#define sc25519_to32bytes crypto_sign_ed25519_amd64_64_sc25519_to32bytes
#define sc25519_iszero_vartime crypto_sign_ed25519_amd64_64_sc25519_iszero_vartime
#define sc25519_isshort_vartime crypto_sign_ed25519_amd64_64_sc25519_isshort_vartime
#define sc25519_lt crypto_sign_ed25519_amd64_64_sc25519_lt
#define sc25519_add crypto_sign_ed25519_amd64_64_sc25519_add
#define sc25519_sub_nored crypto_sign_ed25519_amd64_64_sc25519_sub_nored
#define sc25519_mul crypto_sign_ed25519_amd64_64_sc25519_mul
#define sc25519_mul_shortsc crypto_sign_ed25519_amd64_64_sc25519_mul_shortsc
#define sc25519_window4 crypto_sign_ed25519_amd64_64_sc25519_window4
#define sc25519_slide crypto_sign_ed25519_amd64_64_sc25519_slide
#define sc25519_2interleave2 crypto_sign_ed25519_amd64_64_sc25519_2interleave2
#define sc25519_barrett crypto_sign_ed25519_amd64_64_sc25519_barrett
typedef struct
{
unsigned long long v[4];
}
sc25519;
typedef struct
{
unsigned long long v[2];
}
shortsc25519;
void sc25519_from32bytes(sc25519 *r, const unsigned char x[32]);
void sc25519_from64bytes(sc25519 *r, const unsigned char x[64]);
void sc25519_from_shortsc(sc25519 *r, const shortsc25519 *x);
void sc25519_to32bytes(unsigned char r[32], const sc25519 *x);
int sc25519_iszero_vartime(const sc25519 *x);
int sc25519_lt(const sc25519 *x, const sc25519 *y);
void sc25519_add(sc25519 *r, const sc25519 *x, const sc25519 *y);
void sc25519_sub_nored(sc25519 *r, const sc25519 *x, const sc25519 *y);
void sc25519_mul(sc25519 *r, const sc25519 *x, const sc25519 *y);
void sc25519_mul_shortsc(sc25519 *r, const sc25519 *x, const shortsc25519 *y);
/* Convert s into a representation of the form \sum_{i=0}^{63}r[i]2^(4*i)
* with r[i] in {-8,...,7}
*/
void sc25519_window4(signed char r[85], const sc25519 *s);
void sc25519_slide(signed char r[256], const sc25519 *s, int swindowsize);
void sc25519_2interleave2(unsigned char r[127], const sc25519 *s1, const sc25519 *s2);
void sc25519_barrett(sc25519 *r, unsigned long long x[8]);
#endif


@ -0,0 +1,232 @@
# qhasm: int64 rp
# qhasm: int64 xp
# qhasm: int64 yp
# qhasm: input rp
# qhasm: input xp
# qhasm: input yp
# qhasm: int64 r0
# qhasm: int64 r1
# qhasm: int64 r2
# qhasm: int64 r3
# qhasm: int64 t0
# qhasm: int64 t1
# qhasm: int64 t2
# qhasm: int64 t3
# qhasm: int64 caller1
# qhasm: int64 caller2
# qhasm: int64 caller3
# qhasm: int64 caller4
# qhasm: int64 caller5
# qhasm: int64 caller6
# qhasm: int64 caller7
# qhasm: caller caller1
# qhasm: caller caller2
# qhasm: caller caller3
# qhasm: caller caller4
# qhasm: caller caller5
# qhasm: caller caller6
# qhasm: caller caller7
# qhasm: stack64 caller4_stack
# qhasm: stack64 caller5_stack
# qhasm: stack64 caller6_stack
# qhasm: stack64 caller7_stack
# qhasm: enter crypto_sign_ed25519_amd64_64_sc25519_add
.text
.p2align 5
.globl _crypto_sign_ed25519_amd64_64_sc25519_add
.globl crypto_sign_ed25519_amd64_64_sc25519_add
_crypto_sign_ed25519_amd64_64_sc25519_add:
crypto_sign_ed25519_amd64_64_sc25519_add:
mov %rsp,%r11
and $31,%r11
add $32,%r11
sub %r11,%rsp
# qhasm: caller4_stack = caller4
# asm 1: movq <caller4=int64#12,>caller4_stack=stack64#1
# asm 2: movq <caller4=%r14,>caller4_stack=0(%rsp)
movq %r14,0(%rsp)
# qhasm: caller5_stack = caller5
# asm 1: movq <caller5=int64#13,>caller5_stack=stack64#2
# asm 2: movq <caller5=%r15,>caller5_stack=8(%rsp)
movq %r15,8(%rsp)
# qhasm: caller6_stack = caller6
# asm 1: movq <caller6=int64#14,>caller6_stack=stack64#3
# asm 2: movq <caller6=%rbx,>caller6_stack=16(%rsp)
movq %rbx,16(%rsp)
# qhasm: r0 = *(uint64 *)(xp + 0)
# asm 1: movq 0(<xp=int64#2),>r0=int64#4
# asm 2: movq 0(<xp=%rsi),>r0=%rcx
movq 0(%rsi),%rcx
# qhasm: r1 = *(uint64 *)(xp + 8)
# asm 1: movq 8(<xp=int64#2),>r1=int64#5
# asm 2: movq 8(<xp=%rsi),>r1=%r8
movq 8(%rsi),%r8
# qhasm: r2 = *(uint64 *)(xp + 16)
# asm 1: movq 16(<xp=int64#2),>r2=int64#6
# asm 2: movq 16(<xp=%rsi),>r2=%r9
movq 16(%rsi),%r9
# qhasm: r3 = *(uint64 *)(xp + 24)
# asm 1: movq 24(<xp=int64#2),>r3=int64#2
# asm 2: movq 24(<xp=%rsi),>r3=%rsi
movq 24(%rsi),%rsi
# qhasm: carry? r0 += *(uint64 *)(yp + 0)
# asm 1: addq 0(<yp=int64#3),<r0=int64#4
# asm 2: addq 0(<yp=%rdx),<r0=%rcx
addq 0(%rdx),%rcx
# qhasm: carry? r1 += *(uint64 *)(yp + 8) + carry
# asm 1: adcq 8(<yp=int64#3),<r1=int64#5
# asm 2: adcq 8(<yp=%rdx),<r1=%r8
adcq 8(%rdx),%r8
# qhasm: carry? r2 += *(uint64 *)(yp + 16) + carry
# asm 1: adcq 16(<yp=int64#3),<r2=int64#6
# asm 2: adcq 16(<yp=%rdx),<r2=%r9
adcq 16(%rdx),%r9
# qhasm: r3 += *(uint64 *)(yp + 24) + carry
# asm 1: adcq 24(<yp=int64#3),<r3=int64#2
# asm 2: adcq 24(<yp=%rdx),<r3=%rsi
adcq 24(%rdx),%rsi
# qhasm: t0 = r0
# asm 1: mov <r0=int64#4,>t0=int64#3
# asm 2: mov <r0=%rcx,>t0=%rdx
mov %rcx,%rdx
# qhasm: t1 = r1
# asm 1: mov <r1=int64#5,>t1=int64#7
# asm 2: mov <r1=%r8,>t1=%rax
mov %r8,%rax
# qhasm: t2 = r2
# asm 1: mov <r2=int64#6,>t2=int64#8
# asm 2: mov <r2=%r9,>t2=%r10
mov %r9,%r10
# qhasm: t3 = r3
# asm 1: mov <r3=int64#2,>t3=int64#12
# asm 2: mov <r3=%rsi,>t3=%r14
mov %rsi,%r14
# qhasm: carry? t0 -= *(uint64 *) &crypto_sign_ed25519_amd64_64_ORDER0
# asm 1: sub crypto_sign_ed25519_amd64_64_ORDER0,<t0=int64#3
# asm 2: sub crypto_sign_ed25519_amd64_64_ORDER0,<t0=%rdx
sub crypto_sign_ed25519_amd64_64_ORDER0,%rdx
# qhasm: carry? t1 -= *(uint64 *) &crypto_sign_ed25519_amd64_64_ORDER1 - carry
# asm 1: sbb crypto_sign_ed25519_amd64_64_ORDER1,<t1=int64#7
# asm 2: sbb crypto_sign_ed25519_amd64_64_ORDER1,<t1=%rax
sbb crypto_sign_ed25519_amd64_64_ORDER1,%rax
# qhasm: carry? t2 -= *(uint64 *) &crypto_sign_ed25519_amd64_64_ORDER2 - carry
# asm 1: sbb crypto_sign_ed25519_amd64_64_ORDER2,<t2=int64#8
# asm 2: sbb crypto_sign_ed25519_amd64_64_ORDER2,<t2=%r10
sbb crypto_sign_ed25519_amd64_64_ORDER2,%r10
# qhasm: unsigned<? t3 -= *(uint64 *) &crypto_sign_ed25519_amd64_64_ORDER3 - carry
# asm 1: sbb crypto_sign_ed25519_amd64_64_ORDER3,<t3=int64#12
# asm 2: sbb crypto_sign_ed25519_amd64_64_ORDER3,<t3=%r14
sbb crypto_sign_ed25519_amd64_64_ORDER3,%r14
# qhasm: r0 = t0 if !unsigned<
# asm 1: cmovae <t0=int64#3,<r0=int64#4
# asm 2: cmovae <t0=%rdx,<r0=%rcx
cmovae %rdx,%rcx
# qhasm: r1 = t1 if !unsigned<
# asm 1: cmovae <t1=int64#7,<r1=int64#5
# asm 2: cmovae <t1=%rax,<r1=%r8
cmovae %rax,%r8
# qhasm: r2 = t2 if !unsigned<
# asm 1: cmovae <t2=int64#8,<r2=int64#6
# asm 2: cmovae <t2=%r10,<r2=%r9
cmovae %r10,%r9
# qhasm: r3 = t3 if !unsigned<
# asm 1: cmovae <t3=int64#12,<r3=int64#2
# asm 2: cmovae <t3=%r14,<r3=%rsi
cmovae %r14,%rsi
# qhasm: *(uint64 *)(rp + 0) = r0
# asm 1: movq <r0=int64#4,0(<rp=int64#1)
# asm 2: movq <r0=%rcx,0(<rp=%rdi)
movq %rcx,0(%rdi)
# qhasm: *(uint64 *)(rp + 8) = r1
# asm 1: movq <r1=int64#5,8(<rp=int64#1)
# asm 2: movq <r1=%r8,8(<rp=%rdi)
movq %r8,8(%rdi)
# qhasm: *(uint64 *)(rp + 16) = r2
# asm 1: movq <r2=int64#6,16(<rp=int64#1)
# asm 2: movq <r2=%r9,16(<rp=%rdi)
movq %r9,16(%rdi)
# qhasm: *(uint64 *)(rp + 24) = r3
# asm 1: movq <r3=int64#2,24(<rp=int64#1)
# asm 2: movq <r3=%rsi,24(<rp=%rdi)
movq %rsi,24(%rdi)
# qhasm: caller4 = caller4_stack
# asm 1: movq <caller4_stack=stack64#1,>caller4=int64#12
# asm 2: movq <caller4_stack=0(%rsp),>caller4=%r14
movq 0(%rsp),%r14
# qhasm: caller5 = caller5_stack
# asm 1: movq <caller5_stack=stack64#2,>caller5=int64#13
# asm 2: movq <caller5_stack=8(%rsp),>caller5=%r15
movq 8(%rsp),%r15
# qhasm: caller6 = caller6_stack
# asm 1: movq <caller6_stack=stack64#3,>caller6=int64#14
# asm 2: movq <caller6_stack=16(%rsp),>caller6=%rbx
movq 16(%rsp),%rbx
# qhasm: leave
add %r11,%rsp
mov %rdi,%rax
mov %rsi,%rdx
ret

File diff suppressed because it is too large


@ -0,0 +1,55 @@
#include "sc25519.h"
/* Arithmetic modulo the group order n = 2^252 + 27742317777372353535851937790883648493
* = 7237005577332262213973186563042994240857116359379907606001950938285454250989
*/
/* Contains order, 2*order, 4*order, 8*order, each represented in 4 consecutive unsigned long long */
static const unsigned long long order[16] = {0x5812631A5CF5D3EDULL, 0x14DEF9DEA2F79CD6ULL,
0x0000000000000000ULL, 0x1000000000000000ULL,
0xB024C634B9EBA7DAULL, 0x29BDF3BD45EF39ACULL,
0x0000000000000000ULL, 0x2000000000000000ULL,
0x60498C6973D74FB4ULL, 0x537BE77A8BDE7359ULL,
0x0000000000000000ULL, 0x4000000000000000ULL,
0xC09318D2E7AE9F68ULL, 0xA6F7CEF517BCE6B2ULL,
0x0000000000000000ULL, 0x8000000000000000ULL};
static unsigned long long smaller(unsigned long long a,unsigned long long b)
{
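/* Branchless unsigned comparison: returns 1 if a < b, else 0. Working on 32-bit
   halves keeps the computation free of data-dependent branches. */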
unsigned long long atop = a >> 32;
unsigned long long abot = a & 4294967295;
unsigned long long btop = b >> 32;
unsigned long long bbot = b & 4294967295;
unsigned long long atopbelowbtop = (atop - btop) >> 63;
unsigned long long atopeqbtop = ((atop ^ btop) - 1) >> 63;
unsigned long long abotbelowbbot = (abot - bbot) >> 63;
return atopbelowbtop | (atopeqbtop & abotbelowbbot);
}
void sc25519_from32bytes(sc25519 *r, const unsigned char x[32])
{
unsigned long long t[4];
unsigned long long b;
unsigned long long mask;
int i, j;
/* assuming little-endian */
r->v[0] = *(unsigned long long *)x;
r->v[1] = *(((unsigned long long *)x)+1);
r->v[2] = *(((unsigned long long *)x)+2);
r->v[3] = *(((unsigned long long *)x)+3);
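/* Conditionally subtract 8n, 4n, 2n, then n so the result lands in [0, n);
   the mask applies each subtraction branchlessly, keeping this constant-time. */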
for(j=3;j>=0;j--)
{
b=0;
for(i=0;i<4;i++)
{
b += order[4*j+i]; /* no overflow for this particular order */
t[i] = r->v[i] - b;
b = smaller(r->v[i],b);
}
mask = b - 1;
for(i=0;i<4;i++)
r->v[i] ^= mask & (r->v[i] ^ t[i]);
}
}


@ -0,0 +1,7 @@
#include "sc25519.h"
void sc25519_from64bytes(sc25519 *r, const unsigned char x[64])
{
/* assuming little-endian representation of unsigned long long */
sc25519_barrett(r, (unsigned long long *)x);
}


@ -0,0 +1,9 @@
#include "sc25519.h"
void sc25519_from_shortsc(sc25519 *r, const shortsc25519 *x)
{
r->v[0] = x->v[0];
r->v[1] = x->v[1];
r->v[2] = 0;
r->v[3] = 0;
}


@ -0,0 +1,10 @@
#include "sc25519.h"
int sc25519_iszero_vartime(const sc25519 *x)
{
if(x->v[0] != 0) return 0;
if(x->v[1] != 0) return 0;
if(x->v[2] != 0) return 0;
if(x->v[3] != 0) return 0;
return 1;
}


@ -0,0 +1,131 @@
# qhasm: int64 xp
# qhasm: int64 yp
# qhasm: int64 ret
# qhasm: input xp
# qhasm: input yp
# qhasm: output ret
# qhasm: int64 t0
# qhasm: int64 t1
# qhasm: int64 t2
# qhasm: int64 t3
# qhasm: int64 doof
# qhasm: int64 caller1
# qhasm: int64 caller2
# qhasm: int64 caller3
# qhasm: int64 caller4
# qhasm: int64 caller5
# qhasm: int64 caller6
# qhasm: int64 caller7
# qhasm: caller caller1
# qhasm: caller caller2
# qhasm: caller caller3
# qhasm: caller caller4
# qhasm: caller caller5
# qhasm: caller caller6
# qhasm: caller caller7
# qhasm: stack64 caller4_stack
# qhasm: stack64 caller5_stack
# qhasm: stack64 caller6_stack
# qhasm: stack64 caller7_stack
# qhasm: enter crypto_sign_ed25519_amd64_64_sc25519_lt
.text
.p2align 5
.globl _crypto_sign_ed25519_amd64_64_sc25519_lt
.globl crypto_sign_ed25519_amd64_64_sc25519_lt
_crypto_sign_ed25519_amd64_64_sc25519_lt:
crypto_sign_ed25519_amd64_64_sc25519_lt:
mov %rsp,%r11
and $31,%r11
add $0,%r11
sub %r11,%rsp
# qhasm: t0 = *(uint64 *)(xp + 0)
# asm 1: movq 0(<xp=int64#1),>t0=int64#3
# asm 2: movq 0(<xp=%rdi),>t0=%rdx
movq 0(%rdi),%rdx
# qhasm: t1 = *(uint64 *)(xp + 8)
# asm 1: movq 8(<xp=int64#1),>t1=int64#4
# asm 2: movq 8(<xp=%rdi),>t1=%rcx
movq 8(%rdi),%rcx
# qhasm: t2 = *(uint64 *)(xp + 16)
# asm 1: movq 16(<xp=int64#1),>t2=int64#5
# asm 2: movq 16(<xp=%rdi),>t2=%r8
movq 16(%rdi),%r8
# qhasm: t3 = *(uint64 *)(xp + 24)
# asm 1: movq 24(<xp=int64#1),>t3=int64#1
# asm 2: movq 24(<xp=%rdi),>t3=%rdi
movq 24(%rdi),%rdi
# qhasm: carry? t0 -= *(uint64 *)(yp + 0)
# asm 1: subq 0(<yp=int64#2),<t0=int64#3
# asm 2: subq 0(<yp=%rsi),<t0=%rdx
subq 0(%rsi),%rdx
# qhasm: carry? t1 -= *(uint64 *)(yp + 8) - carry
# asm 1: sbbq 8(<yp=int64#2),<t1=int64#4
# asm 2: sbbq 8(<yp=%rsi),<t1=%rcx
sbbq 8(%rsi),%rcx
# qhasm: carry? t2 -= *(uint64 *)(yp + 16) - carry
# asm 1: sbbq 16(<yp=int64#2),<t2=int64#5
# asm 2: sbbq 16(<yp=%rsi),<t2=%r8
sbbq 16(%rsi),%r8
# qhasm: carry? t3 -= *(uint64 *)(yp + 24) - carry
# asm 1: sbbq 24(<yp=int64#2),<t3=int64#1
# asm 2: sbbq 24(<yp=%rsi),<t3=%rdi
sbbq 24(%rsi),%rdi
# qhasm: ret = 0
# asm 1: mov $0,>ret=int64#1
# asm 2: mov $0,>ret=%rdi
mov $0,%rdi
# qhasm: doof = 1
# asm 1: mov $1,>doof=int64#2
# asm 2: mov $1,>doof=%rsi
mov $1,%rsi
# qhasm: ret = doof if carry
# asm 1: cmovc <doof=int64#2,<ret=int64#1
# asm 2: cmovc <doof=%rsi,<ret=%rdi
cmovc %rsi,%rdi
# qhasm: leave
add %r11,%rsp
mov %rdi,%rax
mov %rsi,%rdx
ret


@ -0,0 +1,12 @@
#include "sc25519.h"
#define ull4_mul crypto_sign_ed25519_amd64_64_ull4_mul
extern void ull4_mul(unsigned long long r[8], const unsigned long long x[4], const unsigned long long y[4]);
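/* Multiply the two 4-limb scalars into an 8-limb product, then Barrett-reduce
   the product modulo the group order. */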
void sc25519_mul(sc25519 *r, const sc25519 *x, const sc25519 *y)
{
unsigned long long t[8];
ull4_mul(t, x->v, y->v);
sc25519_barrett(r, t);
}


@ -0,0 +1,9 @@
#include "sc25519.h"
void sc25519_mul_shortsc(sc25519 *r, const sc25519 *x, const shortsc25519 *y)
{
/* XXX: This wants to be faster */
sc25519 t;
sc25519_from_shortsc(&t, y);
sc25519_mul(r, x, &t);
}


@ -0,0 +1,49 @@
#include "sc25519.h"
void sc25519_slide(signed char r[256], const sc25519 *s, int swindowsize)
{
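/* Recode s into a sliding-window signed-digit form: nonzero digits are odd,
   have absolute value at most m = 2^(swindowsize-1)-1, and are separated by
   runs of zeros. */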
int i,j,k,b,m=(1<<(swindowsize-1))-1, soplen=256;
unsigned long long sv0 = s->v[0];
unsigned long long sv1 = s->v[1];
unsigned long long sv2 = s->v[2];
unsigned long long sv3 = s->v[3];
/* first put the binary expansion into r */
for(i=0;i<64;i++) {
r[i] = sv0 & 1;
r[i+64] = sv1 & 1;
r[i+128] = sv2 & 1;
r[i+192] = sv3 & 1;
sv0 >>= 1;
sv1 >>= 1;
sv2 >>= 1;
sv3 >>= 1;
}
/* Making it sliding window */
for (j = 0;j < soplen;++j)
{
if (r[j]) {
for (b = 1;b < soplen - j && b <= 6;++b) {
if (r[j] + (r[j + b] << b) <= m)
{
r[j] += r[j + b] << b; r[j + b] = 0;
}
else if (r[j] - (r[j + b] << b) >= -m)
{
r[j] -= r[j + b] << b;
for (k = j + b;k < soplen;++k)
{
if (!r[k]) {
r[k] = 1;
break;
}
r[k] = 0;
}
}
else if (r[j + b])
break;
}
}
}
}


@ -0,0 +1,142 @@
# qhasm: int64 rp
# qhasm: int64 xp
# qhasm: int64 yp
# qhasm: input rp
# qhasm: input xp
# qhasm: input yp
# qhasm: int64 r0
# qhasm: int64 r1
# qhasm: int64 r2
# qhasm: int64 r3
# qhasm: int64 t0
# qhasm: int64 t1
# qhasm: int64 t2
# qhasm: int64 t3
# qhasm: int64 caller1
# qhasm: int64 caller2
# qhasm: int64 caller3
# qhasm: int64 caller4
# qhasm: int64 caller5
# qhasm: int64 caller6
# qhasm: int64 caller7
# qhasm: caller caller1
# qhasm: caller caller2
# qhasm: caller caller3
# qhasm: caller caller4
# qhasm: caller caller5
# qhasm: caller caller6
# qhasm: caller caller7
# qhasm: stack64 caller4_stack
# qhasm: stack64 caller5_stack
# qhasm: stack64 caller6_stack
# qhasm: stack64 caller7_stack
# qhasm: enter crypto_sign_ed25519_amd64_64_sc25519_sub_nored
.text
.p2align 5
.globl _crypto_sign_ed25519_amd64_64_sc25519_sub_nored
.globl crypto_sign_ed25519_amd64_64_sc25519_sub_nored
_crypto_sign_ed25519_amd64_64_sc25519_sub_nored:
crypto_sign_ed25519_amd64_64_sc25519_sub_nored:
mov %rsp,%r11
and $31,%r11
add $0,%r11
sub %r11,%rsp
# qhasm: r0 = *(uint64 *)(xp + 0)
# asm 1: movq 0(<xp=int64#2),>r0=int64#4
# asm 2: movq 0(<xp=%rsi),>r0=%rcx
movq 0(%rsi),%rcx
# qhasm: r1 = *(uint64 *)(xp + 8)
# asm 1: movq 8(<xp=int64#2),>r1=int64#5
# asm 2: movq 8(<xp=%rsi),>r1=%r8
movq 8(%rsi),%r8
# qhasm: r2 = *(uint64 *)(xp + 16)
# asm 1: movq 16(<xp=int64#2),>r2=int64#6
# asm 2: movq 16(<xp=%rsi),>r2=%r9
movq 16(%rsi),%r9
# qhasm: r3 = *(uint64 *)(xp + 24)
# asm 1: movq 24(<xp=int64#2),>r3=int64#2
# asm 2: movq 24(<xp=%rsi),>r3=%rsi
movq 24(%rsi),%rsi
# qhasm: carry? r0 -= *(uint64 *)(yp + 0)
# asm 1: subq 0(<yp=int64#3),<r0=int64#4
# asm 2: subq 0(<yp=%rdx),<r0=%rcx
subq 0(%rdx),%rcx
# qhasm: carry? r1 -= *(uint64 *)(yp + 8) - carry
# asm 1: sbbq 8(<yp=int64#3),<r1=int64#5
# asm 2: sbbq 8(<yp=%rdx),<r1=%r8
sbbq 8(%rdx),%r8
# qhasm: carry? r2 -= *(uint64 *)(yp + 16) - carry
# asm 1: sbbq 16(<yp=int64#3),<r2=int64#6
# asm 2: sbbq 16(<yp=%rdx),<r2=%r9
sbbq 16(%rdx),%r9
# qhasm: r3 -= *(uint64 *)(yp + 24) - carry
# asm 1: sbbq 24(<yp=int64#3),<r3=int64#2
# asm 2: sbbq 24(<yp=%rdx),<r3=%rsi
sbbq 24(%rdx),%rsi
# qhasm: *(uint64 *)(rp + 0) = r0
# asm 1: movq <r0=int64#4,0(<rp=int64#1)
# asm 2: movq <r0=%rcx,0(<rp=%rdi)
movq %rcx,0(%rdi)
# qhasm: *(uint64 *)(rp + 8) = r1
# asm 1: movq <r1=int64#5,8(<rp=int64#1)
# asm 2: movq <r1=%r8,8(<rp=%rdi)
movq %r8,8(%rdi)
# qhasm: *(uint64 *)(rp + 16) = r2
# asm 1: movq <r2=int64#6,16(<rp=int64#1)
# asm 2: movq <r2=%r9,16(<rp=%rdi)
movq %r9,16(%rdi)
# qhasm: *(uint64 *)(rp + 24) = r3
# asm 1: movq <r3=int64#2,24(<rp=int64#1)
# asm 2: movq <r3=%rsi,24(<rp=%rdi)
movq %rsi,24(%rdi)
# qhasm: leave
add %r11,%rsp
mov %rdi,%rax
mov %rsi,%rdx
ret


@ -0,0 +1,8 @@
#include "sc25519.h"
void sc25519_to32bytes(unsigned char r[32], const sc25519 *x)
{
/* assuming little-endian */
int i;
for(i=0;i<32;i++) r[i] = ((unsigned char *)x->v)[i];
}


@ -0,0 +1,27 @@
#include "sc25519.h"
void sc25519_window4(signed char r[64], const sc25519 *s)
{
char carry;
int i;
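/* Split s into 64 radix-16 digits, then recode them into the signed range
   {-8,...,7}, propagating carries so the represented value is unchanged. */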
for(i=0;i<16;i++)
r[i] = (s->v[0] >> (4*i)) & 15;
for(i=0;i<16;i++)
r[i+16] = (s->v[1] >> (4*i)) & 15;
for(i=0;i<16;i++)
r[i+32] = (s->v[2] >> (4*i)) & 15;
for(i=0;i<16;i++)
r[i+48] = (s->v[3] >> (4*i)) & 15;
/* Making it signed */
carry = 0;
for(i=0;i<63;i++)
{
r[i] += carry;
r[i+1] += r[i] >> 4;
r[i] &= 15;
carry = r[i] >> 3;
r[i] -= carry << 4;
}
r[63] += carry;
}


@ -0,0 +1,165 @@
#include <stdlib.h>
#include <string.h>
/*#include "crypto_sign.h"
#include "crypto_hash_sha512.h"*/
#include "ge25519.h"
/* Original */
#if 0
int crypto_sign(
unsigned char *sm,unsigned long long *smlen,
const unsigned char *m,unsigned long long mlen,
const unsigned char *sk
)
{
unsigned char pk[32];
unsigned char az[64];
unsigned char nonce[64];
unsigned char hram[64];
sc25519 sck, scs, scsk;
ge25519 ger;
memmove(pk,sk + 32,32);
/* pk: 32-byte public key A */
crypto_hash_sha512(az,sk,32);
az[0] &= 248;
az[31] &= 127;
az[31] |= 64;
/* az: 32-byte scalar a, 32-byte randomizer z */
*smlen = mlen + 64;
memmove(sm + 64,m,mlen);
memmove(sm + 32,az + 32,32);
/* sm: 32-byte uninit, 32-byte z, mlen-byte m */
crypto_hash_sha512(nonce, sm+32, mlen+32);
/* nonce: 64-byte H(z,m) */
sc25519_from64bytes(&sck, nonce);
ge25519_scalarmult_base(&ger, &sck);
ge25519_pack(sm, &ger);
/* sm: 32-byte R, 32-byte z, mlen-byte m */
memmove(sm + 32,pk,32);
/* sm: 32-byte R, 32-byte A, mlen-byte m */
crypto_hash_sha512(hram,sm,mlen + 64);
/* hram: 64-byte H(R,A,m) */
sc25519_from64bytes(&scs, hram);
sc25519_from32bytes(&scsk, az);
sc25519_mul(&scs, &scs, &scsk);
sc25519_add(&scs, &scs, &sck);
/* scs: S = nonce + H(R,A,m)a */
sc25519_to32bytes(sm + 32,&scs);
/* sm: 32-byte R, 32-byte S, mlen-byte m */
return 0;
}
#endif
#if 0
void C25519::sign(const C25519::Private &myPrivate,const C25519::Public &myPublic,const void *msg,unsigned int len,void *signature)
{
sc25519 sck, scs, scsk;
ge25519 ger;
unsigned char r[32];
unsigned char s[32];
unsigned char extsk[64];
unsigned char hmg[crypto_hash_sha512_BYTES];
unsigned char hram[crypto_hash_sha512_BYTES];
unsigned char *sig = (unsigned char *)signature;
unsigned char digest[64]; // we sign the first 32 bytes of SHA-512(msg)
SHA512::hash(digest,msg,len);
SHA512::hash(extsk,myPrivate.data + 32,32);
extsk[0] &= 248;
extsk[31] &= 127;
extsk[31] |= 64;
for(unsigned int i=0;i<32;i++)
sig[32 + i] = extsk[32 + i];
for(unsigned int i=0;i<32;i++)
sig[64 + i] = digest[i];
SHA512::hash(hmg,sig + 32,64);
/* Computation of R */
sc25519_from64bytes(&sck, hmg);
ge25519_scalarmult_base(&ger, &sck);
ge25519_pack(r, &ger);
/* Computation of s */
for(unsigned int i=0;i<32;i++)
sig[i] = r[i];
get_hram(hram,sig,myPublic.data + 32,sig,96);
sc25519_from64bytes(&scs, hram);
sc25519_from32bytes(&scsk, extsk);
sc25519_mul(&scs, &scs, &scsk);
sc25519_add(&scs, &scs, &sck);
sc25519_to32bytes(s,&scs); /* cat s */
for(unsigned int i=0;i<32;i++)
sig[32 + i] = s[i];
}
void get_hram(unsigned char *hram, const unsigned char *sm, const unsigned char *pk, unsigned char *playground, unsigned long long smlen)
{
unsigned long long i;
for (i = 0;i < 32;++i) playground[i] = sm[i];
for (i = 32;i < 64;++i) playground[i] = pk[i-32];
for (i = 64;i < smlen;++i) playground[i] = sm[i];
//crypto_hash_sha512(hram,playground,smlen);
ZeroTier::SHA512::hash(hram,playground,(unsigned int)smlen);
}
#endif
extern void ZT_sha512internal(void *digest,const void *data,unsigned int len);
extern void ed25519_amd64_asm_sign(const unsigned char *sk,const unsigned char *pk,const unsigned char *m,const unsigned int mlen,unsigned char *sig)
{
unsigned char az[64];
unsigned char nonce[64];
unsigned char hram[64];
sc25519 sck, scs, scsk;
ge25519 ger;
unsigned char digest[64];
unsigned int i;
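	/* This variant signs the 64-byte SHA-512 digest of the message rather than
	   the message itself: az = SHA-512(sk) gives the clamped scalar a and the
	   randomizer z; nonce = SHA-512(z || digest); R = nonce*B;
	   S = nonce + SHA-512(R || A || digest)*a mod n. */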
ZT_sha512internal(digest,m,mlen);
ZT_sha512internal(az,sk,32);
az[0] &= 248;
az[31] &= 127;
az[31] |= 64;
for(i=0;i<32;i++)
sig[32 + i] = az[32 + i];
for(i=0;i<32;i++)
sig[64 + i] = digest[i];
ZT_sha512internal(nonce,sig + 32,64);
sc25519_from64bytes(&sck, nonce);
ge25519_scalarmult_base(&ger, &sck);
ge25519_pack(sig, &ger);
memmove(sig + 32,pk,32);
ZT_sha512internal(hram,sig,96);
sc25519_from64bytes(&scs, hram);
sc25519_from32bytes(&scsk, az);
sc25519_mul(&scs, &scs, &scsk);
sc25519_add(&scs, &scs, &sck);
sc25519_to32bytes(sig + 32,&scs);
}


@ -0,0 +1,716 @@
# qhasm: int64 rp
# qhasm: int64 xp
# qhasm: int64 yp
# qhasm: input rp
# qhasm: input xp
# qhasm: input yp
# qhasm: int64 r0
# qhasm: int64 r1
# qhasm: int64 r2
# qhasm: int64 r3
# qhasm: int64 r4
# qhasm: int64 r5
# qhasm: int64 r6
# qhasm: int64 r7
# qhasm: int64 c
# qhasm: int64 zero
# qhasm: int64 rax
# qhasm: int64 rdx
# qhasm: int64 caller1
# qhasm: int64 caller2
# qhasm: int64 caller3
# qhasm: int64 caller4
# qhasm: int64 caller5
# qhasm: int64 caller6
# qhasm: int64 caller7
# qhasm: caller caller1
# qhasm: caller caller2
# qhasm: caller caller3
# qhasm: caller caller4
# qhasm: caller caller5
# qhasm: caller caller6
# qhasm: caller caller7
# qhasm: stack64 caller1_stack
# qhasm: stack64 caller2_stack
# qhasm: stack64 caller3_stack
# qhasm: stack64 caller4_stack
# qhasm: stack64 caller5_stack
# qhasm: stack64 caller6_stack
# qhasm: stack64 caller7_stack
# qhasm: enter crypto_sign_ed25519_amd64_64_ull4_mul
.text
.p2align 5
.globl _crypto_sign_ed25519_amd64_64_ull4_mul
.globl crypto_sign_ed25519_amd64_64_ull4_mul
_crypto_sign_ed25519_amd64_64_ull4_mul:
crypto_sign_ed25519_amd64_64_ull4_mul:
mov %rsp,%r11
and $31,%r11
add $64,%r11
sub %r11,%rsp
# qhasm: caller1_stack = caller1
# asm 1: movq <caller1=int64#9,>caller1_stack=stack64#1
# asm 2: movq <caller1=%r11,>caller1_stack=0(%rsp)
movq %r11,0(%rsp)
# qhasm: caller2_stack = caller2
# asm 1: movq <caller2=int64#10,>caller2_stack=stack64#2
# asm 2: movq <caller2=%r12,>caller2_stack=8(%rsp)
movq %r12,8(%rsp)
# qhasm: caller3_stack = caller3
# asm 1: movq <caller3=int64#11,>caller3_stack=stack64#3
# asm 2: movq <caller3=%r13,>caller3_stack=16(%rsp)
movq %r13,16(%rsp)
# qhasm: caller4_stack = caller4
# asm 1: movq <caller4=int64#12,>caller4_stack=stack64#4
# asm 2: movq <caller4=%r14,>caller4_stack=24(%rsp)
movq %r14,24(%rsp)
# qhasm: caller5_stack = caller5
# asm 1: movq <caller5=int64#13,>caller5_stack=stack64#5
# asm 2: movq <caller5=%r15,>caller5_stack=32(%rsp)
movq %r15,32(%rsp)
# qhasm: caller6_stack = caller6
# asm 1: movq <caller6=int64#14,>caller6_stack=stack64#6
# asm 2: movq <caller6=%rbx,>caller6_stack=40(%rsp)
movq %rbx,40(%rsp)
# qhasm: caller7_stack = caller7
# asm 1: movq <caller7=int64#15,>caller7_stack=stack64#7
# asm 2: movq <caller7=%rbp,>caller7_stack=48(%rsp)
movq %rbp,48(%rsp)
# qhasm: yp = yp
# asm 1: mov <yp=int64#3,>yp=int64#4
# asm 2: mov <yp=%rdx,>yp=%rcx
mov %rdx,%rcx
# qhasm: r4 = 0
# asm 1: mov $0,>r4=int64#5
# asm 2: mov $0,>r4=%r8
mov $0,%r8
# qhasm: r5 = 0
# asm 1: mov $0,>r5=int64#6
# asm 2: mov $0,>r5=%r9
mov $0,%r9
# qhasm: r6 = 0
# asm 1: mov $0,>r6=int64#8
# asm 2: mov $0,>r6=%r10
mov $0,%r10
# qhasm: r7 = 0
# asm 1: mov $0,>r7=int64#9
# asm 2: mov $0,>r7=%r11
mov $0,%r11
# qhasm: zero = 0
# asm 1: mov $0,>zero=int64#10
# asm 2: mov $0,>zero=%r12
mov $0,%r12
# qhasm: rax = *(uint64 *)(xp + 0)
# asm 1: movq 0(<xp=int64#2),>rax=int64#7
# asm 2: movq 0(<xp=%rsi),>rax=%rax
movq 0(%rsi),%rax
# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 0)
# asm 1: mulq 0(<yp=int64#4)
# asm 2: mulq 0(<yp=%rcx)
mulq 0(%rcx)
# qhasm: r0 = rax
# asm 1: mov <rax=int64#7,>r0=int64#11
# asm 2: mov <rax=%rax,>r0=%r13
mov %rax,%r13
# qhasm: c = rdx
# asm 1: mov <rdx=int64#3,>c=int64#12
# asm 2: mov <rdx=%rdx,>c=%r14
mov %rdx,%r14
# qhasm: rax = *(uint64 *)(xp + 0)
# asm 1: movq 0(<xp=int64#2),>rax=int64#7
# asm 2: movq 0(<xp=%rsi),>rax=%rax
movq 0(%rsi),%rax
# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 8)
# asm 1: mulq 8(<yp=int64#4)
# asm 2: mulq 8(<yp=%rcx)
mulq 8(%rcx)
# qhasm: r1 = rax
# asm 1: mov <rax=int64#7,>r1=int64#13
# asm 2: mov <rax=%rax,>r1=%r15
mov %rax,%r15
# qhasm: carry? r1 += c
# asm 1: add <c=int64#12,<r1=int64#13
# asm 2: add <c=%r14,<r1=%r15
add %r14,%r15
# qhasm: c = 0
# asm 1: mov $0,>c=int64#12
# asm 2: mov $0,>c=%r14
mov $0,%r14
# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#12
# asm 2: adc <rdx=%rdx,<c=%r14
adc %rdx,%r14
# qhasm: rax = *(uint64 *)(xp + 0)
# asm 1: movq 0(<xp=int64#2),>rax=int64#7
# asm 2: movq 0(<xp=%rsi),>rax=%rax
movq 0(%rsi),%rax
# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 16)
# asm 1: mulq 16(<yp=int64#4)
# asm 2: mulq 16(<yp=%rcx)
mulq 16(%rcx)
# qhasm: r2 = rax
# asm 1: mov <rax=int64#7,>r2=int64#14
# asm 2: mov <rax=%rax,>r2=%rbx
mov %rax,%rbx
# qhasm: carry? r2 += c
# asm 1: add <c=int64#12,<r2=int64#14
# asm 2: add <c=%r14,<r2=%rbx
add %r14,%rbx
# qhasm: c = 0
# asm 1: mov $0,>c=int64#12
# asm 2: mov $0,>c=%r14
mov $0,%r14
# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#12
# asm 2: adc <rdx=%rdx,<c=%r14
adc %rdx,%r14
# qhasm: rax = *(uint64 *)(xp + 0)
# asm 1: movq 0(<xp=int64#2),>rax=int64#7
# asm 2: movq 0(<xp=%rsi),>rax=%rax
movq 0(%rsi),%rax
# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 24)
# asm 1: mulq 24(<yp=int64#4)
# asm 2: mulq 24(<yp=%rcx)
mulq 24(%rcx)
# qhasm: r3 = rax
# asm 1: mov <rax=int64#7,>r3=int64#15
# asm 2: mov <rax=%rax,>r3=%rbp
mov %rax,%rbp
# qhasm: carry? r3 += c
# asm 1: add <c=int64#12,<r3=int64#15
# asm 2: add <c=%r14,<r3=%rbp
add %r14,%rbp
# qhasm: r4 += rdx + carry
# asm 1: adc <rdx=int64#3,<r4=int64#5
# asm 2: adc <rdx=%rdx,<r4=%r8
adc %rdx,%r8
# qhasm: rax = *(uint64 *)(xp + 8)
# asm 1: movq 8(<xp=int64#2),>rax=int64#7
# asm 2: movq 8(<xp=%rsi),>rax=%rax
movq 8(%rsi),%rax
# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 0)
# asm 1: mulq 0(<yp=int64#4)
# asm 2: mulq 0(<yp=%rcx)
mulq 0(%rcx)
# qhasm: carry? r1 += rax
# asm 1: add <rax=int64#7,<r1=int64#13
# asm 2: add <rax=%rax,<r1=%r15
add %rax,%r15
# qhasm: c = 0
# asm 1: mov $0,>c=int64#12
# asm 2: mov $0,>c=%r14
mov $0,%r14
# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#12
# asm 2: adc <rdx=%rdx,<c=%r14
adc %rdx,%r14
# qhasm: rax = *(uint64 *)(xp + 8)
# asm 1: movq 8(<xp=int64#2),>rax=int64#7
# asm 2: movq 8(<xp=%rsi),>rax=%rax
movq 8(%rsi),%rax
# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 8)
# asm 1: mulq 8(<yp=int64#4)
# asm 2: mulq 8(<yp=%rcx)
mulq 8(%rcx)
# qhasm: carry? r2 += rax
# asm 1: add <rax=int64#7,<r2=int64#14
# asm 2: add <rax=%rax,<r2=%rbx
add %rax,%rbx
# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#10,<rdx=int64#3
# asm 2: adc <zero=%r12,<rdx=%rdx
adc %r12,%rdx
# qhasm: carry? r2 += c
# asm 1: add <c=int64#12,<r2=int64#14
# asm 2: add <c=%r14,<r2=%rbx
add %r14,%rbx
# qhasm: c = 0
# asm 1: mov $0,>c=int64#12
# asm 2: mov $0,>c=%r14
mov $0,%r14
# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#12
# asm 2: adc <rdx=%rdx,<c=%r14
adc %rdx,%r14
# qhasm: rax = *(uint64 *)(xp + 8)
# asm 1: movq 8(<xp=int64#2),>rax=int64#7
# asm 2: movq 8(<xp=%rsi),>rax=%rax
movq 8(%rsi),%rax
# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 16)
# asm 1: mulq 16(<yp=int64#4)
# asm 2: mulq 16(<yp=%rcx)
mulq 16(%rcx)
# qhasm: carry? r3 += rax
# asm 1: add <rax=int64#7,<r3=int64#15
# asm 2: add <rax=%rax,<r3=%rbp
add %rax,%rbp
# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#10,<rdx=int64#3
# asm 2: adc <zero=%r12,<rdx=%rdx
adc %r12,%rdx
# qhasm: carry? r3 += c
# asm 1: add <c=int64#12,<r3=int64#15
# asm 2: add <c=%r14,<r3=%rbp
add %r14,%rbp
# qhasm: c = 0
# asm 1: mov $0,>c=int64#12
# asm 2: mov $0,>c=%r14
mov $0,%r14
# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#12
# asm 2: adc <rdx=%rdx,<c=%r14
adc %rdx,%r14
# qhasm: rax = *(uint64 *)(xp + 8)
# asm 1: movq 8(<xp=int64#2),>rax=int64#7
# asm 2: movq 8(<xp=%rsi),>rax=%rax
movq 8(%rsi),%rax
# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 24)
# asm 1: mulq 24(<yp=int64#4)
# asm 2: mulq 24(<yp=%rcx)
mulq 24(%rcx)
# qhasm: carry? r4 += rax
# asm 1: add <rax=int64#7,<r4=int64#5
# asm 2: add <rax=%rax,<r4=%r8
add %rax,%r8
# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#10,<rdx=int64#3
# asm 2: adc <zero=%r12,<rdx=%rdx
adc %r12,%rdx
# qhasm: carry? r4 += c
# asm 1: add <c=int64#12,<r4=int64#5
# asm 2: add <c=%r14,<r4=%r8
add %r14,%r8
# qhasm: r5 += rdx + carry
# asm 1: adc <rdx=int64#3,<r5=int64#6
# asm 2: adc <rdx=%rdx,<r5=%r9
adc %rdx,%r9
# qhasm: rax = *(uint64 *)(xp + 16)
# asm 1: movq 16(<xp=int64#2),>rax=int64#7
# asm 2: movq 16(<xp=%rsi),>rax=%rax
movq 16(%rsi),%rax
# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 0)
# asm 1: mulq 0(<yp=int64#4)
# asm 2: mulq 0(<yp=%rcx)
mulq 0(%rcx)
# qhasm: carry? r2 += rax
# asm 1: add <rax=int64#7,<r2=int64#14
# asm 2: add <rax=%rax,<r2=%rbx
add %rax,%rbx
# qhasm: c = 0
# asm 1: mov $0,>c=int64#12
# asm 2: mov $0,>c=%r14
mov $0,%r14
# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#12
# asm 2: adc <rdx=%rdx,<c=%r14
adc %rdx,%r14
# qhasm: rax = *(uint64 *)(xp + 16)
# asm 1: movq 16(<xp=int64#2),>rax=int64#7
# asm 2: movq 16(<xp=%rsi),>rax=%rax
movq 16(%rsi),%rax
# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 8)
# asm 1: mulq 8(<yp=int64#4)
# asm 2: mulq 8(<yp=%rcx)
mulq 8(%rcx)
# qhasm: carry? r3 += rax
# asm 1: add <rax=int64#7,<r3=int64#15
# asm 2: add <rax=%rax,<r3=%rbp
add %rax,%rbp
# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#10,<rdx=int64#3
# asm 2: adc <zero=%r12,<rdx=%rdx
adc %r12,%rdx
# qhasm: carry? r3 += c
# asm 1: add <c=int64#12,<r3=int64#15
# asm 2: add <c=%r14,<r3=%rbp
add %r14,%rbp
# qhasm: c = 0
# asm 1: mov $0,>c=int64#12
# asm 2: mov $0,>c=%r14
mov $0,%r14
# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#12
# asm 2: adc <rdx=%rdx,<c=%r14
adc %rdx,%r14
# qhasm: rax = *(uint64 *)(xp + 16)
# asm 1: movq 16(<xp=int64#2),>rax=int64#7
# asm 2: movq 16(<xp=%rsi),>rax=%rax
movq 16(%rsi),%rax
# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 16)
# asm 1: mulq 16(<yp=int64#4)
# asm 2: mulq 16(<yp=%rcx)
mulq 16(%rcx)
# qhasm: carry? r4 += rax
# asm 1: add <rax=int64#7,<r4=int64#5
# asm 2: add <rax=%rax,<r4=%r8
add %rax,%r8
# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#10,<rdx=int64#3
# asm 2: adc <zero=%r12,<rdx=%rdx
adc %r12,%rdx
# qhasm: carry? r4 += c
# asm 1: add <c=int64#12,<r4=int64#5
# asm 2: add <c=%r14,<r4=%r8
add %r14,%r8
# qhasm: c = 0
# asm 1: mov $0,>c=int64#12
# asm 2: mov $0,>c=%r14
mov $0,%r14
# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#12
# asm 2: adc <rdx=%rdx,<c=%r14
adc %rdx,%r14
# qhasm: rax = *(uint64 *)(xp + 16)
# asm 1: movq 16(<xp=int64#2),>rax=int64#7
# asm 2: movq 16(<xp=%rsi),>rax=%rax
movq 16(%rsi),%rax
# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 24)
# asm 1: mulq 24(<yp=int64#4)
# asm 2: mulq 24(<yp=%rcx)
mulq 24(%rcx)
# qhasm: carry? r5 += rax
# asm 1: add <rax=int64#7,<r5=int64#6
# asm 2: add <rax=%rax,<r5=%r9
add %rax,%r9
# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#10,<rdx=int64#3
# asm 2: adc <zero=%r12,<rdx=%rdx
adc %r12,%rdx
# qhasm: carry? r5 += c
# asm 1: add <c=int64#12,<r5=int64#6
# asm 2: add <c=%r14,<r5=%r9
add %r14,%r9
# qhasm: r6 += rdx + carry
# asm 1: adc <rdx=int64#3,<r6=int64#8
# asm 2: adc <rdx=%rdx,<r6=%r10
adc %rdx,%r10
# qhasm: rax = *(uint64 *)(xp + 24)
# asm 1: movq 24(<xp=int64#2),>rax=int64#7
# asm 2: movq 24(<xp=%rsi),>rax=%rax
movq 24(%rsi),%rax
# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 0)
# asm 1: mulq 0(<yp=int64#4)
# asm 2: mulq 0(<yp=%rcx)
mulq 0(%rcx)
# qhasm: carry? r3 += rax
# asm 1: add <rax=int64#7,<r3=int64#15
# asm 2: add <rax=%rax,<r3=%rbp
add %rax,%rbp
# qhasm: c = 0
# asm 1: mov $0,>c=int64#12
# asm 2: mov $0,>c=%r14
mov $0,%r14
# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#12
# asm 2: adc <rdx=%rdx,<c=%r14
adc %rdx,%r14
# qhasm: rax = *(uint64 *)(xp + 24)
# asm 1: movq 24(<xp=int64#2),>rax=int64#7
# asm 2: movq 24(<xp=%rsi),>rax=%rax
movq 24(%rsi),%rax
# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 8)
# asm 1: mulq 8(<yp=int64#4)
# asm 2: mulq 8(<yp=%rcx)
mulq 8(%rcx)
# qhasm: carry? r4 += rax
# asm 1: add <rax=int64#7,<r4=int64#5
# asm 2: add <rax=%rax,<r4=%r8
add %rax,%r8
# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#10,<rdx=int64#3
# asm 2: adc <zero=%r12,<rdx=%rdx
adc %r12,%rdx
# qhasm: carry? r4 += c
# asm 1: add <c=int64#12,<r4=int64#5
# asm 2: add <c=%r14,<r4=%r8
add %r14,%r8
# qhasm: c = 0
# asm 1: mov $0,>c=int64#12
# asm 2: mov $0,>c=%r14
mov $0,%r14
# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#12
# asm 2: adc <rdx=%rdx,<c=%r14
adc %rdx,%r14
# qhasm: rax = *(uint64 *)(xp + 24)
# asm 1: movq 24(<xp=int64#2),>rax=int64#7
# asm 2: movq 24(<xp=%rsi),>rax=%rax
movq 24(%rsi),%rax
# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 16)
# asm 1: mulq 16(<yp=int64#4)
# asm 2: mulq 16(<yp=%rcx)
mulq 16(%rcx)
# qhasm: carry? r5 += rax
# asm 1: add <rax=int64#7,<r5=int64#6
# asm 2: add <rax=%rax,<r5=%r9
add %rax,%r9
# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#10,<rdx=int64#3
# asm 2: adc <zero=%r12,<rdx=%rdx
adc %r12,%rdx
# qhasm: carry? r5 += c
# asm 1: add <c=int64#12,<r5=int64#6
# asm 2: add <c=%r14,<r5=%r9
add %r14,%r9
# qhasm: c = 0
# asm 1: mov $0,>c=int64#12
# asm 2: mov $0,>c=%r14
mov $0,%r14
# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#12
# asm 2: adc <rdx=%rdx,<c=%r14
adc %rdx,%r14
# qhasm: rax = *(uint64 *)(xp + 24)
# asm 1: movq 24(<xp=int64#2),>rax=int64#7
# asm 2: movq 24(<xp=%rsi),>rax=%rax
movq 24(%rsi),%rax
# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 24)
# asm 1: mulq 24(<yp=int64#4)
# asm 2: mulq 24(<yp=%rcx)
mulq 24(%rcx)
# qhasm: carry? r6 += rax
# asm 1: add <rax=int64#7,<r6=int64#8
# asm 2: add <rax=%rax,<r6=%r10
add %rax,%r10
# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#10,<rdx=int64#3
# asm 2: adc <zero=%r12,<rdx=%rdx
adc %r12,%rdx
# qhasm: carry? r6 += c
# asm 1: add <c=int64#12,<r6=int64#8
# asm 2: add <c=%r14,<r6=%r10
add %r14,%r10
# qhasm: r7 += rdx + carry
# asm 1: adc <rdx=int64#3,<r7=int64#9
# asm 2: adc <rdx=%rdx,<r7=%r11
adc %rdx,%r11
# qhasm: *(uint64 *)(rp + 0) = r0
# asm 1: movq <r0=int64#11,0(<rp=int64#1)
# asm 2: movq <r0=%r13,0(<rp=%rdi)
movq %r13,0(%rdi)
# qhasm: *(uint64 *)(rp + 8) = r1
# asm 1: movq <r1=int64#13,8(<rp=int64#1)
# asm 2: movq <r1=%r15,8(<rp=%rdi)
movq %r15,8(%rdi)
# qhasm: *(uint64 *)(rp + 16) = r2
# asm 1: movq <r2=int64#14,16(<rp=int64#1)
# asm 2: movq <r2=%rbx,16(<rp=%rdi)
movq %rbx,16(%rdi)
# qhasm: *(uint64 *)(rp + 24) = r3
# asm 1: movq <r3=int64#15,24(<rp=int64#1)
# asm 2: movq <r3=%rbp,24(<rp=%rdi)
movq %rbp,24(%rdi)
# qhasm: *(uint64 *)(rp + 32) = r4
# asm 1: movq <r4=int64#5,32(<rp=int64#1)
# asm 2: movq <r4=%r8,32(<rp=%rdi)
movq %r8,32(%rdi)
# qhasm: *(uint64 *)(rp + 40) = r5
# asm 1: movq <r5=int64#6,40(<rp=int64#1)
# asm 2: movq <r5=%r9,40(<rp=%rdi)
movq %r9,40(%rdi)
# qhasm: *(uint64 *)(rp + 48) = r6
# asm 1: movq <r6=int64#8,48(<rp=int64#1)
# asm 2: movq <r6=%r10,48(<rp=%rdi)
movq %r10,48(%rdi)
# qhasm: *(uint64 *)(rp + 56) = r7
# asm 1: movq <r7=int64#9,56(<rp=int64#1)
# asm 2: movq <r7=%r11,56(<rp=%rdi)
movq %r11,56(%rdi)
# qhasm: caller1 = caller1_stack
# asm 1: movq <caller1_stack=stack64#1,>caller1=int64#9
# asm 2: movq <caller1_stack=0(%rsp),>caller1=%r11
movq 0(%rsp),%r11
# qhasm: caller2 = caller2_stack
# asm 1: movq <caller2_stack=stack64#2,>caller2=int64#10
# asm 2: movq <caller2_stack=8(%rsp),>caller2=%r12
movq 8(%rsp),%r12
# qhasm: caller3 = caller3_stack
# asm 1: movq <caller3_stack=stack64#3,>caller3=int64#11
# asm 2: movq <caller3_stack=16(%rsp),>caller3=%r13
movq 16(%rsp),%r13
# qhasm: caller4 = caller4_stack
# asm 1: movq <caller4_stack=stack64#4,>caller4=int64#12
# asm 2: movq <caller4_stack=24(%rsp),>caller4=%r14
movq 24(%rsp),%r14
# qhasm: caller5 = caller5_stack
# asm 1: movq <caller5_stack=stack64#5,>caller5=int64#13
# asm 2: movq <caller5_stack=32(%rsp),>caller5=%r15
movq 32(%rsp),%r15
# qhasm: caller6 = caller6_stack
# asm 1: movq <caller6_stack=stack64#6,>caller6=int64#14
# asm 2: movq <caller6_stack=40(%rsp),>caller6=%rbx
movq 40(%rsp),%rbx
# qhasm: caller7 = caller7_stack
# asm 1: movq <caller7_stack=stack64#7,>caller7=int64#15
# asm 2: movq <caller7_stack=48(%rsp),>caller7=%rbp
movq 48(%rsp),%rbp
# qhasm: leave
add %r11,%rsp
mov %rdi,%rax
mov %rsi,%rdx
ret
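
What this long qhasm-generated listing computes is just a schoolbook multiply of two 4-limb (256-bit) integers into an 8-limb (512-bit) result, with mulq producing each 128-bit partial product in rdx:rax and add/adc chains propagating the carries. A minimal portable sketch, not part of the commit, assuming the GCC/Clang unsigned __int128 extension (names hypothetical):

#include <stdint.h>

// Sketch of the 4x4-limb schoolbook multiply: rp[0..7] = xp[0..3] * yp[0..3].
static void mul256(uint64_t rp[8], const uint64_t xp[4], const uint64_t yp[4])
{
    for (int i = 0; i < 8; ++i) rp[i] = 0;
    for (int i = 0; i < 4; ++i) {
        uint64_t carry = 0;
        for (int j = 0; j < 4; ++j) {
            // 64x64 -> 128-bit partial product, as mulq leaves in rdx:rax
            unsigned __int128 t = (unsigned __int128)xp[i] * yp[j];
            t += rp[i + j];   // accumulate into the output limb (the add)
            t += carry;       // propagate the previous column's carry (the adc)
            rp[i + j] = (uint64_t)t;
            carry = (uint64_t)(t >> 64);
        }
        rp[i + 4] = carry;    // the row's final carry lands one limb higher
    }
}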

View File

@ -64,7 +64,6 @@ endif
ifeq ($(ZT_DEBUG),1)
override CFLAGS+=-Wall -Wno-deprecated -Werror -g -pthread $(INCLUDES) $(DEFS)
override CXXFLAGS+=-Wall -Wno-deprecated -Werror -g -std=c++11 -pthread $(INCLUDES) $(DEFS)
override LDFLAGS+=
ZT_TRACE=1
STRIP?=echo
# The following line enables optimization for the crypto code, since
@ -72,10 +71,9 @@ ifeq ($(ZT_DEBUG),1)
node/Salsa20.o node/SHA512.o node/C25519.o node/Poly1305.o: CXXFLAGS=-Wall -O2 -g -pthread $(INCLUDES) $(DEFS)
else
CFLAGS?=-O3 -fstack-protector
override CFLAGS+=-Wall -Wno-deprecated -fPIE -pthread $(INCLUDES) -DNDEBUG $(DEFS)
override CFLAGS+=-Wall -Wno-deprecated -pthread $(INCLUDES) -DNDEBUG $(DEFS)
CXXFLAGS?=-O3 -fstack-protector
override CXXFLAGS+=-Wall -Wno-deprecated -fPIE -std=c++11 -pthread $(INCLUDES) -DNDEBUG $(DEFS)
override LDFLAGS+=-pie -Wl,-z,relro,-z,now
override CXXFLAGS+=-Wall -Wno-deprecated -std=c++11 -pthread $(INCLUDES) -DNDEBUG $(DEFS)
STRIP?=strip
STRIP+=--strip-all
endif
@ -104,11 +102,11 @@ CC_MACH=$(shell $(CC) -dumpmachine | cut -d '-' -f 1)
ZT_ARCHITECTURE=999
ifeq ($(CC_MACH),x86_64)
ZT_ARCHITECTURE=2
ZT_USE_X64_ASM_SALSA2012=1
ZT_USE_X64_ASM_CRYPTO=1
endif
ifeq ($(CC_MACH),amd64)
ZT_ARCHITECTURE=2
ZT_USE_X64_ASM_SALSA2012=1
ZT_USE_X64_ASM_CRYPTO=1
endif
ifeq ($(CC_MACH),powerpc64le)
ZT_ARCHITECTURE=8
@ -134,42 +132,42 @@ endif
ifeq ($(CC_MACH),arm)
ZT_ARCHITECTURE=3
override DEFS+=-DZT_NO_TYPE_PUNNING
ZT_USE_ARM32_NEON_ASM_SALSA2012=1
ZT_USE_ARM32_NEON_ASM_CRYPTO=1
endif
ifeq ($(CC_MACH),armel)
ZT_ARCHITECTURE=3
override DEFS+=-DZT_NO_TYPE_PUNNING
ZT_USE_ARM32_NEON_ASM_SALSA2012=1
ZT_USE_ARM32_NEON_ASM_CRYPTO=1
endif
ifeq ($(CC_MACH),armhf)
ZT_ARCHITECTURE=3
override DEFS+=-DZT_NO_TYPE_PUNNING
ZT_USE_ARM32_NEON_ASM_SALSA2012=1
ZT_USE_ARM32_NEON_ASM_CRYPTO=1
endif
ifeq ($(CC_MACH),armv6)
ZT_ARCHITECTURE=3
override DEFS+=-DZT_NO_TYPE_PUNNING
ZT_USE_ARM32_NEON_ASM_SALSA2012=1
ZT_USE_ARM32_NEON_ASM_CRYPTO=1
endif
ifeq ($(CC_MACH),armv6zk)
ZT_ARCHITECTURE=3
override DEFS+=-DZT_NO_TYPE_PUNNING
ZT_USE_ARM32_NEON_ASM_SALSA2012=1
ZT_USE_ARM32_NEON_ASM_CRYPTO=1
endif
ifeq ($(CC_MACH),armv6kz)
ZT_ARCHITECTURE=3
override DEFS+=-DZT_NO_TYPE_PUNNING
ZT_USE_ARM32_NEON_ASM_SALSA2012=1
ZT_USE_ARM32_NEON_ASM_CRYPTO=1
endif
ifeq ($(CC_MACH),armv7)
ZT_ARCHITECTURE=3
override DEFS+=-DZT_NO_TYPE_PUNNING
ZT_USE_ARM32_NEON_ASM_SALSA2012=1
ZT_USE_ARM32_NEON_ASM_CRYPTO=1
endif
ifeq ($(CC_MACH),armv7l)
ZT_ARCHITECTURE=3
override DEFS+=-DZT_NO_TYPE_PUNNING
ZT_USE_ARM32_NEON_ASM_SALSA2012=1
ZT_USE_ARM32_NEON_ASM_CRYPTO=1
endif
ifeq ($(CC_MACH),arm64)
ZT_ARCHITECTURE=4
@ -225,7 +223,7 @@ ifeq ($(ZT_ARCHITECTURE),3)
ifeq ($(shell if [ -e /usr/bin/dpkg ]; then dpkg --print-architecture; fi),armel)
override CFLAGS+=-march=armv5 -mfloat-abi=soft -msoft-float -mno-unaligned-access -marm
override CXXFLAGS+=-march=armv5 -mfloat-abi=soft -msoft-float -mno-unaligned-access -marm
ZT_USE_ARM32_NEON_ASM_SALSA2012=0
ZT_USE_ARM32_NEON_ASM_CRYPTO=0
else
override CFLAGS+=-march=armv5 -mno-unaligned-access -marm
override CXXFLAGS+=-march=armv5 -mno-unaligned-access -marm
@ -233,11 +231,11 @@ ifeq ($(ZT_ARCHITECTURE),3)
endif
# Build faster crypto on some targets
ifeq ($(ZT_USE_X64_ASM_SALSA2012),1)
override DEFS+=-DZT_USE_X64_ASM_SALSA2012
override CORE_OBJS+=ext/x64-salsa2012-asm/salsa2012.o
ifeq ($(ZT_USE_X64_ASM_CRYPTO),1)
override DEFS+=-DZT_USE_X64_ASM_SALSA2012 -DZT_USE_FAST_X64_ED25519
override CORE_OBJS+=ext/x64-salsa2012-asm/salsa2012.o ext/ed25519-amd64-asm/choose_t.o ext/ed25519-amd64-asm/consts.o ext/ed25519-amd64-asm/fe25519_add.o ext/ed25519-amd64-asm/fe25519_freeze.o ext/ed25519-amd64-asm/fe25519_mul.o ext/ed25519-amd64-asm/fe25519_square.o ext/ed25519-amd64-asm/fe25519_sub.o ext/ed25519-amd64-asm/ge25519_add_p1p1.o ext/ed25519-amd64-asm/ge25519_dbl_p1p1.o ext/ed25519-amd64-asm/ge25519_nielsadd2.o ext/ed25519-amd64-asm/ge25519_nielsadd_p1p1.o ext/ed25519-amd64-asm/ge25519_p1p1_to_p2.o ext/ed25519-amd64-asm/ge25519_p1p1_to_p3.o ext/ed25519-amd64-asm/ge25519_pnielsadd_p1p1.o ext/ed25519-amd64-asm/heap_rootreplaced.o ext/ed25519-amd64-asm/heap_rootreplaced_1limb.o ext/ed25519-amd64-asm/heap_rootreplaced_2limbs.o ext/ed25519-amd64-asm/heap_rootreplaced_3limbs.o ext/ed25519-amd64-asm/sc25519_add.o ext/ed25519-amd64-asm/sc25519_barrett.o ext/ed25519-amd64-asm/sc25519_lt.o ext/ed25519-amd64-asm/sc25519_sub_nored.o ext/ed25519-amd64-asm/ull4_mul.o ext/ed25519-amd64-asm/fe25519_getparity.o ext/ed25519-amd64-asm/fe25519_invert.o ext/ed25519-amd64-asm/fe25519_iseq.o ext/ed25519-amd64-asm/fe25519_iszero.o ext/ed25519-amd64-asm/fe25519_neg.o ext/ed25519-amd64-asm/fe25519_pack.o ext/ed25519-amd64-asm/fe25519_pow2523.o ext/ed25519-amd64-asm/fe25519_setint.o ext/ed25519-amd64-asm/fe25519_unpack.o ext/ed25519-amd64-asm/ge25519_add.o ext/ed25519-amd64-asm/ge25519_base.o ext/ed25519-amd64-asm/ge25519_double.o ext/ed25519-amd64-asm/ge25519_double_scalarmult.o ext/ed25519-amd64-asm/ge25519_isneutral.o ext/ed25519-amd64-asm/ge25519_multi_scalarmult.o ext/ed25519-amd64-asm/ge25519_pack.o ext/ed25519-amd64-asm/ge25519_scalarmult_base.o ext/ed25519-amd64-asm/ge25519_unpackneg.o ext/ed25519-amd64-asm/hram.o ext/ed25519-amd64-asm/index_heap.o ext/ed25519-amd64-asm/sc25519_from32bytes.o ext/ed25519-amd64-asm/sc25519_from64bytes.o ext/ed25519-amd64-asm/sc25519_from_shortsc.o ext/ed25519-amd64-asm/sc25519_iszero.o ext/ed25519-amd64-asm/sc25519_mul.o ext/ed25519-amd64-asm/sc25519_mul_shortsc.o ext/ed25519-amd64-asm/sc25519_slide.o ext/ed25519-amd64-asm/sc25519_to32bytes.o ext/ed25519-amd64-asm/sc25519_window4.o ext/ed25519-amd64-asm/sign.o
endif
ifeq ($(ZT_USE_ARM32_NEON_ASM_SALSA2012),1)
ifeq ($(ZT_USE_ARM32_NEON_ASM_CRYPTO),1)
override DEFS+=-DZT_USE_ARM32_NEON_ASM_SALSA2012
override CORE_OBJS+=ext/arm32-neon-salsa2012-asm/salsa2012.o
endif
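
In short: the ZT_USE_X64_ASM_SALSA2012 and ZT_USE_ARM32_NEON_ASM_SALSA2012 switches are renamed to *_CRYPTO because they now gate more than Salsa20/12; on x86-64 the same switch also defines ZT_USE_FAST_X64_ED25519 and pulls in the ed25519-amd64-asm object files listed above.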

View File

@ -20,7 +20,7 @@ Derived from public domain code by D. J. Bernstein.
#pragma warning(disable: 4146)
#endif
namespace ZeroTier {
namespace {
//////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////
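
The helpers below move from namespace ZeroTier into this anonymous namespace, which is why the subsequent hunks drop their static inline qualifiers: members of an anonymous namespace already have internal linkage. A minimal sketch of the pattern, with hypothetical names:

namespace {
    // Internal linkage: names here are visible only in this translation unit,
    // so 'static' is redundant and there is no collision across object files.
    int helper(int x) { return x + 1; }
} // anonymous namespace

int exported(int x) { return helper(x); } // ordinary external linkage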
@ -31,7 +31,7 @@ namespace ZeroTier {
#define crypto_uint64 uint64_t
#define crypto_hash_sha512_BYTES 64
static inline void add(unsigned int out[32],const unsigned int a[32],const unsigned int b[32])
void add(unsigned int out[32],const unsigned int a[32],const unsigned int b[32])
{
unsigned int j;
unsigned int u;
@ -40,7 +40,7 @@ static inline void add(unsigned int out[32],const unsigned int a[32],const unsig
u += a[31] + b[31]; out[31] = u;
}
static inline void sub(unsigned int out[32],const unsigned int a[32],const unsigned int b[32])
void sub(unsigned int out[32],const unsigned int a[32],const unsigned int b[32])
{
unsigned int j;
unsigned int u;
@ -54,7 +54,7 @@ static inline void sub(unsigned int out[32],const unsigned int a[32],const unsig
out[31] = u;
}
static inline void squeeze(unsigned int a[32])
void squeeze(unsigned int a[32])
{
unsigned int j;
unsigned int u;
@ -70,7 +70,7 @@ static const unsigned int minusp[32] = {
19, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128
} ;
static inline void freeze(unsigned int a[32])
void freeze(unsigned int a[32])
{
unsigned int aorig[32];
unsigned int j;
@ -82,7 +82,7 @@ static inline void freeze(unsigned int a[32])
for (j = 0;j < 32;++j) a[j] ^= negative & (aorig[j] ^ a[j]);
}
static inline void mult(unsigned int out[32],const unsigned int a[32],const unsigned int b[32])
void mult(unsigned int out[32],const unsigned int a[32],const unsigned int b[32])
{
unsigned int i;
unsigned int j;
@ -97,7 +97,7 @@ static inline void mult(unsigned int out[32],const unsigned int a[32],const unsi
squeeze(out);
}
static inline void mult121665(unsigned int out[32],const unsigned int a[32])
void mult121665(unsigned int out[32],const unsigned int a[32])
{
unsigned int j;
unsigned int u;
@ -110,7 +110,7 @@ static inline void mult121665(unsigned int out[32],const unsigned int a[32])
u += out[j]; out[j] = u;
}
static inline void square(unsigned int out[32],const unsigned int a[32])
void square(unsigned int out[32],const unsigned int a[32])
{
unsigned int i;
unsigned int j;
@ -130,7 +130,7 @@ static inline void square(unsigned int out[32],const unsigned int a[32])
squeeze(out);
}
static inline void select(unsigned int p[64],unsigned int q[64],const unsigned int r[64],const unsigned int s[64],unsigned int b)
void select(unsigned int p[64],unsigned int q[64],const unsigned int r[64],const unsigned int s[64],unsigned int b)
{
unsigned int j;
unsigned int t;
@ -268,7 +268,7 @@ static void recip(unsigned int out[32],const unsigned int z[32])
/* 2^255 - 21 */ mult(out,t1,z11);
}
static inline int crypto_scalarmult(unsigned char *q,const unsigned char *n,const unsigned char *p)
int crypto_scalarmult(unsigned char *q,const unsigned char *n,const unsigned char *p)
{
unsigned int work[96];
unsigned char e[32];
@ -287,7 +287,7 @@ static inline int crypto_scalarmult(unsigned char *q,const unsigned char *n,cons
}
static const unsigned char base[32] = {9};
static inline int crypto_scalarmult_base(unsigned char *q,const unsigned char *n)
int crypto_scalarmult_base(unsigned char *q,const unsigned char *n)
{
return crypto_scalarmult(q,n,base);
}
@ -295,10 +295,7 @@ static inline int crypto_scalarmult_base(unsigned char *q,const unsigned char *n
//////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////
// This is the Ed25519 stuff from SUPERCOP:
// http://bench.cr.yp.to/supercop.html
// Also public domain, newer version than the Ed25519 found in NaCl
// Ed25519 ref from: http://bench.cr.yp.to/supercop.html
typedef struct
{
@ -306,9 +303,52 @@ typedef struct
}
fe25519;
typedef struct
{
crypto_uint32 v[32];
}
sc25519;
typedef struct
{
crypto_uint32 v[16];
}
shortsc25519;
typedef struct
{
fe25519 x;
fe25519 y;
fe25519 z;
fe25519 t;
} ge25519;
#define ge25519_p3 ge25519
typedef struct
{
fe25519 x;
fe25519 z;
fe25519 y;
fe25519 t;
} ge25519_p1p1;
typedef struct
{
fe25519 x;
fe25519 y;
fe25519 z;
} ge25519_p2;
typedef struct
{
fe25519 x;
fe25519 y;
} ge25519_aff;
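
These typedefs, moved up front so the code that follows can refer to them earlier, are the usual coordinate systems of the SUPERCOP ref code: ge25519 (aliased to ge25519_p3 below) holds extended twisted Edwards coordinates (X, Y, Z, T with X*Y = Z*T), ge25519_p1p1 is the intermediate form produced by point addition and doubling, ge25519_p2 is plain projective, and ge25519_aff is affine.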
static void fe25519_sub(fe25519 *r, const fe25519 *x, const fe25519 *y);
static inline crypto_uint32 equal(crypto_uint32 a,crypto_uint32 b) /* 16-bit inputs */
crypto_uint32 equal(crypto_uint32 a,crypto_uint32 b) /* 16-bit inputs */
{
crypto_uint32 x = a ^ b; /* 0: yes; 1..65535: no */
x -= 1; /* 4294967295: yes; 0..65534: no */
@ -316,7 +356,7 @@ static inline crypto_uint32 equal(crypto_uint32 a,crypto_uint32 b) /* 16-bit inp
return x;
}
static inline crypto_uint32 ge(crypto_uint32 a,crypto_uint32 b) /* 16-bit inputs */
crypto_uint32 ge(crypto_uint32 a,crypto_uint32 b) /* 16-bit inputs */
{
unsigned int x = a;
x -= (unsigned int) b; /* 0..65535: yes; 4294901761..4294967295: no */
@ -325,17 +365,17 @@ static inline crypto_uint32 ge(crypto_uint32 a,crypto_uint32 b) /* 16-bit inputs
return x;
}
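
equal, ge, and the lt further down are branch-free comparisons: they turn a 16-bit predicate into a 0/1 or all-ones mask using only arithmetic, so secret data never drives a branch. A minimal sketch of the same trick, assuming inputs below 2^16:

#include <stdint.h>

// Branch-free 16-bit equality test: returns 1 if a == b, else 0.
static inline uint32_t ct_equal16(uint32_t a, uint32_t b)
{
    uint32_t x = a ^ b; // zero iff a == b
    x -= 1;             // wraps to 0xFFFFFFFF iff a == b
    return x >> 31;     // top bit: 1 if equal, 0 otherwise
}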
static inline crypto_uint32 times19(crypto_uint32 a)
crypto_uint32 times19(crypto_uint32 a)
{
return (a << 4) + (a << 1) + a;
}
static inline crypto_uint32 times38(crypto_uint32 a)
crypto_uint32 times38(crypto_uint32 a)
{
return (a << 5) + (a << 2) + (a << 1);
}
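
times19 and times38 compute 19*a = 16a + 2a + a and 38*a = 32a + 4a + 2a with shifts and adds instead of a multiply; the constants come from reduction modulo 2^255 - 19, where 2^255 = 19 (mod p), and a fold from the 2^256 boundary of the 32-byte representation contributes the factor 38 = 2*19.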
static inline void reduce_add_sub(fe25519 *r)
void reduce_add_sub(fe25519 *r)
{
crypto_uint32 t;
int i,rep;
@ -355,7 +395,7 @@ static inline void reduce_add_sub(fe25519 *r)
}
}
static inline void reduce_mul(fe25519 *r)
void reduce_mul(fe25519 *r)
{
crypto_uint32 t;
int i,rep;
@ -376,7 +416,7 @@ static inline void reduce_mul(fe25519 *r)
}
/* reduction modulo 2^255-19 */
static inline void fe25519_freeze(fe25519 *r)
void fe25519_freeze(fe25519 *r)
{
int i;
crypto_uint32 m = equal(r->v[31],127);
@ -392,7 +432,7 @@ static inline void fe25519_freeze(fe25519 *r)
r->v[0] -= m&237;
}
static inline void fe25519_unpack(fe25519 *r, const unsigned char x[32])
void fe25519_unpack(fe25519 *r, const unsigned char x[32])
{
int i;
for(i=0;i<32;i++) r->v[i] = x[i];
@ -400,7 +440,7 @@ static inline void fe25519_unpack(fe25519 *r, const unsigned char x[32])
}
/* Assumes input x being reduced below 2^255 */
static inline void fe25519_pack(unsigned char r[32], const fe25519 *x)
void fe25519_pack(unsigned char r[32], const fe25519 *x)
{
int i;
fe25519 y = *x;
@ -409,7 +449,7 @@ static inline void fe25519_pack(unsigned char r[32], const fe25519 *x)
r[i] = y.v[i];
}
static inline int fe25519_iseq_vartime(const fe25519 *x, const fe25519 *y)
int fe25519_iseq_vartime(const fe25519 *x, const fe25519 *y)
{
int i;
fe25519 t1 = *x;
@ -421,7 +461,7 @@ static inline int fe25519_iseq_vartime(const fe25519 *x, const fe25519 *y)
return 1;
}
static inline void fe25519_cmov(fe25519 *r, const fe25519 *x, unsigned char b)
void fe25519_cmov(fe25519 *r, const fe25519 *x, unsigned char b)
{
int i;
crypto_uint32 mask = b;
@ -429,27 +469,27 @@ static inline void fe25519_cmov(fe25519 *r, const fe25519 *x, unsigned char b)
for(i=0;i<32;i++) r->v[i] ^= mask & (x->v[i] ^ r->v[i]);
}
static inline unsigned char fe25519_getparity(const fe25519 *x)
unsigned char fe25519_getparity(const fe25519 *x)
{
fe25519 t = *x;
fe25519_freeze(&t);
return t.v[0] & 1;
}
static inline void fe25519_setone(fe25519 *r)
void fe25519_setone(fe25519 *r)
{
int i;
r->v[0] = 1;
for(i=1;i<32;i++) r->v[i]=0;
}
static inline void fe25519_setzero(fe25519 *r)
void fe25519_setzero(fe25519 *r)
{
int i;
for(i=0;i<32;i++) r->v[i]=0;
}
static inline void fe25519_neg(fe25519 *r, const fe25519 *x)
void fe25519_neg(fe25519 *r, const fe25519 *x)
{
fe25519 t;
int i;
@ -458,14 +498,14 @@ static inline void fe25519_neg(fe25519 *r, const fe25519 *x)
fe25519_sub(r, r, &t);
}
static inline void fe25519_add(fe25519 *r, const fe25519 *x, const fe25519 *y)
void fe25519_add(fe25519 *r, const fe25519 *x, const fe25519 *y)
{
int i;
for(i=0;i<32;i++) r->v[i] = x->v[i] + y->v[i];
reduce_add_sub(r);
}
static inline void fe25519_sub(fe25519 *r, const fe25519 *x, const fe25519 *y)
void fe25519_sub(fe25519 *r, const fe25519 *x, const fe25519 *y)
{
int i;
crypto_uint32 t[32];
@ -476,7 +516,7 @@ static inline void fe25519_sub(fe25519 *r, const fe25519 *x, const fe25519 *y)
reduce_add_sub(r);
}
static inline void fe25519_mul(fe25519 *r, const fe25519 *x, const fe25519 *y)
void fe25519_mul(fe25519 *r, const fe25519 *x, const fe25519 *y)
{
int i,j;
crypto_uint32 t[63];
@ -493,12 +533,12 @@ static inline void fe25519_mul(fe25519 *r, const fe25519 *x, const fe25519 *y)
reduce_mul(r);
}
static inline void fe25519_square(fe25519 *r, const fe25519 *x)
void fe25519_square(fe25519 *r, const fe25519 *x)
{
fe25519_mul(r, x, x);
}
static void fe25519_invert(fe25519 *r, const fe25519 *x)
void fe25519_invert(fe25519 *r, const fe25519 *x)
{
fe25519 z2;
fe25519 z9;
@ -565,7 +605,7 @@ static void fe25519_invert(fe25519 *r, const fe25519 *x)
/* 2^255 - 21 */ fe25519_mul(r,&t1,&z11);
}
static void fe25519_pow2523(fe25519 *r, const fe25519 *x)
void fe25519_pow2523(fe25519 *r, const fe25519 *x)
{
fe25519 z2;
fe25519 z9;
@ -619,25 +659,13 @@ static void fe25519_pow2523(fe25519 *r, const fe25519 *x)
/* 2^252 - 3 */ fe25519_mul(r,&t,x);
}
typedef struct
{
crypto_uint32 v[32];
}
sc25519;
typedef struct
{
crypto_uint32 v[16];
}
shortsc25519;
static const crypto_uint32 m[32] = {0xED, 0xD3, 0xF5, 0x5C, 0x1A, 0x63, 0x12, 0x58, 0xD6, 0x9C, 0xF7, 0xA2, 0xDE, 0xF9, 0xDE, 0x14,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10};
static const crypto_uint32 mu[33] = {0x1B, 0x13, 0x2C, 0x0A, 0xA3, 0xE5, 0x9C, 0xED, 0xA7, 0x29, 0x63, 0x08, 0x5D, 0x21, 0x06, 0x21,
0xEB, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F};
static inline crypto_uint32 lt(crypto_uint32 a,crypto_uint32 b) /* 16-bit inputs */
crypto_uint32 lt(crypto_uint32 a,crypto_uint32 b) /* 16-bit inputs */
{
unsigned int x = a;
x -= (unsigned int) b; /* 0..65535: no; 4294901761..4294967295: yes */
@ -646,7 +674,7 @@ static inline crypto_uint32 lt(crypto_uint32 a,crypto_uint32 b) /* 16-bit inputs
}
/* Reduce coefficients of r before calling reduce_add_sub */
static inline void reduce_add_sub(sc25519 *r)
void reduce_add_sub(sc25519 *r)
{
crypto_uint32 pb = 0;
crypto_uint32 b;
@ -667,7 +695,7 @@ static inline void reduce_add_sub(sc25519 *r)
}
/* Reduce coefficients of x before calling barrett_reduce */
static inline void barrett_reduce(sc25519 *r, const crypto_uint32 x[64])
void barrett_reduce(sc25519 *r, const crypto_uint32 x[64])
{
/* See HAC, Alg. 14.42 */
int i,j;
@ -718,7 +746,7 @@ static inline void barrett_reduce(sc25519 *r, const crypto_uint32 x[64])
reduce_add_sub(r);
}
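
barrett_reduce follows Barrett reduction (the HAC Alg. 14.42 cited in the code): with mu = floor(b^(2k) / m) precomputed for the group order m, the quotient x/m is approximated using only multiplications and shifts, and the small error in the estimate is fixed by a few conditional subtractions. A toy single-limb sketch of the idea, assuming a modulus m in [2, 2^16):

#include <stdint.h>

// Toy Barrett reduction: returns x mod m, with mu = floor(2^32 / m)
// precomputed once per modulus. The estimate q is never too large and
// is short by at most one, so a single correction suffices.
static uint16_t barrett_toy(uint32_t x, uint16_t m, uint32_t mu)
{
    uint32_t q = (uint32_t)(((uint64_t)x * mu) >> 32); // q ~= x / m
    uint32_t r = x - q * m;                            // r < 2m by the error bound
    if (r >= m) r -= m;                                // one conditional subtraction
    return (uint16_t)r;
}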
static inline void sc25519_from32bytes(sc25519 *r, const unsigned char x[32])
void sc25519_from32bytes(sc25519 *r, const unsigned char x[32])
{
int i;
crypto_uint32 t[64];
@ -727,7 +755,7 @@ static inline void sc25519_from32bytes(sc25519 *r, const unsigned char x[32])
barrett_reduce(r, t);
}
static inline void sc25519_from64bytes(sc25519 *r, const unsigned char x[64])
void sc25519_from64bytes(sc25519 *r, const unsigned char x[64])
{
int i;
crypto_uint32 t[64];
@ -735,13 +763,13 @@ static inline void sc25519_from64bytes(sc25519 *r, const unsigned char x[64])
barrett_reduce(r, t);
}
static inline void sc25519_to32bytes(unsigned char r[32], const sc25519 *x)
void sc25519_to32bytes(unsigned char r[32], const sc25519 *x)
{
int i;
for(i=0;i<32;i++) r[i] = x->v[i];
}
static inline void sc25519_add(sc25519 *r, const sc25519 *x, const sc25519 *y)
void sc25519_add(sc25519 *r, const sc25519 *x, const sc25519 *y)
{
int i, carry;
for(i=0;i<32;i++) r->v[i] = x->v[i] + y->v[i];
@ -754,7 +782,7 @@ static inline void sc25519_add(sc25519 *r, const sc25519 *x, const sc25519 *y)
reduce_add_sub(r);
}
static inline void sc25519_mul(sc25519 *r, const sc25519 *x, const sc25519 *y)
void sc25519_mul(sc25519 *r, const sc25519 *x, const sc25519 *y)
{
int i,j,carry;
crypto_uint32 t[64];
@ -775,7 +803,7 @@ static inline void sc25519_mul(sc25519 *r, const sc25519 *x, const sc25519 *y)
barrett_reduce(r, t);
}
static inline void sc25519_window3(signed char r[85], const sc25519 *s)
void sc25519_window3(signed char r[85], const sc25519 *s)
{
char carry;
int i;
@ -812,7 +840,7 @@ static inline void sc25519_window3(signed char r[85], const sc25519 *s)
r[84] += carry;
}
static inline void sc25519_2interleave2(unsigned char r[127], const sc25519 *s1, const sc25519 *s2)
void sc25519_2interleave2(unsigned char r[127], const sc25519 *s1, const sc25519 *s2)
{
int i;
for(i=0;i<31;i++)
@ -827,14 +855,6 @@ static inline void sc25519_2interleave2(unsigned char r[127], const sc25519 *s1,
r[126] = ((s1->v[31] >> 4) & 3) ^ (((s2->v[31] >> 4) & 3) << 2);
}
typedef struct
{
fe25519 x;
fe25519 y;
fe25519 z;
fe25519 t;
} ge25519;
/* d */
static const fe25519 ge25519_ecd = {{0xA3, 0x78, 0x59, 0x13, 0xCA, 0x4D, 0xEB, 0x75, 0xAB, 0xD8, 0x41, 0x41, 0x4D, 0x0A, 0x70, 0x00,
0x98, 0xE8, 0x79, 0x77, 0x79, 0x40, 0xC7, 0x8C, 0x73, 0xFE, 0x6F, 0x2B, 0xEE, 0x6C, 0x03, 0x52}};
@ -845,30 +865,6 @@ static const fe25519 ge25519_ec2d = {{0x59, 0xF1, 0xB2, 0x26, 0x94, 0x9B, 0xD6,
static const fe25519 ge25519_sqrtm1 = {{0xB0, 0xA0, 0x0E, 0x4A, 0x27, 0x1B, 0xEE, 0xC4, 0x78, 0xE4, 0x2F, 0xAD, 0x06, 0x18, 0x43, 0x2F,
0xA7, 0xD7, 0xFB, 0x3D, 0x99, 0x00, 0x4D, 0x2B, 0x0B, 0xDF, 0xC1, 0x4F, 0x80, 0x24, 0x83, 0x2B}};
#define ge25519_p3 ge25519
typedef struct
{
fe25519 x;
fe25519 z;
fe25519 y;
fe25519 t;
} ge25519_p1p1;
typedef struct
{
fe25519 x;
fe25519 y;
fe25519 z;
} ge25519_p2;
typedef struct
{
fe25519 x;
fe25519 y;
} ge25519_aff;
/* Packed coordinates of the base point */
static const ge25519 ge25519_base = {{{0x1A, 0xD5, 0x25, 0x8F, 0x60, 0x2D, 0x56, 0xC9, 0xB2, 0xA7, 0x25, 0x95, 0x60, 0xC7, 0x2C, 0x69,
0x5C, 0xDC, 0xD6, 0xFD, 0x31, 0xE2, 0xA4, 0xC0, 0xFE, 0x53, 0x6E, 0xCD, 0xD3, 0x36, 0x69, 0x21}},
@ -1733,27 +1729,27 @@ static const ge25519_aff ge25519_base_multiples_affine[425] = {
{{0x69, 0x3e, 0x47, 0x97, 0x2c, 0xaf, 0x52, 0x7c, 0x78, 0x83, 0xad, 0x1b, 0x39, 0x82, 0x2f, 0x02, 0x6f, 0x47, 0xdb, 0x2a, 0xb0, 0xe1, 0x91, 0x99, 0x55, 0xb8, 0x99, 0x3a, 0xa0, 0x44, 0x11, 0x51}}}
};
static inline void p1p1_to_p2(ge25519_p2 *r, const ge25519_p1p1 *p)
void p1p1_to_p2(ge25519_p2 *r, const ge25519_p1p1 *p)
{
fe25519_mul(&r->x, &p->x, &p->t);
fe25519_mul(&r->y, &p->y, &p->z);
fe25519_mul(&r->z, &p->z, &p->t);
}
static inline void p1p1_to_p2_2(ge25519_p3 *r, const ge25519_p1p1 *p)
void p1p1_to_p2_2(ge25519_p3 *r, const ge25519_p1p1 *p)
{
fe25519_mul(&r->x, &p->x, &p->t);
fe25519_mul(&r->y, &p->y, &p->z);
fe25519_mul(&r->z, &p->z, &p->t);
}
static inline void p1p1_to_p3(ge25519_p3 *r, const ge25519_p1p1 *p)
void p1p1_to_p3(ge25519_p3 *r, const ge25519_p1p1 *p)
{
p1p1_to_p2_2(r, p);
fe25519_mul(&r->t, &p->x, &p->y);
}
static void ge25519_mixadd2(ge25519_p3 *r, const ge25519_aff *q)
void ge25519_mixadd2(ge25519_p3 *r, const ge25519_aff *q)
{
fe25519 a,b,t1,t2,c,d,e,f,g,h,qt;
fe25519_mul(&qt, &q->x, &q->y);
@ -1776,7 +1772,7 @@ static void ge25519_mixadd2(ge25519_p3 *r, const ge25519_aff *q)
fe25519_mul(&r->t, &e, &h);
}
static void add_p1p1(ge25519_p1p1 *r, const ge25519_p3 *p, const ge25519_p3 *q)
void add_p1p1(ge25519_p1p1 *r, const ge25519_p3 *p, const ge25519_p3 *q)
{
fe25519 a, b, c, d, t;
@ -1797,7 +1793,7 @@ static void add_p1p1(ge25519_p1p1 *r, const ge25519_p3 *p, const ge25519_p3 *q)
}
/* See http://www.hyperelliptic.org/EFD/g1p/auto-twisted-extended-1.html#doubling-dbl-2008-hwcd */
static void dbl_p1p1(ge25519_p1p1 *r, const ge25519_p2 *p)
void dbl_p1p1(ge25519_p1p1 *r, const ge25519_p2 *p)
{
fe25519 a,b,c,d;
fe25519_square(&a, &p->x);
@ -1816,13 +1812,13 @@ static void dbl_p1p1(ge25519_p1p1 *r, const ge25519_p2 *p)
}
/* Constant-time version of: if(b) r = p */
static inline void cmov_aff(ge25519_aff *r, const ge25519_aff *p, unsigned char b)
void cmov_aff(ge25519_aff *r, const ge25519_aff *p, unsigned char b)
{
fe25519_cmov(&r->x, &p->x, b);
fe25519_cmov(&r->y, &p->y, b);
}
static inline unsigned char equal(signed char b,signed char c)
unsigned char equal(signed char b,signed char c)
{
unsigned char ub = b;
unsigned char uc = c;
@ -1833,14 +1829,14 @@ static inline unsigned char equal(signed char b,signed char c)
return (unsigned char)y;
}
static inline unsigned char negative(signed char b)
unsigned char negative(signed char b)
{
unsigned long long x = b; /* 18446744073709551361..18446744073709551615: yes; 0..255: no */
x >>= 63; /* 1: yes; 0: no */
return (unsigned char)x;
}
static inline void choose_t(ge25519_aff *t, unsigned long long pos, signed char b)
void choose_t(ge25519_aff *t, unsigned long long pos, signed char b)
{
/* constant time */
fe25519 v;
@ -1853,7 +1849,7 @@ static inline void choose_t(ge25519_aff *t, unsigned long long pos, signed char
fe25519_cmov(&t->x, &v, negative(b));
}
static inline void setneutral(ge25519 *r)
void setneutral(ge25519 *r)
{
fe25519_setzero(&r->x);
fe25519_setone(&r->y);
@ -1862,7 +1858,7 @@ static inline void setneutral(ge25519 *r)
}
/* return 0 on success, -1 otherwise */
static int ge25519_unpackneg_vartime(ge25519_p3 *r, const unsigned char p[32])
int ge25519_unpackneg_vartime(ge25519_p3 *r, const unsigned char p[32])
{
unsigned char par;
fe25519 t, chk, num, den, den2, den4, den6;
@ -1909,7 +1905,7 @@ static int ge25519_unpackneg_vartime(ge25519_p3 *r, const unsigned char p[32])
return 0;
}
static inline void ge25519_pack(unsigned char r[32], const ge25519_p3 *p)
void ge25519_pack(unsigned char r[32], const ge25519_p3 *p)
{
fe25519 tx, ty, zi;
fe25519_invert(&zi, &p->z);
@ -1920,7 +1916,7 @@ static inline void ge25519_pack(unsigned char r[32], const ge25519_p3 *p)
}
/* computes [s1]p1 + [s2]p2 */
static void ge25519_double_scalarmult_vartime(ge25519_p3 *r, const ge25519_p3 *p1, const sc25519 *s1, const ge25519_p3 *p2, const sc25519 *s2)
void ge25519_double_scalarmult_vartime(ge25519_p3 *r, const ge25519_p3 *p1, const sc25519 *s1, const ge25519_p3 *p2, const sc25519 *s2)
{
ge25519_p1p1 tp1p1;
ge25519_p3 pre[16];
@ -1965,7 +1961,7 @@ static void ge25519_double_scalarmult_vartime(ge25519_p3 *r, const ge25519_p3 *p
}
}
static inline void ge25519_scalarmult_base(ge25519_p3 *r, const sc25519 *s)
void ge25519_scalarmult_base(ge25519_p3 *r, const sc25519 *s)
{
signed char b[85];
int i;
@ -1982,7 +1978,7 @@ static inline void ge25519_scalarmult_base(ge25519_p3 *r, const sc25519 *s)
}
}
static inline void get_hram(unsigned char *hram, const unsigned char *sm, const unsigned char *pk, unsigned char *playground, unsigned long long smlen)
void get_hram(unsigned char *hram, const unsigned char *sm, const unsigned char *pk, unsigned char *playground, unsigned long long smlen)
{
unsigned long long i;
@ -1991,12 +1987,20 @@ static inline void get_hram(unsigned char *hram, const unsigned char *sm, const
for (i = 64;i < smlen;++i) playground[i] = sm[i];
//crypto_hash_sha512(hram,playground,smlen);
SHA512::hash(hram,playground,(unsigned int)smlen);
ZeroTier::SHA512::hash(hram,playground,(unsigned int)smlen);
}
//////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////
} // anonymous namespace
#ifdef ZT_USE_FAST_X64_ED25519
extern "C" void ed25519_amd64_asm_sign(const unsigned char *sk,const unsigned char *pk,const unsigned char *m,const unsigned int mlen,unsigned char *sig);
#endif
namespace ZeroTier {
void C25519::agree(const C25519::Private &mine,const C25519::Public &their,void *keybuf,unsigned int keylen)
{
unsigned char rawkey[32];
@ -2015,6 +2019,9 @@ void C25519::agree(const C25519::Private &mine,const C25519::Public &their,void
void C25519::sign(const C25519::Private &myPrivate,const C25519::Public &myPublic,const void *msg,unsigned int len,void *signature)
{
#ifdef ZT_USE_FAST_X64_ED25519
ed25519_amd64_asm_sign(myPrivate.data + 32,myPublic.data + 32,(const unsigned char *)msg,len,(unsigned char *)signature);
#else
sc25519 sck, scs, scsk;
ge25519 ger;
unsigned char r[32];
@ -2038,7 +2045,6 @@ void C25519::sign(const C25519::Private &myPrivate,const C25519::Public &myPubli
sig[64 + i] = digest[i];
SHA512::hash(hmg,sig + 32,64);
//crypto_hash_sha512(hmg, sm+32, mlen+32); /* Generate k as h(extsk[32],...,extsk[63],m) */
/* Computation of R */
sc25519_from64bytes(&sck, hmg);
@ -2060,6 +2066,7 @@ void C25519::sign(const C25519::Private &myPrivate,const C25519::Public &myPubli
sc25519_to32bytes(s,&scs); /* cat s */
for(unsigned int i=0;i<32;i++)
sig[32 + i] = s[i];
#endif
}
bool C25519::verify(const C25519::Public &their,const void *msg,unsigned int len,const void *signature)
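
For reference, both branches of sign() produce a standard Ed25519 signature: with the expanded secret key SHA-512(seed) split as (a || h), the signer computes r = SHA-512(h || M) mod l, R = [r]B, k = SHA-512(R || A || M) mod l, and S = (r + k*a) mod l; R || S are the first 64 bytes of the signature, and the loop above additionally copies a SHA-512 digest of the message into sig[64..], which appears to be a ZeroTier-specific extension.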

View File

@ -14,6 +14,36 @@ Public domain.
#include "SHA512.hpp"
#include "Utils.hpp"
#ifdef __APPLE__
#include <CommonCrypto/CommonDigest.h>
#define ZT_HAVE_NATIVE_SHA512
namespace ZeroTier {
void SHA512::hash(void *digest,const void *data,unsigned int len)
{
CC_SHA512_CTX ctx;
CC_SHA512_Init(&ctx);
CC_SHA512_Update(&ctx,data,len);
CC_SHA512_Final(reinterpret_cast<unsigned char *>(digest),&ctx);
}
}
#endif
#ifdef ZT_USE_LIBCRYPTO
#include <openssl/sha.h>
#define ZT_HAVE_NATIVE_SHA512
namespace ZeroTier {
void SHA512::hash(void *digest,const void *data,unsigned int len)
{
SHA512_CTX ctx;
SHA512_Init(&ctx);
SHA512_Update(&ctx,data,len);
SHA512_Final(reinterpret_cast<unsigned char *>(digest),&ctx);
}
}
#endif
#ifndef ZT_HAVE_NATIVE_SHA512
namespace ZeroTier {
#define uint64 uint64_t
@ -276,9 +306,6 @@ static const unsigned char iv[64] = {
0x5b,0xe0,0xcd,0x19,0x13,0x7e,0x21,0x79
};
//////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////
void SHA512::hash(void *digest,const void *data,unsigned int len)
{
unsigned char h[64];
@ -329,3 +356,12 @@ void SHA512::hash(void *digest,const void *data,unsigned int len)
}
} // namespace ZeroTier
#endif // !ZT_HAVE_NATIVE_SHA512
// Re-export with C linkage for the C crypto sources included on some platforms
// (ported fast crypto code), so that code need not link against a third-party SHA512().
extern "C" void ZT_sha512internal(void *digest,const void *data,unsigned int len)
{
ZeroTier::SHA512::hash(digest,data,len);
}
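
A hedged usage sketch of this re-export (written in C++ here for consistency; the ported C sources would use the equivalent plain declaration, and the surrounding function name is hypothetical):

extern "C" void ZT_sha512internal(void *digest, const void *data, unsigned int len);

static void hashExample()
{
    unsigned char digest[64];            // SHA-512 output is 64 bytes
    ZT_sha512internal(digest, "abc", 3); // hash the 3-byte message "abc"
}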

View File

@ -381,7 +381,7 @@ std::vector<std::string> OSUtils::split(const char *s,const char *const sep,cons
std::string OSUtils::platformDefaultHomePath()
{
#ifdef __QNAP__
char *cmd = "/sbin/getcfg ZeroTier Install_Path -f /etc/config/qpkg.conf";
const char *cmd = "/sbin/getcfg zerotier Install_Path -f /etc/config/qpkg.conf";
char buf[128];
FILE *fp;
if ((fp = popen(cmd, "r")) == NULL) {

View File

@ -376,11 +376,11 @@ static int testCrypto()
C25519::Pair bp[8];
for(int k=0;k<8;++k)
bp[k] = C25519::generate();
const uint64_t st = OSUtils::now();
uint64_t st = OSUtils::now();
for(unsigned int k=0;k<50;++k) {
C25519::agree(bp[~k & 7],bp[k & 7].pub,buf1,64);
}
const uint64_t et = OSUtils::now();
uint64_t et = OSUtils::now();
std::cout << ((double)(et - st) / 50.0) << "ms per agreement." << std::endl;
std::cout << "[crypto] Testing Ed25519 ECC signatures... "; std::cout.flush();
@ -419,6 +419,15 @@ static int testCrypto()
}
std::cout << "PASS" << std::endl;
std::cout << "[crypto] Benchmarking Ed25519 ECC signatures... "; std::cout.flush();
st = OSUtils::now();
for(int k=0;k<1000;++k) {
C25519::Signature sig;
C25519::sign(didntSign.priv,didntSign.pub,buf1,sizeof(buf1),sig.data);
}
et = OSUtils::now();
std::cout << ((double)(et - st) / 1000.0) << "ms per signature." << std::endl;
return 0;
}

View File

@ -2276,7 +2276,7 @@ public:
// Check to see if we've already written this first. This reduces
// redundant writes and I/O overhead on most platforms and has
// little effect on others.
f = fopen(p,"r");
f = fopen(p,"rb");
if (f) {
char buf[65535];
long l = (long)fread(buf,1,sizeof(buf),f);
@ -2285,10 +2285,10 @@ public:
return;
}
f = fopen(p,"w");
f = fopen(p,"wb");
if ((!f)&&(dirname[0])) { // create subdirectory if it does not exist
OSUtils::mkdir(dirname);
f = fopen(p,"w");
f = fopen(p,"wb");
}
if (f) {
if (fwrite(data,len,1,f) != 1)
@ -2417,7 +2417,7 @@ public:
default:
return -1;
}
FILE *f = fopen(p,"r");
FILE *f = fopen(p,"rb");
if (f) {
int n = (int)fread(data,1,maxlen,f);
fclose(f);
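
The change from "r"/"w" to "rb"/"wb" matters on Windows, where text mode translates CRLF line endings on the way in and out and treats byte 0x1A as end-of-file; binary mode passes bytes through unchanged, and on POSIX the 'b' is a no-op. A minimal sketch of the pattern, with hypothetical names:

#include <cstdio>
#include <cstddef>

// Write a buffer verbatim; "wb" guarantees no text-mode byte translation.
static bool writeFileRaw(const char *path, const void *data, std::size_t len)
{
    std::FILE *f = std::fopen(path, "wb");
    if (!f) return false;
    const bool ok = (std::fwrite(data, 1, len, f) == len);
    std::fclose(f);
    return ok;
}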