Commit d383dfe9 authored by Michael Hamburg

going to GMP-style element[1] types

Conflicts:
	src/include/ec_point.h
	src/p448/magic.c
	src/p480/magic.c
	src/p521/magic.c
	test/bench.c
parent b0a21107
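
For context, the "GMP-style element[1] types" in the message are the idiom of defining the field-element type as a one-element array, so values can be passed by name (the array decays to a pointer) without writing & at every call site. A minimal, self-contained sketch of the idiom — the real typedef is added later in this diff; the limb layout below is purely illustrative:

#include <stdint.h>
#include <string.h>

struct field_t { uint64_t limb[8]; };   /* illustrative limb count only */
typedef struct field_t field_a_t[1];    /* the GMP-style array-of-one type */

static void field_copy_demo(field_a_t out, const field_a_t in) {
    /* both parameters have decayed to (const) struct field_t * */
    memcpy(out, in, sizeof(*out));
}

static void caller_demo(void) {
    field_a_t a, b;                     /* ordinary automatic objects, no malloc */
    memset(b, 0, sizeof(b));
    field_copy_demo(a, b);              /* previously: field_copy(&a, &b)        */
}

This mirrors how GMP's mpz_t is declared, which is why the diff below mostly consists of dropping & and * at declarations and call sites.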
@@ -13,78 +13,75 @@
mask_t
field_eq (
const struct field_t *a,
const struct field_t *b
const field_a_t a,
const field_a_t b
) {
struct field_t ra, rb;
field_copy(&ra, a);
field_copy(&rb, b);
field_weak_reduce(&ra);
field_weak_reduce(&rb);
field_sub_RAW(&ra, &ra, &rb);
field_bias(&ra, 2);
return field_is_zero(&ra);
field_a_t ra, rb;
field_copy(ra, a);
field_copy(rb, b);
field_weak_reduce(ra);
field_weak_reduce(rb);
field_sub_RAW(ra, ra, rb);
field_bias(ra, 2);
return field_is_zero(ra);
}
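Reading of the new field_eq body (the logic is unchanged, only the address-of operators are gone): in the field, a = b exactly when a - b ≡ 0 (mod p), so the function weak-reduces working copies of both inputs, forms their raw difference, applies field_bias so the raw subtraction stays in range, and returns field_is_zero of the result as the equality mask.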
void
field_inverse (
struct field_t* a,
const struct field_t* x
field_a_t a,
const field_a_t x
) {
struct field_t L0, L1;
field_isr ( &L0, x );
field_sqr ( &L1, &L0 );
field_sqr ( &L0, &L1 );
field_mul ( a, x, &L0 );
field_a_t L0, L1;
field_isr ( L0, x );
field_sqr ( L1, L0 );
field_sqr ( L0, L1 );
field_mul ( a, x, L0 );
}
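Why the rewritten field_inverse is correct (assuming, as its name suggests, that field_isr(r, x) computes an inverse square root r = x^(-1/2)):

  L0 = x^(-1/2)
  L1 = L0^2 = x^(-1)
  L0 = L1^2 = x^(-2)
  a  = x * L0 = x^(-1)

so one field_isr, two squarings and one multiplication produce the inverse; only the calling convention changes in this commit.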
mask_t
field_is_square (
const struct field_t* x
const field_a_t x
) {
mask_t L2, L3;
struct field_t L0, L1;
field_isr ( &L0, x );
field_sqr ( &L1, &L0 );
field_mul ( &L0, x, &L1 );
field_subw( &L0, 1 );
L3 = field_is_zero( &L0 );
L2 = field_is_zero( x );
return L3 | L2;
field_a_t L0, L1;
field_isr ( L0, x );
field_sqr ( L1, L0 );
field_mul ( L0, x, L1 );
field_subw( L0, 1 );
return field_is_zero( L0 ) | field_is_zero( x );
}
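Reading of field_is_square, on the assumption that field_isr(r, x) returns some r with r^2 * x = ±1 for these p ≡ 3 (mod 4) fields:

  L0 = x * isr(x)^2 = +1  exactly when x is a nonzero square,

so field_subw(L0, 1) followed by field_is_zero yields the quadratic-residuosity mask, and the trailing | field_is_zero(x) counts zero as a square. The old and new bodies are equivalent; the temporaries L2 and L3 are simply folded into the return expression.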
void
field_simultaneous_invert (
struct field_t *__restrict__ out,
const struct field_t *in,
field_a_t *__restrict__ out,
const field_a_t *in,
unsigned int n
) {
if (n==0) {
return;
} else if (n==1) {
field_inverse(out,in);
field_inverse(out[0],in[0]);
return;
}
field_copy(&out[1], &in[0]);
field_copy(out[1], in[0]);
int i;
for (i=1; i<(int) (n-1); i++) {
field_mul(&out[i+1], &out[i], &in[i]);
field_mul(out[i+1], out[i], in[i]);
}
field_mul(&out[0], &out[n-1], &in[n-1]);
field_mul(out[0], out[n-1], in[n-1]);
struct field_t tmp;
field_inverse(&tmp, &out[0]);
field_copy(&out[0], &tmp);
field_a_t tmp;
field_inverse(tmp, out[0]);
field_copy(out[0], tmp);
/* at this point, out[0] = product(in[i]) ^ -1
* out[i] = product(in[0]..in[i-1]) if i != 0
*/
for (i=n-1; i>0; i--) {
field_mul(&tmp, &out[i], &out[0]);
field_copy(&out[i], &tmp);
field_mul(tmp, out[i], out[0]);
field_copy(out[i], tmp);
field_mul(&tmp, &out[0], &in[i]);
field_copy(&out[0], &tmp);
field_mul(tmp, out[0], in[i]);
field_copy(out[0], tmp);
}
}
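For reference, this function is Montgomery's batch-inversion trick; a trace of the new code for n = 3:

  forward pass:   out[1] = in[0],  out[2] = in[0]*in[1],  out[0] = in[0]*in[1]*in[2]
  single invert:  out[0] = (in[0]*in[1]*in[2])^(-1)
  i = 2:          out[2] = out[2]*out[0] = in[2]^(-1),  out[0] = out[0]*in[2] = (in[0]*in[1])^(-1)
  i = 1:          out[1] = out[1]*out[0] = in[1]^(-1),  out[0] = out[0]*in[1] = in[0]^(-1)

so n inversions cost one field_inverse plus roughly 3(n-1) multiplications; the commit only switches the temporaries and parameters to the array form.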
@@ -162,7 +162,7 @@ goldilocks_derive_private_key (
struct sha512_ctx_t ctx;
struct tw_extensible_t exta;
struct field_t pk;
field_a_t pk;
sha512_init(&ctx);
sha512_update(&ctx, (const unsigned char *)"derivepk", GOLDI_DIVERSIFY_BYTES);
@@ -173,9 +173,9 @@ goldilocks_derive_private_key (
barrett_serialize(privkey->opaque, sk, GOLDI_FIELD_BYTES);
scalarmul_fixed_base(&exta, sk, GOLDI_SCALAR_BITS, &goldilocks_global.fixed_base);
untwist_and_double_and_serialize(&pk, &exta);
untwist_and_double_and_serialize(pk, &exta);
field_serialize(&privkey->opaque[GOLDI_FIELD_BYTES], &pk);
field_serialize(&privkey->opaque[GOLDI_FIELD_BYTES], pk);
return GOLDI_EOK;
}
@@ -225,11 +225,11 @@ goldilocks_private_to_public (
struct goldilocks_public_key_t *pubkey,
const struct goldilocks_private_key_t *privkey
) {
struct field_t pk;
mask_t msucc = field_deserialize(&pk,&privkey->opaque[GOLDI_FIELD_BYTES]);
field_a_t pk;
mask_t msucc = field_deserialize(pk,&privkey->opaque[GOLDI_FIELD_BYTES]);
if (msucc) {
field_serialize(pubkey->opaque, &pk);
field_serialize(pubkey->opaque, pk);
return GOLDI_EOK;
} else {
return GOLDI_ECORRUPT;
@@ -252,15 +252,15 @@ goldilocks_shared_secret_core (
assert(GOLDI_SHARED_SECRET_BYTES == SHA512_OUTPUT_BYTES);
word_t sk[GOLDI_FIELD_WORDS];
struct field_t pk;
field_a_t pk;
mask_t succ = field_deserialize(&pk,your_pubkey->opaque), msucc = -1;
mask_t succ = field_deserialize(pk,your_pubkey->opaque), msucc = -1;
#ifdef EXPERIMENT_ECDH_STIR_IN_PUBKEYS
struct field_t sum, prod;
msucc &= field_deserialize(&sum,&my_privkey->opaque[GOLDI_FIELD_BYTES]);
field_mul(&prod,&pk,&sum);
field_add(&sum,&pk,&sum);
field_a_t sum, prod;
msucc &= field_deserialize(sum,&my_privkey->opaque[GOLDI_FIELD_BYTES]);
field_mul(prod,pk,sum);
field_add(sum,pk,sum);
#endif
msucc &= barrett_deserialize(sk,my_privkey->opaque,&curve_prime_order);
@@ -269,17 +269,17 @@ goldilocks_shared_secret_core (
if (pre) {
struct tw_extensible_t tw;
succ &= scalarmul_fixed_base(&tw, sk, GOLDI_SCALAR_BITS, &pre->table);
untwist_and_double_and_serialize(&pk, &tw);
untwist_and_double_and_serialize(pk, &tw);
} else {
succ &= montgomery_ladder(&pk,&pk,sk,GOLDI_SCALAR_BITS,1);
succ &= montgomery_ladder(pk,pk,sk,GOLDI_SCALAR_BITS,1);
}
#else
(void)pre;
succ &= montgomery_ladder(&pk,&pk,sk,GOLDI_SCALAR_BITS,1);
succ &= montgomery_ladder(pk,pk,sk,GOLDI_SCALAR_BITS,1);
#endif
field_serialize(gxy,&pk);
field_serialize(gxy,pk);
/* obliterate records of our failure by adjusting with obliteration key */
struct sha512_ctx_t ctx;
@@ -300,9 +300,9 @@ goldilocks_shared_secret_core (
#ifdef EXPERIMENT_ECDH_STIR_IN_PUBKEYS
/* stir in the sum and product of the pubkeys. */
uint8_t a_pk[GOLDI_FIELD_BYTES];
field_serialize(a_pk, &sum);
field_serialize(a_pk, sum);
sha512_update(&ctx, a_pk, GOLDI_FIELD_BYTES);
field_serialize(a_pk, &prod);
field_serialize(a_pk, prod);
sha512_update(&ctx, a_pk, GOLDI_FIELD_BYTES);
#endif
@@ -383,11 +383,11 @@ goldilocks_sign (
/* 4[nonce]G */
uint8_t signature_tmp[GOLDI_FIELD_BYTES];
struct tw_extensible_t exta;
struct field_t gsk;
field_a_t gsk;
scalarmul_fixed_base(&exta, tk, GOLDI_SCALAR_BITS, &goldilocks_global.fixed_base);
double_tw_extensible(&exta);
untwist_and_double_and_serialize(&gsk, &exta);
field_serialize(signature_tmp, &gsk);
untwist_and_double_and_serialize(gsk, &exta);
field_serialize(signature_tmp, gsk);
word_t challenge[GOLDI_FIELD_WORDS];
goldilocks_derive_challenge (
@@ -437,10 +437,10 @@ goldilocks_verify (
return GOLDI_EUNINIT;
}
struct field_t pk;
field_a_t pk;
word_t s[GOLDI_FIELD_WORDS];
mask_t succ = field_deserialize(&pk,pubkey->opaque);
mask_t succ = field_deserialize(pk,pubkey->opaque);
if (!succ) return GOLDI_EINVAL;
succ = barrett_deserialize(s, &signature[GOLDI_FIELD_BYTES], &curve_prime_order);
@@ -449,14 +449,14 @@ goldilocks_verify (
word_t challenge[GOLDI_FIELD_WORDS];
goldilocks_derive_challenge(challenge, pubkey->opaque, signature, message, message_len);
struct field_t eph;
field_a_t eph;
struct tw_extensible_t pk_text;
/* deserialize [nonce]G */
succ = field_deserialize(&eph, signature);
succ = field_deserialize(eph, signature);
if (!succ) return GOLDI_EINVAL;
succ = deserialize_and_twist_approx(&pk_text, &sqrt_d_minus_1, &pk);
succ = deserialize_and_twist_approx(&pk_text, pk);
if (!succ) return GOLDI_EINVAL;
linear_combo_var_fixed_vt( &pk_text,
@@ -464,9 +464,9 @@ goldilocks_verify (
s, GOLDI_SCALAR_BITS,
goldilocks_global.wnafs, WNAF_PRECMP_BITS );
untwist_and_double_and_serialize( &pk, &pk_text );
untwist_and_double_and_serialize( pk, &pk_text );
succ = field_eq(&eph, &pk);
succ = field_eq(eph, pk);
return succ ? 0 : GOLDI_EINVAL;
}
#endif
@@ -485,14 +485,14 @@ goldilocks_precompute_public_key (
struct tw_extensible_t pk_text;
struct field_t pk;
mask_t succ = field_deserialize(&pk, pub->opaque);
field_a_t pk;
mask_t succ = field_deserialize(pk, pub->opaque);
if (!succ) {
free(precom);
return NULL;
}
succ = deserialize_and_twist_approx(&pk_text, &sqrt_d_minus_1, &pk);
succ = deserialize_and_twist_approx(&pk_text, pk);
if (!succ) {
free(precom);
return NULL;
@@ -538,11 +538,11 @@ goldilocks_verify_precomputed (
word_t challenge[GOLDI_FIELD_WORDS];
goldilocks_derive_challenge(challenge, pubkey->pub.opaque, signature, message, message_len);
struct field_t eph, pk;
field_a_t eph, pk;
struct tw_extensible_t pk_text;
/* deserialize [nonce]G */
succ = field_deserialize(&eph, signature);
succ = field_deserialize(eph, signature);
if (!succ) return GOLDI_EINVAL;
succ = linear_combo_combs_vt (
@@ -552,9 +552,9 @@ goldilocks_verify_precomputed (
);
if (!succ) return GOLDI_EINVAL;
untwist_and_double_and_serialize( &pk, &pk_text );
untwist_and_double_and_serialize( pk, &pk_text );
succ = field_eq(&eph, &pk);
succ = field_eq(eph, pk);
return succ ? 0 : GOLDI_EINVAL;
}
@@ -21,28 +21,28 @@ extern "C" {
* Affine point on an Edwards curve.
*/
struct affine_t {
struct field_t x, y;
field_a_t x, y;
};
/**
* Affine point on a twisted Edwards curve.
*/
struct tw_affine_t {
struct field_t x, y;
field_a_t x, y;
};
/**
* Montgomery buffer.
*/
struct montgomery_t {
struct field_t z0, xd, zd, xa, za;
field_a_t z0, xd, zd, xa, za;
};
/**
* Montgomery buffer, augmented version.
*/
struct montgomery_aux_t {
struct field_t s0, xd, zd, xa, za, xs, zs;
field_a_t s0, xd, zd, xa, za, xs, zs;
};
/**
@@ -64,7 +64,7 @@ struct montgomery_aux_t {
* instead.
*/
struct extensible_t {
struct field_t x, y, z, t, u;
field_a_t x, y, z, t, u;
};
/**
@@ -72,7 +72,7 @@ struct extensible_t {
* suitable for accumulators.
*/
struct tw_extensible_t {
struct field_t x, y, z, t, u;
field_a_t x, y, z, t, u;
};
/**
@@ -81,7 +81,7 @@ struct tw_extensible_t {
* Good for mixed readdition; suitable for fixed tables.
*/
struct tw_niels_t {
struct field_t a, b, c;
field_a_t a, b, c;
};
/**
@@ -91,7 +91,7 @@ struct tw_niels_t {
*/
struct tw_pniels_t {
struct tw_niels_t n;
struct field_t z;
field_a_t z;
};
@@ -285,20 +285,20 @@ montgomery_aux_step (
void
deserialize_montgomery (
struct montgomery_t* a,
const struct field_t* sbz
const field_a_t sbz
);
mask_t
serialize_montgomery (
struct field_t* b,
field_a_t b,
const struct montgomery_t* a,
const struct field_t* sbz
const field_a_t sbz
);
void
deserialize_montgomery_decaf (
struct montgomery_aux_t* a,
const struct field_t *s
const field_a_t s
);
/**
@@ -314,7 +314,7 @@ deserialize_montgomery_decaf (
*/
void
serialize_extensible (
struct field_t* b,
field_a_t b,
const struct extensible_t* a
);
@@ -323,7 +323,7 @@ serialize_extensible (
*/
void
untwist_and_double_and_serialize (
struct field_t* b,
field_a_t b,
const struct tw_extensible_t* a
);
@@ -363,7 +363,7 @@ test_only_twist (
mask_t
field_is_square (
const struct field_t* x
const field_a_t x
);
mask_t
@@ -382,7 +382,7 @@ is_even_tw (
mask_t
deserialize_affine (
struct affine_t* a,
const struct field_t* sz
const field_a_t sz
);
/**
@@ -395,22 +395,21 @@ deserialize_affine (
mask_t
deserialize_and_twist_approx (
struct tw_extensible_t* a,
const struct field_t* sdm1,
const struct field_t* sz
const field_a_t sz
)
__attribute__((warn_unused_result));
mask_t
decaf_deserialize_affine (
struct affine_t *a,
const struct field_t *s,
struct affine_t *a,
const field_a_t s,
mask_t allow_identity
)
__attribute__((warn_unused_result));
void
decaf_serialize_extensible (
struct field_t* b,
field_a_t b,
const struct extensible_t* a
);
@@ -418,14 +417,14 @@ decaf_serialize_extensible (
mask_t
decaf_deserialize_tw_affine (
struct tw_affine_t *a,
const struct field_t *s,
const field_a_t s,
mask_t allow_identity
)
__attribute__((warn_unused_result));
void
decaf_serialize_tw_extensible (
struct field_t* b,
field_a_t b,
const struct tw_extensible_t* a
);
@@ -465,7 +464,7 @@ eq_tw_extensible (
void
elligator_2s_inject (
struct affine_t* a,
const struct field_t* r
const field_a_t r
);
mask_t
@@ -516,8 +515,8 @@ cond_negate_tw_niels (
struct tw_niels_t *n,
mask_t doNegate
) {
constant_time_cond_swap(&n->a, &n->b, sizeof(n->a), doNegate);
field_cond_neg(&n->c, doNegate);
constant_time_cond_swap(n->a, n->b, sizeof(n->a), doNegate);
field_cond_neg(n->c, doNegate);
}
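One subtlety in the call above: with field_a_t members, n->a decays to a pointer when passed, while sizeof(n->a) is still the size of one struct field_t (an array of one element), so the byte count handed to constant_time_cond_swap is unchanged. For readers unfamiliar with the primitive, a mask-based sketch of what a constant-time conditional swap generally looks like — illustrative only, not the library's actual implementation:

static void cond_swap_sketch(unsigned char *a, unsigned char *b,
                             size_t len, mask_t doswap /* all-ones or all-zero */) {
    size_t i;
    for (i = 0; i < len; i++) {
        unsigned char delta = (unsigned char)((a[i] ^ b[i]) & (unsigned char)doswap);
        a[i] ^= delta;   /* swaps when doswap is all-ones, leaves both untouched when zero */
        b[i] ^= delta;   /* identical memory-access pattern either way                     */
    }
}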
/**
@@ -537,8 +536,8 @@ copy_affine (
struct affine_t* a,
const struct affine_t* ds
) {
field_copy ( &a->x, &ds->x );
field_copy ( &a->y, &ds->y );
field_copy ( a->x, ds->x );
field_copy ( a->y, ds->y );
}
void
@@ -546,8 +545,8 @@ copy_tw_affine (
struct tw_affine_t* a,
const struct tw_affine_t* ds
) {
field_copy ( &a->x, &ds->x );
field_copy ( &a->y, &ds->y );
field_copy ( a->x, ds->x );
field_copy ( a->y, ds->y );
}
void
@@ -555,11 +554,11 @@ copy_montgomery (
struct montgomery_t* a,
const struct montgomery_t* ds
) {
field_copy ( &a->z0, &ds->z0 );
field_copy ( &a->xd, &ds->xd );
field_copy ( &a->zd, &ds->zd );
field_copy ( &a->xa, &ds->xa );
field_copy ( &a->za, &ds->za );
field_copy ( a->z0, ds->z0 );
field_copy ( a->xd, ds->xd );
field_copy ( a->zd, ds->zd );
field_copy ( a->xa, ds->xa );
field_copy ( a->za, ds->za );
}
void
@@ -567,11 +566,11 @@ copy_extensible (
struct extensible_t* a,
const struct extensible_t* ds
) {
field_copy ( &a->x, &ds->x );
field_copy ( &a->y, &ds->y );
field_copy ( &a->z, &ds->z );
field_copy ( &a->t, &ds->t );
field_copy ( &a->u, &ds->u );
field_copy ( a->x, ds->x );
field_copy ( a->y, ds->y );
field_copy ( a->z, ds->z );
field_copy ( a->t, ds->t );
field_copy ( a->u, ds->u );
}
void
@@ -579,11 +578,11 @@ copy_tw_extensible (
struct tw_extensible_t* a,
const struct tw_extensible_t* ds
) {
field_copy ( &a->x, &ds->x );
field_copy ( &a->y, &ds->y );
field_copy ( &a->z, &ds->z );
field_copy ( &a->t, &ds->t );
field_copy ( &a->u, &ds->u );
field_copy ( a->x, ds->x );
field_copy ( a->y, ds->y );
field_copy ( a->z, ds->z );
field_copy ( a->t, ds->t );
field_copy ( a->u, ds->u );
}
void
@@ -591,9 +590,9 @@ copy_tw_niels (
struct tw_niels_t* a,
const struct tw_niels_t* ds
) {
field_copy ( &a->a, &ds->a );
field_copy ( &a->b, &ds->b );
field_copy ( &a->c, &ds->c );
field_copy ( a->a, ds->a );
field_copy ( a->b, ds->b );
field_copy ( a->c, ds->c );
}
void
@@ -602,7 +601,7 @@ copy_tw_pniels (
const struct tw_pniels_t* ds
) {
copy_tw_niels( &a->n, &ds->n );
field_copy ( &a->z, &ds->z );
field_copy ( a->z, ds->z );
}
#ifdef __cplusplus
@@ -14,6 +14,9 @@
#include "f_field.h"
#include <string.h>
typedef struct field_t field_a_t[1];
#define field_a_restrict_t struct field_t *__restrict__
#define is32 (GOLDI_BITS == 32 || FIELD_BITS != 448)
#if (is32)
#define IF32(s) (s)
@@ -54,8 +57,8 @@ extern const uint8_t FIELD_MODULUS[FIELD_BYTES];
static inline void
__attribute__((unused,always_inline))
field_copy (
struct field_t *__restrict__ a,
const struct field_t *__restrict__ b
field_a_restrict_t a,
const field_a_restrict_t b
) {
memcpy(a,b,sizeof(*a));
}
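Two notes on the new typedef block and on field_copy. A parameter declared const field_a_t x adjusts to const struct field_t *x, so callers pass a plain field_a_t by name and still get const-correctness, just as with GMP's mpz_t. field_a_restrict_t is a macro rather than a second typedef presumably because restrict may only qualify pointer types; applied through the array typedef it would land on the struct element and be rejected, so the pointer form is spelled out. Inside field_copy, sizeof(*a) is still sizeof(struct field_t), so the memcpy copies exactly one element as before. The parameter adjustment, with hypothetical declarations:

void takes_src(const field_a_t x);      /* x has type: const struct field_t *        */
void takes_dst(field_a_restrict_t y);   /* y has type: struct field_t *__restrict__  */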
@@ -70,8 +73,8 @@ field_copy (
*/
void
field_isr (
struct field_t* a,
const struct field_t* x
field_a_t a,
const field_a_t x
);
/**
@@ -81,8 +84,8 @@ field_isr (
*/
void
field_simultaneous_invert (
struct field_t *__restrict__ out,
const struct field_t *in,
field_a_t *__restrict__ out,
const field_a_t *in,
unsigned int n
);
@@ -93,8 +96,8 @@ field_simultaneous_invert (
*/
void
field_inverse (
struct field_t* a,
const struct field_t* x
field_a_t a,
const field_a_t x
);
/**
@@ -102,8 +105,8 @@ field_inverse (
*/
mask_t
field_eq (
const struct field_t *a,
const struct field_t *b
const field_a_t a,
const field_a_t b
);
/**
@@ -112,23 +115,23 @@ field_eq (
static __inline__ void
__attribute__((unused,always_inline))
field_sqrn (
field_t *__restrict__ y,
const field_t *x,
field_a_restrict_t y,
const field_a_t x,
int n
) {
field_t tmp;
field_a_t tmp;
assert(n>0);
if (n&1) {
field_sqr(y,x);
n--;
} else {
field_sqr(&tmp,x);
field_sqr(y,&tmp);
field_sqr(tmp,x);
field_sqr(y,tmp);
n-=2;
}
for (; n; n-=2) {
field_sqr(&tmp,y);
field_sqr(y,&tmp);
field_sqr(tmp,y);
field_sqr(y,tmp);
}
}
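field_sqrn computes y = x^(2^n) with n squarings: an odd n squares once into y up front, then each loop pass squares twice through tmp, so e.g. field_sqrn(y, x, 5) yields y = x^32. The only change here is that tmp is a field_a_t passed without &, while the restrict qualification on y is kept via field_a_restrict_t.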
@@ -152,8 +155,8 @@ field_make_nonzero (struct field_t *f) {
/* Multiply by signed curve constant */
static __inline__ void
field_mulw_scc (
struct field_t* __restrict__ out,
const struct field_t *a,
field_a_restrict_t out,
const field_a_t a,
int64_t scc
) {
if (scc >= 0) {
@@ -168,8 +171,8 @@ field_mulw_scc (
/* Multiply by signed curve constant and weak reduce if biased */
static __inline__ void
field_mulw_scc_wr (
struct field_t* __restrict__ out,
const struct field_t *a,
field_a_restrict_t out,
const field_a_t a,
int64_t scc
) {
field_mulw_scc(out, a, scc);
@@ -179,9 +182,9 @@ field_mulw_scc_wr (
static __inline__ void
field_subx_RAW (
struct field_t *d,
const struct field_t *a,
const struct field_t *b
field_a_t d,
const field_a_t a,
const field_a_t b
) {
field_sub_RAW ( d, a, b );
field_bias( d, 2 );
@@ -190,9 +193,9 @@ field_subx_RAW (
static __inline__ void
field_sub (
struct field_t *d,
const struct field_t *a,
const struct field_t *b
field_a_t d,
const field_a_t a,
const field_a_t b
) {
field_sub_RAW ( d, a, b );
field_bias( d, 2 );
@@ -201,9 +204,9 @@ field_sub (
static __inline__ void
field_add (
struct field_t *d,
const struct field_t *a,
const struct field_t *b