rename internal scalar implementations using s/scalar/scalar_impl
- In non-VERIFY mode, scalar operations are identical to their
  internal scalar implementation.
- In VERIFY mode, scalar operations are defined to run
  secp256k1_scalar_verify checks at the entry and exit of each
  function, in addition to using the internal scalar implementation.
stratospher committed Jul 6, 2023
1 parent b20b30d commit c53faec
Showing 5 changed files with 276 additions and 115 deletions.
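
For context, here is a minimal sketch of the two build modes this rename enables. The VERIFY-mode wrappers are defined in files that are not expanded in this view, so the wrapper shape below is illustrative (using secp256k1_scalar_negate as the example) rather than a verbatim copy of the library code:

    /* Illustrative sketch only, not verbatim library code. */

    /* The internal implementation, renamed by this commit: */
    static void secp256k1_scalar_impl_negate(secp256k1_scalar *r, const secp256k1_scalar *a);

    #ifndef VERIFY
    /* Non-VERIFY build: the public name is a plain alias, so callers reach the
     * internal implementation directly with no wrapper in between. */
    # define secp256k1_scalar_negate secp256k1_scalar_impl_negate
    #else
    /* VERIFY build: the public name becomes a wrapper that runs
     * secp256k1_scalar_verify checks at entry and exit around the same
     * internal implementation. */
    static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a) {
        secp256k1_scalar_verify(a);         /* entry check */
        secp256k1_scalar_impl_negate(r, a);
        secp256k1_scalar_verify(r);         /* exit check */
    }
    #endif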
28 changes: 28 additions & 0 deletions src/scalar.h
@@ -19,6 +19,34 @@
#error "Please select wide multiplication implementation"
#endif

+#ifndef VERIFY
+/* In non-VERIFY mode, we #define the scalar operations to be identical to their
+ * internal scalar implementation, to avoid the potential overhead of a
+ * function call (even though presumably inlinable). */
+# define secp256k1_scalar_clear secp256k1_scalar_impl_clear
+# define secp256k1_scalar_get_bits secp256k1_scalar_impl_get_bits
+# define secp256k1_scalar_get_bits_var secp256k1_scalar_impl_get_bits_var
+# define secp256k1_scalar_check_overflow secp256k1_scalar_impl_check_overflow
+# define secp256k1_scalar_set_b32 secp256k1_scalar_impl_set_b32
+# define secp256k1_scalar_set_int secp256k1_scalar_impl_set_int
+# define secp256k1_scalar_get_b32 secp256k1_scalar_impl_get_b32
+# define secp256k1_scalar_add secp256k1_scalar_impl_add
+# define secp256k1_scalar_cadd_bit secp256k1_scalar_impl_cadd_bit
+# define secp256k1_scalar_mul secp256k1_scalar_impl_mul
+# define secp256k1_scalar_shr_int secp256k1_scalar_impl_shr_int
+# define secp256k1_scalar_inverse secp256k1_scalar_impl_inverse
+# define secp256k1_scalar_inverse_var secp256k1_scalar_impl_inverse_var
+# define secp256k1_scalar_negate secp256k1_scalar_impl_negate
+# define secp256k1_scalar_is_zero secp256k1_scalar_impl_is_zero
+# define secp256k1_scalar_is_one secp256k1_scalar_impl_is_one
+# define secp256k1_scalar_is_even secp256k1_scalar_impl_is_even
+# define secp256k1_scalar_is_high secp256k1_scalar_impl_is_high
+# define secp256k1_scalar_cond_negate secp256k1_scalar_impl_cond_negate
+# define secp256k1_scalar_eq secp256k1_scalar_impl_eq
+# define secp256k1_scalar_split_128 secp256k1_scalar_impl_split_128
+# define secp256k1_scalar_cmov secp256k1_scalar_impl_cmov
+#endif /* !defined(VERIFY) */

/** Clear a scalar to prevent the leak of sensitive data. */
static void secp256k1_scalar_clear(secp256k1_scalar *r);

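From a caller's point of view nothing changes in a non-VERIFY build: the #define block above makes each public name expand to its internal implementation at preprocessing time, so there is no function-call layer to rely on the compiler inlining. A hypothetical call site:

    secp256k1_scalar r;
    /* In a non-VERIFY build the preprocessor rewrites the next line to
     * secp256k1_scalar_impl_clear(&r) before compilation. */
    secp256k1_scalar_clear(&r);
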
72 changes: 36 additions & 36 deletions src/scalar_4x64_impl.h
@@ -29,37 +29,37 @@
#define SECP256K1_N_H_2 ((uint64_t)0xFFFFFFFFFFFFFFFFULL)
#define SECP256K1_N_H_3 ((uint64_t)0x7FFFFFFFFFFFFFFFULL)

-SECP256K1_INLINE static void secp256k1_scalar_clear(secp256k1_scalar *r) {
+SECP256K1_INLINE static void secp256k1_scalar_impl_clear(secp256k1_scalar *r) {
r->d[0] = 0;
r->d[1] = 0;
r->d[2] = 0;
r->d[3] = 0;
}

-SECP256K1_INLINE static void secp256k1_scalar_set_int(secp256k1_scalar *r, unsigned int v) {
+SECP256K1_INLINE static void secp256k1_scalar_impl_set_int(secp256k1_scalar *r, unsigned int v) {
r->d[0] = v;
r->d[1] = 0;
r->d[2] = 0;
r->d[3] = 0;
}

-SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
+SECP256K1_INLINE static unsigned int secp256k1_scalar_impl_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
VERIFY_CHECK((offset + count - 1) >> 6 == offset >> 6);
return (a->d[offset >> 6] >> (offset & 0x3F)) & ((((uint64_t)1) << count) - 1);
}

-SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
+SECP256K1_INLINE static unsigned int secp256k1_scalar_impl_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
VERIFY_CHECK(count < 32);
VERIFY_CHECK(offset + count <= 256);
if ((offset + count - 1) >> 6 == offset >> 6) {
-return secp256k1_scalar_get_bits(a, offset, count);
+return secp256k1_scalar_impl_get_bits(a, offset, count);
} else {
VERIFY_CHECK((offset >> 6) + 1 < 4);
return ((a->d[offset >> 6] >> (offset & 0x3F)) | (a->d[(offset >> 6) + 1] << (64 - (offset & 0x3F)))) & ((((uint64_t)1) << count) - 1);
}
}

-SECP256K1_INLINE static int secp256k1_scalar_check_overflow(const secp256k1_scalar *a) {
+SECP256K1_INLINE static int secp256k1_scalar_impl_check_overflow(const secp256k1_scalar *a) {
int yes = 0;
int no = 0;
no |= (a->d[3] < SECP256K1_N_3); /* No need for a > check. */
@@ -88,7 +88,7 @@ SECP256K1_INLINE static int secp256k1_scalar_reduce(secp256k1_scalar *r, unsigne
return overflow;
}

-static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
+static int secp256k1_scalar_impl_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
int overflow;
secp256k1_uint128 t;
secp256k1_u128_from_u64(&t, a->d[0]);
@@ -103,13 +103,13 @@ static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a,
secp256k1_u128_accum_u64(&t, a->d[3]);
secp256k1_u128_accum_u64(&t, b->d[3]);
r->d[3] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
-overflow = secp256k1_u128_to_u64(&t) + secp256k1_scalar_check_overflow(r);
+overflow = secp256k1_u128_to_u64(&t) + secp256k1_scalar_impl_check_overflow(r);
VERIFY_CHECK(overflow == 0 || overflow == 1);
secp256k1_scalar_reduce(r, overflow);
return overflow;
}

-static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag) {
+static void secp256k1_scalar_impl_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag) {
secp256k1_uint128 t;
volatile int vflag = flag;
VERIFY_CHECK(bit < 256);
@@ -131,31 +131,31 @@ static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int
#endif
}

-static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b32, int *overflow) {
+static void secp256k1_scalar_impl_set_b32(secp256k1_scalar *r, const unsigned char *b32, int *overflow) {
int over;
r->d[0] = secp256k1_read_be64(&b32[24]);
r->d[1] = secp256k1_read_be64(&b32[16]);
r->d[2] = secp256k1_read_be64(&b32[8]);
r->d[3] = secp256k1_read_be64(&b32[0]);
-over = secp256k1_scalar_reduce(r, secp256k1_scalar_check_overflow(r));
+over = secp256k1_scalar_reduce(r, secp256k1_scalar_impl_check_overflow(r));
if (overflow) {
*overflow = over;
}
}

-static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a) {
+static void secp256k1_scalar_impl_get_b32(unsigned char *bin, const secp256k1_scalar* a) {
secp256k1_write_be64(&bin[0], a->d[3]);
secp256k1_write_be64(&bin[8], a->d[2]);
secp256k1_write_be64(&bin[16], a->d[1]);
secp256k1_write_be64(&bin[24], a->d[0]);
}

-SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar *a) {
+SECP256K1_INLINE static int secp256k1_scalar_impl_is_zero(const secp256k1_scalar *a) {
return (a->d[0] | a->d[1] | a->d[2] | a->d[3]) == 0;
}

-static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a) {
-uint64_t nonzero = 0xFFFFFFFFFFFFFFFFULL * (secp256k1_scalar_is_zero(a) == 0);
+static void secp256k1_scalar_impl_negate(secp256k1_scalar *r, const secp256k1_scalar *a) {
+uint64_t nonzero = 0xFFFFFFFFFFFFFFFFULL * (secp256k1_scalar_impl_is_zero(a) == 0);
secp256k1_uint128 t;
secp256k1_u128_from_u64(&t, ~a->d[0]);
secp256k1_u128_accum_u64(&t, SECP256K1_N_0 + 1);
@@ -171,11 +171,11 @@ static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar
r->d[3] = secp256k1_u128_to_u64(&t) & nonzero;
}

-SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar *a) {
+SECP256K1_INLINE static int secp256k1_scalar_impl_is_one(const secp256k1_scalar *a) {
return ((a->d[0] ^ 1) | a->d[1] | a->d[2] | a->d[3]) == 0;
}

-static int secp256k1_scalar_is_high(const secp256k1_scalar *a) {
+static int secp256k1_scalar_impl_is_high(const secp256k1_scalar *a) {
int yes = 0;
int no = 0;
no |= (a->d[3] < SECP256K1_N_H_3);
@@ -187,12 +187,12 @@ static int secp256k1_scalar_is_high(const secp256k1_scalar *a) {
return yes;
}

-static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
+static int secp256k1_scalar_impl_cond_negate(secp256k1_scalar *r, int flag) {
/* If we are flag = 0, mask = 00...00 and this is a no-op;
-* if we are flag = 1, mask = 11...11 and this is identical to secp256k1_scalar_negate */
+* if we are flag = 1, mask = 11...11 and this is identical to secp256k1_scalar_impl_negate */
volatile int vflag = flag;
uint64_t mask = -vflag;
-uint64_t nonzero = (secp256k1_scalar_is_zero(r) != 0) - 1;
+uint64_t nonzero = (secp256k1_scalar_impl_is_zero(r) != 0) - 1;
secp256k1_uint128 t;
secp256k1_u128_from_u64(&t, r->d[0] ^ mask);
secp256k1_u128_accum_u64(&t, (SECP256K1_N_0 + 1) & mask);
@@ -585,7 +585,7 @@ static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint64_t *l)
#endif

/* Final reduction of r. */
-secp256k1_scalar_reduce(r, c + secp256k1_scalar_check_overflow(r));
+secp256k1_scalar_reduce(r, c + secp256k1_scalar_impl_check_overflow(r));
}

static void secp256k1_scalar_mul_512(uint64_t l[8], const secp256k1_scalar *a, const secp256k1_scalar *b) {
@@ -762,13 +762,13 @@ static void secp256k1_scalar_mul_512(uint64_t l[8], const secp256k1_scalar *a, c
#undef extract
#undef extract_fast

-static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
+static void secp256k1_scalar_impl_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
uint64_t l[8];
secp256k1_scalar_mul_512(l, a, b);
secp256k1_scalar_reduce_512(r, l);
}

-static int secp256k1_scalar_shr_int(secp256k1_scalar *r, int n) {
+static int secp256k1_scalar_impl_shr_int(secp256k1_scalar *r, int n) {
int ret;
VERIFY_CHECK(n > 0);
VERIFY_CHECK(n < 16);
@@ -780,7 +780,7 @@ static int secp256k1_scalar_shr_int(secp256k1_scalar *r, int n) {
return ret;
}

-static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k) {
+static void secp256k1_scalar_impl_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k) {
r1->d[0] = k->d[0];
r1->d[1] = k->d[1];
r1->d[2] = 0;
@@ -791,7 +791,7 @@ static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r
r2->d[3] = 0;
}

-SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) {
+SECP256K1_INLINE static int secp256k1_scalar_impl_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) {
return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3])) == 0;
}

@@ -809,10 +809,10 @@ SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r,
r->d[1] = shift < 448 ? (l[1 + shiftlimbs] >> shiftlow | (shift < 384 && shiftlow ? (l[2 + shiftlimbs] << shifthigh) : 0)) : 0;
r->d[2] = shift < 384 ? (l[2 + shiftlimbs] >> shiftlow | (shift < 320 && shiftlow ? (l[3 + shiftlimbs] << shifthigh) : 0)) : 0;
r->d[3] = shift < 320 ? (l[3 + shiftlimbs] >> shiftlow) : 0;
-secp256k1_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 6] >> ((shift - 1) & 0x3f)) & 1);
+secp256k1_scalar_impl_cadd_bit(r, 0, (l[(shift - 1) >> 6] >> ((shift - 1) & 0x3f)) & 1);
}

-static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const secp256k1_scalar *a, int flag) {
+static SECP256K1_INLINE void secp256k1_scalar_impl_cmov(secp256k1_scalar *r, const secp256k1_scalar *a, int flag) {
uint64_t mask0, mask1;
volatile int vflag = flag;
SECP256K1_CHECKMEM_CHECK_VERIFY(r->d, sizeof(r->d));
@@ -842,7 +842,7 @@ static void secp256k1_scalar_from_signed62(secp256k1_scalar *r, const secp256k1_
r->d[3] = a3 >> 6 | a4 << 56;

#ifdef VERIFY
-VERIFY_CHECK(secp256k1_scalar_check_overflow(r) == 0);
+VERIFY_CHECK(secp256k1_scalar_impl_check_overflow(r) == 0);
#endif
}

Expand All @@ -851,7 +851,7 @@ static void secp256k1_scalar_to_signed62(secp256k1_modinv64_signed62 *r, const s
const uint64_t a0 = a->d[0], a1 = a->d[1], a2 = a->d[2], a3 = a->d[3];

#ifdef VERIFY
-VERIFY_CHECK(secp256k1_scalar_check_overflow(a) == 0);
+VERIFY_CHECK(secp256k1_scalar_impl_check_overflow(a) == 0);
#endif

r->v[0] = a0 & M62;
@@ -866,35 +866,35 @@ static const secp256k1_modinv64_modinfo secp256k1_const_modinfo_scalar = {
0x34F20099AA774EC1LL
};

-static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar *x) {
+static void secp256k1_scalar_impl_inverse(secp256k1_scalar *r, const secp256k1_scalar *x) {
secp256k1_modinv64_signed62 s;
#ifdef VERIFY
-int zero_in = secp256k1_scalar_is_zero(x);
+int zero_in = secp256k1_scalar_impl_is_zero(x);
#endif
secp256k1_scalar_to_signed62(&s, x);
secp256k1_modinv64(&s, &secp256k1_const_modinfo_scalar);
secp256k1_scalar_from_signed62(r, &s);

#ifdef VERIFY
-VERIFY_CHECK(secp256k1_scalar_is_zero(r) == zero_in);
+VERIFY_CHECK(secp256k1_scalar_impl_is_zero(r) == zero_in);
#endif
}

-static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_scalar *x) {
+static void secp256k1_scalar_impl_inverse_var(secp256k1_scalar *r, const secp256k1_scalar *x) {
secp256k1_modinv64_signed62 s;
#ifdef VERIFY
-int zero_in = secp256k1_scalar_is_zero(x);
+int zero_in = secp256k1_scalar_impl_is_zero(x);
#endif
secp256k1_scalar_to_signed62(&s, x);
secp256k1_modinv64_var(&s, &secp256k1_const_modinfo_scalar);
secp256k1_scalar_from_signed62(r, &s);

#ifdef VERIFY
-VERIFY_CHECK(secp256k1_scalar_is_zero(r) == zero_in);
+VERIFY_CHECK(secp256k1_scalar_impl_is_zero(r) == zero_in);
#endif
}

-SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) {
+SECP256K1_INLINE static int secp256k1_scalar_impl_is_even(const secp256k1_scalar *a) {
return !(a->d[0] & 1);
}

(The diffs for the remaining three changed files are collapsed and not shown here.)
