From edf5b23c278a9f1de88ebe084a86411c0f89fbfc Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Mon, 21 Jul 2025 18:43:53 +0100 Subject: [PATCH 01/16] move code --- crates/precompile/src/blake2.rs | 641 +----------------- crates/precompile/src/bls12_381.rs | 20 +- crates/precompile/src/bls12_381/g1_add.rs | 3 +- crates/precompile/src/bls12_381/g1_msm.rs | 3 +- crates/precompile/src/bls12_381/g2_add.rs | 3 +- crates/precompile/src/bls12_381/g2_msm.rs | 3 +- .../precompile/src/bls12_381/map_fp2_to_g2.rs | 7 +- .../precompile/src/bls12_381/map_fp_to_g1.rs | 7 +- crates/precompile/src/bls12_381/pairing.rs | 3 +- crates/precompile/src/bn128.rs | 16 +- .../precompile/src/crypto/blake2/constants.rs | 7 + crates/precompile/src/crypto/blake2/mod.rs | 567 ++++++++++++++++ .../src/{ => crypto}/bls12_381/arkworks.rs | 20 +- .../src/{ => crypto}/bls12_381/blst.rs | 20 +- .../src/crypto/bls12_381/constants.rs | 19 + crates/precompile/src/crypto/bls12_381/mod.rs | 47 ++ .../src/{ => crypto}/bn128/arkworks.rs | 8 +- .../precompile/src/crypto/bn128/constants.rs | 13 + crates/precompile/src/crypto/bn128/mod.rs | 13 + .../src/{ => crypto}/bn128/substrate.rs | 8 +- .../precompile/src/crypto/hash/constants.rs | 7 + crates/precompile/src/crypto/hash/mod.rs | 21 + crates/precompile/src/crypto/kzg/mod.rs | 40 ++ crates/precompile/src/crypto/mod.rs | 29 + crates/precompile/src/crypto/modexp/mod.rs | 28 + .../secp256k1/bitcoin_secp256k1.rs | 0 .../src/crypto/secp256k1/constants.rs | 7 + .../src/{ => crypto}/secp256k1/k256.rs | 0 crates/precompile/src/crypto/secp256k1/mod.rs | 16 + .../secp256k1/parity_libsecp256k1.rs | 0 .../src/crypto/secp256r1/constants.rs | 10 + crates/precompile/src/crypto/secp256r1/mod.rs | 27 + crates/precompile/src/hash.rs | 9 +- crates/precompile/src/kzg_point_evaluation.rs | 52 +- crates/precompile/src/lib.rs | 2 + crates/precompile/src/modexp.rs | 27 +- crates/precompile/src/secp256k1.rs | 26 +- crates/precompile/src/secp256r1.rs | 12 +- 38 files changed, 
923 insertions(+), 818 deletions(-) create mode 100644 crates/precompile/src/crypto/blake2/constants.rs create mode 100644 crates/precompile/src/crypto/blake2/mod.rs rename crates/precompile/src/{ => crypto}/bls12_381/arkworks.rs (97%) rename crates/precompile/src/{ => crypto}/bls12_381/blst.rs (98%) create mode 100644 crates/precompile/src/crypto/bls12_381/constants.rs create mode 100644 crates/precompile/src/crypto/bls12_381/mod.rs rename crates/precompile/src/{ => crypto}/bn128/arkworks.rs (96%) create mode 100644 crates/precompile/src/crypto/bn128/constants.rs create mode 100644 crates/precompile/src/crypto/bn128/mod.rs rename crates/precompile/src/{ => crypto}/bn128/substrate.rs (95%) create mode 100644 crates/precompile/src/crypto/hash/constants.rs create mode 100644 crates/precompile/src/crypto/hash/mod.rs create mode 100644 crates/precompile/src/crypto/kzg/mod.rs create mode 100644 crates/precompile/src/crypto/mod.rs create mode 100644 crates/precompile/src/crypto/modexp/mod.rs rename crates/precompile/src/{ => crypto}/secp256k1/bitcoin_secp256k1.rs (100%) create mode 100644 crates/precompile/src/crypto/secp256k1/constants.rs rename crates/precompile/src/{ => crypto}/secp256k1/k256.rs (100%) create mode 100644 crates/precompile/src/crypto/secp256k1/mod.rs rename crates/precompile/src/{ => crypto}/secp256k1/parity_libsecp256k1.rs (100%) create mode 100644 crates/precompile/src/crypto/secp256r1/constants.rs create mode 100644 crates/precompile/src/crypto/secp256r1/mod.rs diff --git a/crates/precompile/src/blake2.rs b/crates/precompile/src/blake2.rs index 3996b894a7..7e14e26403 100644 --- a/crates/precompile/src/blake2.rs +++ b/crates/precompile/src/blake2.rs @@ -18,42 +18,37 @@ pub fn run(input: &[u8], gas_limit: u64) -> PrecompileResult { // Rounds 4 bytes let rounds = u32::from_be_bytes(input[..4].try_into().unwrap()) as usize; + let gas_used = rounds as u64 * F_ROUND; if gas_used > gas_limit { return Err(PrecompileError::OutOfGas); } - let f = match 
input[212] { - 1 => true, - 0 => false, - _ => return Err(PrecompileError::Blake2WrongFinalIndicatorFlag), - }; - + // Parse inputs let mut h = [0u64; 8]; - //let mut m = [0u64; 16]; + let f: bool = input[212] != 0; - let t; - // Optimized parsing using ptr::read_unaligned for potentially better performance + // state vector h + let h_be = &input[4..68]; - let m; - unsafe { - let ptr = input.as_ptr(); + for (i, item) in h.iter_mut().enumerate() { + let mut buf = [0u8; 8]; + buf.copy_from_slice(&h_be[i * 8..(i + 1) * 8]); + *item = u64::from_le_bytes(buf); + } - // Read h values - for (i, item) in h.iter_mut().enumerate() { - *item = u64::from_le_bytes(core::ptr::read_unaligned( - ptr.add(4 + i * 8) as *const [u8; 8] - )); - } + // message block vector m + let m: [u8; 128] = input[68..196].try_into().unwrap(); - m = input[68..68 + 16 * size_of::()].try_into().unwrap(); + // 2w-bit offset counter t + let t_be = &input[196..212]; + let mut buf: [u8; 8] = t_be[..8].try_into().unwrap(); + let t0 = u64::from_le_bytes(buf); + buf = t_be[8..].try_into().unwrap(); + let t1 = u64::from_le_bytes(buf); + let t = [t0, t1]; - t = [ - u64::from_le_bytes(core::ptr::read_unaligned(ptr.add(196) as *const [u8; 8])), - u64::from_le_bytes(core::ptr::read_unaligned(ptr.add(204) as *const [u8; 8])), - ]; - } - algo::compress(rounds, &mut h, m, t, f); + crate::crypto::blake2::compress(rounds, &mut h, &m, t, f); let mut out = [0u8; 64]; for (i, h) in (0..64).step_by(8).zip(h.iter()) { @@ -61,598 +56,4 @@ pub fn run(input: &[u8], gas_limit: u64) -> PrecompileResult { } Ok(PrecompileOutput::new(gas_used, out.into())) -} - -/// Blake2 algorithm -pub mod algo { - /// SIGMA from spec: - pub const SIGMA: [[usize; 16]; 10] = [ - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], - [14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3], - [11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4], - [7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8], - [9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 
12, 6, 8, 3, 13], - [2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9], - [12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11], - [13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10], - [6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5], - [10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0], - ]; - - /// got IV from: - pub const IV: [u64; 8] = [ - 0x6a09e667f3bcc908, - 0xbb67ae8584caa73b, - 0x3c6ef372fe94f82b, - 0xa54ff53a5f1d36f1, - 0x510e527fade682d1, - 0x9b05688c2b3e6c1f, - 0x1f83d9abfb41bd6b, - 0x5be0cd19137e2179, - ]; - - #[inline(always)] - #[allow(clippy::many_single_char_names)] - /// G function: - pub fn g(v: &mut [u64], a: usize, b: usize, c: usize, d: usize, x: u64, y: u64) { - v[a] = v[a].wrapping_add(v[b]); - v[a] = v[a].wrapping_add(x); - v[d] ^= v[a]; - v[d] = v[d].rotate_right(32); - v[c] = v[c].wrapping_add(v[d]); - v[b] ^= v[c]; - v[b] = v[b].rotate_right(24); - - v[a] = v[a].wrapping_add(v[b]); - v[a] = v[a].wrapping_add(y); - v[d] ^= v[a]; - v[d] = v[d].rotate_right(16); - v[c] = v[c].wrapping_add(v[d]); - v[b] ^= v[c]; - v[b] = v[b].rotate_right(63); - } - - /// Compression function F takes as an argument the state vector "h", - /// message block vector "m" (last block is padded with zeros to full - /// block size, if required), 2w-bit offset counter "t", and final block - /// indicator flag "f". Local vector v[0..15] is used in processing. F - /// returns a new state vector. The number of rounds, "r", is 12 for - /// BLAKE2b and 10 for BLAKE2s. Rounds are numbered from 0 to r - 1. - #[allow(clippy::many_single_char_names)] - pub fn compress( - rounds: usize, - h: &mut [u64; 8], - m_slice: &[u8; 16 * size_of::()], - t: [u64; 2], - f: bool, - ) { - assert!(m_slice.len() == 16 * size_of::()); - - #[cfg(all(target_feature = "avx2", feature = "std"))] - { - // only if it is compiled with avx2 flag and it is std, we can use avx2. 
- if std::is_x86_feature_detected!("avx2") { - // avx2 is 1.8x more performant than portable implementation. - unsafe { - super::avx2::compress_block( - rounds, - m_slice, - h, - ((t[1] as u128) << 64) | (t[0] as u128), - if f { !0 } else { 0 }, - 0, - ); - } - return; - } - } - - // if avx2 is not available, use the fallback portable implementation - - // Read m values - let mut m = [0u64; 16]; - for (i, item) in m.iter_mut().enumerate() { - *item = u64::from_le_bytes(unsafe { - core::ptr::read_unaligned(m_slice.as_ptr().add(i * 8) as *const [u8; 8]) - }); - } - - let mut v = [0u64; 16]; - v[..h.len()].copy_from_slice(h); // First half from state. - v[h.len()..].copy_from_slice(&IV); // Second half from IV. - - v[12] ^= t[0]; - v[13] ^= t[1]; - - if f { - v[14] = !v[14] // Invert all bits if the last-block-flag is set. - } - for i in 0..rounds { - round(&mut v, &m, i); - } - - for i in 0..8 { - h[i] ^= v[i] ^ v[i + 8]; - } - } - - #[inline(always)] - fn round(v: &mut [u64; 16], m: &[u64; 16], r: usize) { - // Message word selection permutation for this round. - let s = &SIGMA[r % 10]; - // g1 - g(v, 0, 4, 8, 12, m[s[0]], m[s[1]]); - g(v, 1, 5, 9, 13, m[s[2]], m[s[3]]); - g(v, 2, 6, 10, 14, m[s[4]], m[s[5]]); - g(v, 3, 7, 11, 15, m[s[6]], m[s[7]]); - - // g2 - g(v, 0, 5, 10, 15, m[s[8]], m[s[9]]); - g(v, 1, 6, 11, 12, m[s[10]], m[s[11]]); - g(v, 2, 7, 8, 13, m[s[12]], m[s[13]]); - g(v, 3, 4, 9, 14, m[s[14]], m[s[15]]); - } -} - -// Adapted from https://github.com/rust-lang-nursery/stdsimd/pull/479. -macro_rules! 
_MM_SHUFFLE { - ($z:expr, $y:expr, $x:expr, $w:expr) => { - ($z << 6) | ($y << 4) | ($x << 2) | $w - }; -} - -/// Code adapted from https://github.com/oconnor663/blake2_simd/blob/82b3e2aee4d2384aabbeb146058301ff0dbd453f/blake2b/src/avx2.rs -#[cfg(all(target_feature = "avx2", feature = "std"))] -mod avx2 { - #[cfg(target_arch = "x86")] - use core::arch::x86::*; - #[cfg(target_arch = "x86_64")] - use core::arch::x86_64::*; - - use super::algo::IV; - use arrayref::{array_refs, mut_array_refs}; - - type Word = u64; - type Count = u128; - /// The number input bytes passed to each call to the compression function. Small benchmarks need - /// to use an even multiple of `BLOCKBYTES`, or else their apparent throughput will be low. - const BLOCKBYTES: usize = 16 * size_of::(); - - const DEGREE: usize = 4; - - /// Compress a block of data using the BLAKE2 algorithm. - #[inline(always)] - pub(crate) unsafe fn compress_block( - mut rounds: usize, - block: &[u8; BLOCKBYTES], - words: &mut [Word; 8], - count: Count, - last_block: Word, - last_node: Word, - ) { - let (words_low, words_high) = mut_array_refs!(words, DEGREE, DEGREE); - let (iv_low, iv_high) = array_refs!(&IV, DEGREE, DEGREE); - let mut a = loadu(words_low); - let mut b = loadu(words_high); - let mut c = loadu(iv_low); - let flags = set4(count_low(count), count_high(count), last_block, last_node); - let mut d = xor(loadu(iv_high), flags); - - let msg_chunks = array_refs!(block, 16, 16, 16, 16, 16, 16, 16, 16); - let m0 = _mm256_broadcastsi128_si256(loadu_128(msg_chunks.0)); - let m1 = _mm256_broadcastsi128_si256(loadu_128(msg_chunks.1)); - let m2 = _mm256_broadcastsi128_si256(loadu_128(msg_chunks.2)); - let m3 = _mm256_broadcastsi128_si256(loadu_128(msg_chunks.3)); - let m4 = _mm256_broadcastsi128_si256(loadu_128(msg_chunks.4)); - let m5 = _mm256_broadcastsi128_si256(loadu_128(msg_chunks.5)); - let m6 = _mm256_broadcastsi128_si256(loadu_128(msg_chunks.6)); - let m7 = 
_mm256_broadcastsi128_si256(loadu_128(msg_chunks.7)); - - let iv0 = a; - let iv1 = b; - let mut t0; - let mut t1; - let mut b0; - - loop { - if rounds == 0 { - break; - } - rounds -= 1; - - // round 1 - t0 = _mm256_unpacklo_epi64(m0, m1); - t1 = _mm256_unpacklo_epi64(m2, m3); - b0 = _mm256_blend_epi32(t0, t1, 0xF0); - g1(&mut a, &mut b, &mut c, &mut d, &mut b0); - t0 = _mm256_unpackhi_epi64(m0, m1); - t1 = _mm256_unpackhi_epi64(m2, m3); - b0 = _mm256_blend_epi32(t0, t1, 0xF0); - g2(&mut a, &mut b, &mut c, &mut d, &mut b0); - diagonalize(&mut a, &mut b, &mut c, &mut d); - t0 = _mm256_unpacklo_epi64(m7, m4); - t1 = _mm256_unpacklo_epi64(m5, m6); - b0 = _mm256_blend_epi32(t0, t1, 0xF0); - g1(&mut a, &mut b, &mut c, &mut d, &mut b0); - t0 = _mm256_unpackhi_epi64(m7, m4); - t1 = _mm256_unpackhi_epi64(m5, m6); - b0 = _mm256_blend_epi32(t0, t1, 0xF0); - g2(&mut a, &mut b, &mut c, &mut d, &mut b0); - undiagonalize(&mut a, &mut b, &mut c, &mut d); - - if rounds == 0 { - break; - } - rounds -= 1; - - // round 2 - t0 = _mm256_unpacklo_epi64(m7, m2); - t1 = _mm256_unpackhi_epi64(m4, m6); - b0 = _mm256_blend_epi32(t0, t1, 0xF0); - g1(&mut a, &mut b, &mut c, &mut d, &mut b0); - t0 = _mm256_unpacklo_epi64(m5, m4); - t1 = _mm256_alignr_epi8(m3, m7, 8); - b0 = _mm256_blend_epi32(t0, t1, 0xF0); - g2(&mut a, &mut b, &mut c, &mut d, &mut b0); - diagonalize(&mut a, &mut b, &mut c, &mut d); - t0 = _mm256_unpackhi_epi64(m2, m0); - t1 = _mm256_blend_epi32(m5, m0, 0x33); - b0 = _mm256_blend_epi32(t0, t1, 0xF0); - g1(&mut a, &mut b, &mut c, &mut d, &mut b0); - t0 = _mm256_alignr_epi8(m6, m1, 8); - t1 = _mm256_blend_epi32(m3, m1, 0x33); - b0 = _mm256_blend_epi32(t0, t1, 0xF0); - g2(&mut a, &mut b, &mut c, &mut d, &mut b0); - undiagonalize(&mut a, &mut b, &mut c, &mut d); - - if rounds == 0 { - break; - } - rounds -= 1; - - // round 3 - t0 = _mm256_alignr_epi8(m6, m5, 8); - t1 = _mm256_unpackhi_epi64(m2, m7); - b0 = _mm256_blend_epi32(t0, t1, 0xF0); - g1(&mut a, &mut b, &mut c, &mut d, &mut 
b0); - t0 = _mm256_unpacklo_epi64(m4, m0); - t1 = _mm256_blend_epi32(m6, m1, 0x33); - b0 = _mm256_blend_epi32(t0, t1, 0xF0); - g2(&mut a, &mut b, &mut c, &mut d, &mut b0); - diagonalize(&mut a, &mut b, &mut c, &mut d); - t0 = _mm256_alignr_epi8(m5, m4, 8); - t1 = _mm256_unpackhi_epi64(m1, m3); - b0 = _mm256_blend_epi32(t0, t1, 0xF0); - g1(&mut a, &mut b, &mut c, &mut d, &mut b0); - t0 = _mm256_unpacklo_epi64(m2, m7); - t1 = _mm256_blend_epi32(m0, m3, 0x33); - b0 = _mm256_blend_epi32(t0, t1, 0xF0); - g2(&mut a, &mut b, &mut c, &mut d, &mut b0); - undiagonalize(&mut a, &mut b, &mut c, &mut d); - - if rounds == 0 { - break; - } - rounds -= 1; - - // round 4 - t0 = _mm256_unpackhi_epi64(m3, m1); - t1 = _mm256_unpackhi_epi64(m6, m5); - b0 = _mm256_blend_epi32(t0, t1, 0xF0); - g1(&mut a, &mut b, &mut c, &mut d, &mut b0); - t0 = _mm256_unpackhi_epi64(m4, m0); - t1 = _mm256_unpacklo_epi64(m6, m7); - b0 = _mm256_blend_epi32(t0, t1, 0xF0); - g2(&mut a, &mut b, &mut c, &mut d, &mut b0); - diagonalize(&mut a, &mut b, &mut c, &mut d); - t0 = _mm256_alignr_epi8(m1, m7, 8); - t1 = _mm256_shuffle_epi32(m2, _MM_SHUFFLE!(1, 0, 3, 2)); - b0 = _mm256_blend_epi32(t0, t1, 0xF0); - g1(&mut a, &mut b, &mut c, &mut d, &mut b0); - t0 = _mm256_unpacklo_epi64(m4, m3); - t1 = _mm256_unpacklo_epi64(m5, m0); - b0 = _mm256_blend_epi32(t0, t1, 0xF0); - g2(&mut a, &mut b, &mut c, &mut d, &mut b0); - undiagonalize(&mut a, &mut b, &mut c, &mut d); - - if rounds == 0 { - break; - } - rounds -= 1; - - // round 5 - t0 = _mm256_unpackhi_epi64(m4, m2); - t1 = _mm256_unpacklo_epi64(m1, m5); - b0 = _mm256_blend_epi32(t0, t1, 0xF0); - g1(&mut a, &mut b, &mut c, &mut d, &mut b0); - t0 = _mm256_blend_epi32(m3, m0, 0x33); - t1 = _mm256_blend_epi32(m7, m2, 0x33); - b0 = _mm256_blend_epi32(t0, t1, 0xF0); - g2(&mut a, &mut b, &mut c, &mut d, &mut b0); - diagonalize(&mut a, &mut b, &mut c, &mut d); - t0 = _mm256_alignr_epi8(m7, m1, 8); - t1 = _mm256_alignr_epi8(m3, m5, 8); - b0 = _mm256_blend_epi32(t0, t1, 0xF0); - 
g1(&mut a, &mut b, &mut c, &mut d, &mut b0); - t0 = _mm256_unpackhi_epi64(m6, m0); - t1 = _mm256_unpacklo_epi64(m6, m4); - b0 = _mm256_blend_epi32(t0, t1, 0xF0); - g2(&mut a, &mut b, &mut c, &mut d, &mut b0); - undiagonalize(&mut a, &mut b, &mut c, &mut d); - - if rounds == 0 { - break; - } - rounds -= 1; - - // round 6 - t0 = _mm256_unpacklo_epi64(m1, m3); - t1 = _mm256_unpacklo_epi64(m0, m4); - b0 = _mm256_blend_epi32(t0, t1, 0xF0); - g1(&mut a, &mut b, &mut c, &mut d, &mut b0); - t0 = _mm256_unpacklo_epi64(m6, m5); - t1 = _mm256_unpackhi_epi64(m5, m1); - b0 = _mm256_blend_epi32(t0, t1, 0xF0); - g2(&mut a, &mut b, &mut c, &mut d, &mut b0); - diagonalize(&mut a, &mut b, &mut c, &mut d); - t0 = _mm256_alignr_epi8(m2, m0, 8); - t1 = _mm256_unpackhi_epi64(m3, m7); - b0 = _mm256_blend_epi32(t0, t1, 0xF0); - g1(&mut a, &mut b, &mut c, &mut d, &mut b0); - t0 = _mm256_unpackhi_epi64(m4, m6); - t1 = _mm256_alignr_epi8(m7, m2, 8); - b0 = _mm256_blend_epi32(t0, t1, 0xF0); - g2(&mut a, &mut b, &mut c, &mut d, &mut b0); - undiagonalize(&mut a, &mut b, &mut c, &mut d); - - if rounds == 0 { - break; - } - rounds -= 1; - - // round 7 - t0 = _mm256_blend_epi32(m0, m6, 0x33); - t1 = _mm256_unpacklo_epi64(m7, m2); - b0 = _mm256_blend_epi32(t0, t1, 0xF0); - g1(&mut a, &mut b, &mut c, &mut d, &mut b0); - t0 = _mm256_unpackhi_epi64(m2, m7); - t1 = _mm256_alignr_epi8(m5, m6, 8); - b0 = _mm256_blend_epi32(t0, t1, 0xF0); - g2(&mut a, &mut b, &mut c, &mut d, &mut b0); - diagonalize(&mut a, &mut b, &mut c, &mut d); - t0 = _mm256_unpacklo_epi64(m4, m0); - t1 = _mm256_blend_epi32(m4, m3, 0x33); - b0 = _mm256_blend_epi32(t0, t1, 0xF0); - g1(&mut a, &mut b, &mut c, &mut d, &mut b0); - t0 = _mm256_unpackhi_epi64(m5, m3); - t1 = _mm256_shuffle_epi32(m1, _MM_SHUFFLE!(1, 0, 3, 2)); - b0 = _mm256_blend_epi32(t0, t1, 0xF0); - g2(&mut a, &mut b, &mut c, &mut d, &mut b0); - undiagonalize(&mut a, &mut b, &mut c, &mut d); - - if rounds == 0 { - break; - } - rounds -= 1; - // round 8 - t0 = 
_mm256_unpackhi_epi64(m6, m3); - t1 = _mm256_blend_epi32(m1, m6, 0x33); - b0 = _mm256_blend_epi32(t0, t1, 0xF0); - g1(&mut a, &mut b, &mut c, &mut d, &mut b0); - t0 = _mm256_alignr_epi8(m7, m5, 8); - t1 = _mm256_unpackhi_epi64(m0, m4); - b0 = _mm256_blend_epi32(t0, t1, 0xF0); - g2(&mut a, &mut b, &mut c, &mut d, &mut b0); - diagonalize(&mut a, &mut b, &mut c, &mut d); - t0 = _mm256_blend_epi32(m2, m1, 0x33); - t1 = _mm256_alignr_epi8(m4, m7, 8); - b0 = _mm256_blend_epi32(t0, t1, 0xF0); - g1(&mut a, &mut b, &mut c, &mut d, &mut b0); - t0 = _mm256_unpacklo_epi64(m5, m0); - t1 = _mm256_unpacklo_epi64(m2, m3); - b0 = _mm256_blend_epi32(t0, t1, 0xF0); - g2(&mut a, &mut b, &mut c, &mut d, &mut b0); - undiagonalize(&mut a, &mut b, &mut c, &mut d); - - if rounds == 0 { - break; - } - rounds -= 1; - - // round 9 - t0 = _mm256_unpacklo_epi64(m3, m7); - t1 = _mm256_alignr_epi8(m0, m5, 8); - b0 = _mm256_blend_epi32(t0, t1, 0xF0); - g1(&mut a, &mut b, &mut c, &mut d, &mut b0); - t0 = _mm256_unpackhi_epi64(m7, m4); - t1 = _mm256_alignr_epi8(m4, m1, 8); - b0 = _mm256_blend_epi32(t0, t1, 0xF0); - g2(&mut a, &mut b, &mut c, &mut d, &mut b0); - diagonalize(&mut a, &mut b, &mut c, &mut d); - t0 = _mm256_unpacklo_epi64(m5, m6); - t1 = _mm256_unpackhi_epi64(m6, m0); - b0 = _mm256_blend_epi32(t0, t1, 0xF0); - g1(&mut a, &mut b, &mut c, &mut d, &mut b0); - t0 = _mm256_alignr_epi8(m1, m2, 8); - t1 = _mm256_alignr_epi8(m2, m3, 8); - b0 = _mm256_blend_epi32(t0, t1, 0xF0); - g2(&mut a, &mut b, &mut c, &mut d, &mut b0); - undiagonalize(&mut a, &mut b, &mut c, &mut d); - - if rounds == 0 { - break; - } - rounds -= 1; - - // round 10 - t0 = _mm256_unpacklo_epi64(m5, m4); - t1 = _mm256_unpackhi_epi64(m3, m0); - b0 = _mm256_blend_epi32(t0, t1, 0xF0); - g1(&mut a, &mut b, &mut c, &mut d, &mut b0); - t0 = _mm256_unpacklo_epi64(m1, m2); - t1 = _mm256_blend_epi32(m2, m3, 0x33); - b0 = _mm256_blend_epi32(t0, t1, 0xF0); - g2(&mut a, &mut b, &mut c, &mut d, &mut b0); - diagonalize(&mut a, &mut b, &mut 
c, &mut d); - t0 = _mm256_unpackhi_epi64(m6, m7); - t1 = _mm256_unpackhi_epi64(m4, m1); - b0 = _mm256_blend_epi32(t0, t1, 0xF0); - g1(&mut a, &mut b, &mut c, &mut d, &mut b0); - t0 = _mm256_blend_epi32(m5, m0, 0x33); - t1 = _mm256_unpacklo_epi64(m7, m6); - b0 = _mm256_blend_epi32(t0, t1, 0xF0); - g2(&mut a, &mut b, &mut c, &mut d, &mut b0); - undiagonalize(&mut a, &mut b, &mut c, &mut d); - - // last two rounds are removed - } - a = xor(a, c); - b = xor(b, d); - a = xor(a, iv0); - b = xor(b, iv1); - - storeu(a, words_low); - storeu(b, words_high); - } - - #[inline(always)] - pub(crate) fn count_low(count: Count) -> Word { - count as Word - } - - #[inline(always)] - pub(crate) fn count_high(count: Count) -> Word { - (count >> 8 * size_of::()) as Word - } - - #[inline(always)] - unsafe fn loadu(src: *const [Word; DEGREE]) -> __m256i { - // This is an unaligned load, so the pointer cast is allowed. - _mm256_loadu_si256(src as *const __m256i) - } - - #[inline(always)] - unsafe fn storeu(src: __m256i, dest: *mut [Word; DEGREE]) { - // This is an unaligned store, so the pointer cast is allowed. - _mm256_storeu_si256(dest as *mut __m256i, src) - } - - #[inline(always)] - unsafe fn loadu_128(mem_addr: &[u8; 16]) -> __m128i { - _mm_loadu_si128(mem_addr.as_ptr() as *const __m128i) - } - - #[inline(always)] - unsafe fn add(a: __m256i, b: __m256i) -> __m256i { - _mm256_add_epi64(a, b) - } - - #[inline(always)] - unsafe fn xor(a: __m256i, b: __m256i) -> __m256i { - _mm256_xor_si256(a, b) - } - - #[inline(always)] - unsafe fn set4(a: u64, b: u64, c: u64, d: u64) -> __m256i { - _mm256_setr_epi64x(a as i64, b as i64, c as i64, d as i64) - } - - // These rotations are the "simple version". For the "complicated version", see - // https://github.com/sneves/blake2-avx2/blob/b3723921f668df09ece52dcd225a36d4a4eea1d9/blake2b-common.h#L43-L46. - // For a discussion of the tradeoffs, see - // https://github.com/sneves/blake2-avx2/pull/5. 
In short: - // - Due to an LLVM bug (https://bugs.llvm.org/show_bug.cgi?id=44379), this - // version performs better on recent x86 chips. - // - LLVM is able to optimize this version to AVX-512 rotation instructions - // when those are enabled. - #[inline(always)] - unsafe fn rot32(x: __m256i) -> __m256i { - _mm256_or_si256(_mm256_srli_epi64(x, 32), _mm256_slli_epi64(x, 64 - 32)) - } - - #[inline(always)] - unsafe fn rot24(x: __m256i) -> __m256i { - _mm256_or_si256(_mm256_srli_epi64(x, 24), _mm256_slli_epi64(x, 64 - 24)) - } - - #[inline(always)] - unsafe fn rot16(x: __m256i) -> __m256i { - _mm256_or_si256(_mm256_srli_epi64(x, 16), _mm256_slli_epi64(x, 64 - 16)) - } - - #[inline(always)] - unsafe fn rot63(x: __m256i) -> __m256i { - _mm256_or_si256(_mm256_srli_epi64(x, 63), _mm256_slli_epi64(x, 64 - 63)) - } - - #[inline(always)] - unsafe fn g1( - a: &mut __m256i, - b: &mut __m256i, - c: &mut __m256i, - d: &mut __m256i, - m: &mut __m256i, - ) { - *a = add(*a, *m); - *a = add(*a, *b); - *d = xor(*d, *a); - *d = rot32(*d); - *c = add(*c, *d); - *b = xor(*b, *c); - *b = rot24(*b); - } - - #[inline(always)] - unsafe fn g2( - a: &mut __m256i, - b: &mut __m256i, - c: &mut __m256i, - d: &mut __m256i, - m: &mut __m256i, - ) { - *a = add(*a, *m); - *a = add(*a, *b); - *d = xor(*d, *a); - *d = rot16(*d); - *c = add(*c, *d); - *b = xor(*b, *c); - *b = rot63(*b); - } - - // Note the optimization here of leaving b as the unrotated row, rather than a. - // All the message loads below are adjusted to compensate for this. 
See - // discussion at https://github.com/sneves/blake2-avx2/pull/4 - #[inline(always)] - unsafe fn diagonalize(a: &mut __m256i, _b: &mut __m256i, c: &mut __m256i, d: &mut __m256i) { - *a = _mm256_permute4x64_epi64(*a, _MM_SHUFFLE!(2, 1, 0, 3)); - *d = _mm256_permute4x64_epi64(*d, _MM_SHUFFLE!(1, 0, 3, 2)); - *c = _mm256_permute4x64_epi64(*c, _MM_SHUFFLE!(0, 3, 2, 1)); - } - - #[inline(always)] - unsafe fn undiagonalize(a: &mut __m256i, _b: &mut __m256i, c: &mut __m256i, d: &mut __m256i) { - *a = _mm256_permute4x64_epi64(*a, _MM_SHUFFLE!(0, 3, 2, 1)); - *d = _mm256_permute4x64_epi64(*d, _MM_SHUFFLE!(1, 0, 3, 2)); - *c = _mm256_permute4x64_epi64(*c, _MM_SHUFFLE!(2, 1, 0, 3)); - } -} - -#[cfg(test)] -mod tests { - use super::*; - use primitives::hex; - use std::time::Instant; - - #[test] - fn perfblake2() { - let input = [hex!("0000040048c9bdf267e6096a3ba7ca8485ae67bb2bf894fe72f36e3cf1361d5f3af54fa5d182e6ad7f520e511f6c3e2b8c68059b6bbd41fbabd9831f79217e1319cde05b616162636465666768696a6b6c6d6e6f700000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000001") - ,hex!("0000020048c9bdf267e6096a3ba7ca8485ae67bb2bf894fe72f36e3cf1361d5f3af54fa5d182e6ad7f520e511f6c3e2b8c68059b6bbd41fbabd9831f79217e1319cde05b61626300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000001") - 
,hex!("0000004048c9bdf267e6096a3ba7ca8485ae67bb2bf894fe72f36e3cf1361d5f3af54fa5d182e6ad7f520e511f6c3e2b8c68059b6bbd41fbabd9831f79217e1319cde05b61626300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000001")]; - - let time = Instant::now(); - for i in 0..3000 { - let _ = run(&input[i % 3], u64::MAX).unwrap(); - } - println!("duration: {:?}", time.elapsed()); - } -} +} \ No newline at end of file diff --git a/crates/precompile/src/bls12_381.rs b/crates/precompile/src/bls12_381.rs index 879dcd60bc..9a332e0dbe 100644 --- a/crates/precompile/src/bls12_381.rs +++ b/crates/precompile/src/bls12_381.rs @@ -2,26 +2,8 @@ //! For more details check modules for each precompile. use crate::PrecompileWithAddress; -cfg_if::cfg_if! { - if #[cfg(feature = "blst")]{ - mod blst; - use blst as crypto_backend; - } else { - mod arkworks; - use arkworks as crypto_backend; - } -} - // Re-export type aliases for use in submodules -use crate::bls12_381_const::FP_LENGTH; -type G1Point = ([u8; FP_LENGTH], [u8; FP_LENGTH]); -type G2Point = ( - [u8; FP_LENGTH], - [u8; FP_LENGTH], - [u8; FP_LENGTH], - [u8; FP_LENGTH], -); -type PairingPair = (G1Point, G2Point); +use crate::crypto::bls12_381::{G1Point, G2Point, PairingPair}; pub mod g1_add; pub mod g1_msm; diff --git a/crates/precompile/src/bls12_381/g1_add.rs b/crates/precompile/src/bls12_381/g1_add.rs index c8aa2f80f0..b43e09c3f5 100644 --- a/crates/precompile/src/bls12_381/g1_add.rs +++ b/crates/precompile/src/bls12_381/g1_add.rs @@ -1,5 +1,4 @@ //! BLS12-381 G1 add precompile. 
More details in [`g1_add`] -use super::crypto_backend::p1_add_affine_bytes; use super::utils::{pad_g1_point, remove_g1_padding}; use crate::bls12_381_const::{ G1_ADD_ADDRESS, G1_ADD_BASE_GAS_FEE, G1_ADD_INPUT_LENGTH, PADDED_G1_LENGTH, @@ -34,7 +33,7 @@ pub fn g1_add(input: &[u8], gas_limit: u64) -> PrecompileResult { let b = (*b_x, *b_y); // Get unpadded result from crypto backend - let unpadded_result = p1_add_affine_bytes(a, b)?; + let unpadded_result = crate::crypto::bls12_381::p1_add_affine_bytes(a, b)?; // Pad the result for EVM compatibility let padded_result = pad_g1_point(&unpadded_result); diff --git a/crates/precompile/src/bls12_381/g1_msm.rs b/crates/precompile/src/bls12_381/g1_msm.rs index f47802ef4b..c03d732ddb 100644 --- a/crates/precompile/src/bls12_381/g1_msm.rs +++ b/crates/precompile/src/bls12_381/g1_msm.rs @@ -1,5 +1,4 @@ //! BLS12-381 G1 msm precompile. More details in [`g1_msm`] -use super::crypto_backend::p1_msm_bytes; use super::G1Point; use crate::bls12_381::utils::{pad_g1_point, remove_g1_padding}; use crate::bls12_381_const::{ @@ -47,7 +46,7 @@ pub fn g1_msm(input: &[u8], gas_limit: u64) -> PrecompileResult { Ok((point, scalar_array)) }); - let unpadded_result = p1_msm_bytes(valid_pairs_iter)?; + let unpadded_result = crate::crypto::bls12_381::g1_multiexp(valid_pairs_iter)?; // Pad the result for EVM compatibility let padded_result = pad_g1_point(&unpadded_result); diff --git a/crates/precompile/src/bls12_381/g2_add.rs b/crates/precompile/src/bls12_381/g2_add.rs index bcd4f7984e..7e1bbfc148 100644 --- a/crates/precompile/src/bls12_381/g2_add.rs +++ b/crates/precompile/src/bls12_381/g2_add.rs @@ -1,5 +1,4 @@ //! BLS12-381 G2 add precompile. 
More details in [`g2_add`] -use super::crypto_backend::p2_add_affine_bytes; use super::utils::{pad_g2_point, remove_g2_padding}; use crate::bls12_381_const::{ G2_ADD_ADDRESS, G2_ADD_BASE_GAS_FEE, G2_ADD_INPUT_LENGTH, PADDED_G2_LENGTH, @@ -35,7 +34,7 @@ pub fn g2_add(input: &[u8], gas_limit: u64) -> PrecompileResult { let b = (*b_x_0, *b_x_1, *b_y_0, *b_y_1); // Get unpadded result from crypto backend - let unpadded_result = p2_add_affine_bytes(a, b)?; + let unpadded_result = crate::crypto::bls12_381::p2_add_affine_bytes(a, b)?; // Pad the result for EVM compatibility let padded_result = pad_g2_point(&unpadded_result); diff --git a/crates/precompile/src/bls12_381/g2_msm.rs b/crates/precompile/src/bls12_381/g2_msm.rs index 98dfa71768..6cfcbe0f60 100644 --- a/crates/precompile/src/bls12_381/g2_msm.rs +++ b/crates/precompile/src/bls12_381/g2_msm.rs @@ -1,5 +1,4 @@ //! BLS12-381 G2 msm precompile. More details in [`g2_msm`] -use super::crypto_backend::p2_msm_bytes; use super::utils::{pad_g2_point, remove_g2_padding}; use super::G2Point; use crate::bls12_381_const::{ @@ -47,7 +46,7 @@ pub fn g2_msm(input: &[u8], gas_limit: u64) -> PrecompileResult { Ok((point, scalar_array)) }); - let unpadded_result = p2_msm_bytes(valid_pairs_iter)?; + let unpadded_result = crate::crypto::bls12_381::g2_multiexp(valid_pairs_iter)?; // Pad the result for EVM compatibility let padded_result = pad_g2_point(&unpadded_result); diff --git a/crates/precompile/src/bls12_381/map_fp2_to_g2.rs b/crates/precompile/src/bls12_381/map_fp2_to_g2.rs index 50f67a32cb..eb2424ade6 100644 --- a/crates/precompile/src/bls12_381/map_fp2_to_g2.rs +++ b/crates/precompile/src/bls12_381/map_fp2_to_g2.rs @@ -1,8 +1,5 @@ //! BLS12-381 map fp2 to g2 precompile. 
More details in [`map_fp2_to_g2`] -use super::{ - crypto_backend::map_fp2_to_g2_bytes, - utils::{pad_g2_point, remove_fp_padding}, -}; +use super::utils::{pad_g2_point, remove_fp_padding}; use crate::bls12_381_const::{ MAP_FP2_TO_G2_ADDRESS, MAP_FP2_TO_G2_BASE_GAS_FEE, PADDED_FP2_LENGTH, PADDED_FP_LENGTH, }; @@ -33,7 +30,7 @@ pub fn map_fp2_to_g2(input: &[u8], gas_limit: u64) -> PrecompileResult { let input_p0_y = remove_fp_padding(&input[PADDED_FP_LENGTH..PADDED_FP2_LENGTH])?; // Get unpadded result from crypto backend - let unpadded_result = map_fp2_to_g2_bytes(input_p0_x, input_p0_y)?; + let unpadded_result = crate::crypto::bls12_381::fp2_to_g2(input_p0_x, input_p0_y)?; // Pad the result for EVM compatibility let padded_result = pad_g2_point(&unpadded_result); diff --git a/crates/precompile/src/bls12_381/map_fp_to_g1.rs b/crates/precompile/src/bls12_381/map_fp_to_g1.rs index de4ee4503e..ce7d91f399 100644 --- a/crates/precompile/src/bls12_381/map_fp_to_g1.rs +++ b/crates/precompile/src/bls12_381/map_fp_to_g1.rs @@ -1,8 +1,5 @@ //! BLS12-381 map fp to g1 precompile. 
More details in [`map_fp_to_g1`] -use super::{ - crypto_backend::map_fp_to_g1_bytes, - utils::{pad_g1_point, remove_fp_padding}, -}; +use super::utils::{pad_g1_point, remove_fp_padding}; use crate::bls12_381_const::{MAP_FP_TO_G1_ADDRESS, MAP_FP_TO_G1_BASE_GAS_FEE, PADDED_FP_LENGTH}; use crate::{PrecompileError, PrecompileOutput, PrecompileResult, PrecompileWithAddress}; @@ -28,7 +25,7 @@ pub fn map_fp_to_g1(input: &[u8], gas_limit: u64) -> PrecompileResult { let input_p0 = remove_fp_padding(input)?; // Get unpadded result from crypto backend - let unpadded_result = map_fp_to_g1_bytes(input_p0)?; + let unpadded_result = crate::crypto::bls12_381::fp_to_g1(input_p0)?; // Pad the result for EVM compatibility let padded_result = pad_g1_point(&unpadded_result); diff --git a/crates/precompile/src/bls12_381/pairing.rs b/crates/precompile/src/bls12_381/pairing.rs index 6f9a59bcbd..92ccb11e8e 100644 --- a/crates/precompile/src/bls12_381/pairing.rs +++ b/crates/precompile/src/bls12_381/pairing.rs @@ -1,5 +1,4 @@ //! BLS12-381 pairing precompile. More details in [`pairing`] -use super::crypto_backend::pairing_check_bytes; use super::utils::{remove_g1_padding, remove_g2_padding}; use super::PairingPair; use crate::bls12_381_const::{ @@ -53,7 +52,7 @@ pub fn pairing(input: &[u8], gas_limit: u64) -> PrecompileResult { pairs.push(((*a_x, *a_y), (*b_x_0, *b_x_1, *b_y_0, *b_y_1))); } - let result = pairing_check_bytes(&pairs)?; + let result = crate::crypto::bls12_381::pairing_check_bytes(&pairs)?; let result = if result { 1 } else { 0 }; Ok(PrecompileOutput::new( diff --git a/crates/precompile/src/bn128.rs b/crates/precompile/src/bn128.rs index 3cad806801..7643dee140 100644 --- a/crates/precompile/src/bn128.rs +++ b/crates/precompile/src/bn128.rs @@ -5,16 +5,6 @@ use crate::{ }; use std::vec::Vec; -cfg_if::cfg_if! 
{ - if #[cfg(feature = "bn")]{ - mod substrate; - use substrate::{g1_point_add, g1_point_mul, pairing_check}; - } else { - mod arkworks; - use arkworks::{g1_point_add, g1_point_mul, pairing_check}; - } -} - /// Bn128 add precompile pub mod add { use super::*; @@ -158,7 +148,7 @@ pub fn run_add(input: &[u8], gas_cost: u64, gas_limit: u64) -> PrecompileResult let p1_bytes = &input[..G1_LEN]; let p2_bytes = &input[G1_LEN..]; - let output = g1_point_add(p1_bytes, p2_bytes)?; + let output = crate::crypto::bn128::g1_point_add(p1_bytes, p2_bytes)?; Ok(PrecompileOutput::new(gas_cost, output.into())) } @@ -173,7 +163,7 @@ pub fn run_mul(input: &[u8], gas_cost: u64, gas_limit: u64) -> PrecompileResult let point_bytes = &input[..G1_LEN]; let scalar_bytes = &input[G1_LEN..G1_LEN + SCALAR_LEN]; - let output = g1_point_mul(point_bytes, scalar_bytes)?; + let output = crate::crypto::bn128::g1_point_mul(point_bytes, scalar_bytes)?; Ok(PrecompileOutput::new(gas_cost, output.into())) } @@ -212,7 +202,7 @@ pub fn run_pair( points.push((encoded_g1_element, encoded_g2_element)); } - let pairing_result = pairing_check(&points)?; + let pairing_result = crate::crypto::bn128::pairing_check(&points)?; Ok(PrecompileOutput::new( gas_used, bool_to_bytes32(pairing_result), diff --git a/crates/precompile/src/crypto/blake2/constants.rs b/crates/precompile/src/crypto/blake2/constants.rs new file mode 100644 index 0000000000..7b825df187 --- /dev/null +++ b/crates/precompile/src/crypto/blake2/constants.rs @@ -0,0 +1,7 @@ +//! Constants for Blake2 compression + +/// Length of the Blake2 state vector +pub const STATE_LENGTH: usize = 8; + +/// Length of the Blake2 message block in bytes +pub const MESSAGE_LENGTH: usize = 128; \ No newline at end of file diff --git a/crates/precompile/src/crypto/blake2/mod.rs b/crates/precompile/src/crypto/blake2/mod.rs new file mode 100644 index 0000000000..867c061423 --- /dev/null +++ b/crates/precompile/src/crypto/blake2/mod.rs @@ -0,0 +1,567 @@ +//! 
Blake2 cryptographic implementations + +// Re-export the main compress function for external use +pub use algo::compress; + +/// Blake2 algorithm +pub mod algo { + /// SIGMA from spec: + pub const SIGMA: [[usize; 16]; 10] = [ + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], + [14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3], + [11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4], + [7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8], + [9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13], + [2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9], + [12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11], + [13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10], + [6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5], + [10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0], + ]; + + /// got IV from: + pub const IV: [u64; 8] = [ + 0x6a09e667f3bcc908, + 0xbb67ae8584caa73b, + 0x3c6ef372fe94f82b, + 0xa54ff53a5f1d36f1, + 0x510e527fade682d1, + 0x9b05688c2b3e6c1f, + 0x1f83d9abfb41bd6b, + 0x5be0cd19137e2179, + ]; + + #[inline(always)] + #[allow(clippy::many_single_char_names)] + /// G function: + fn g(v: &mut [u64; 16], a: usize, b: usize, c: usize, d: usize, x: u64, y: u64) { + let mut va = v[a]; + let mut vb = v[b]; + let mut vc = v[c]; + let mut vd = v[d]; + + va = va.wrapping_add(vb).wrapping_add(x); + vd = (vd ^ va).rotate_right(32); + vc = vc.wrapping_add(vd); + vb = (vb ^ vc).rotate_right(24); + + va = va.wrapping_add(vb).wrapping_add(y); + vd = (vd ^ va).rotate_right(16); + vc = vc.wrapping_add(vd); + vb = (vb ^ vc).rotate_right(63); + + v[a] = va; + v[b] = vb; + v[c] = vc; + v[d] = vd; + } + + /// Compression function F takes as an argument the state vector "h", + /// message block vector "m" (last block is padded with zeros to full + /// block size, if required), 2w-bit offset counter "t", and final block + /// indicator flag "f". Local vector v[0..15] is used in processing. F + /// returns a new state vector. 
The number of rounds, "r", is 12 for + /// BLAKE2b and 10 for BLAKE2s. Rounds are numbered from 0 to r - 1. + #[allow(clippy::many_single_char_names)] + pub fn compress(rounds: usize, h: &mut [u64; 8], m: [u64; 16], t: [u64; 2], f: bool) { + #[cfg(all(target_feature = "avx2", feature = "std"))] + { + // only if it is compiled with avx2 flag and it is std, we can use avx2. + if std::is_x86_feature_detected!("avx2") { + // avx2 is 1.8x more performant than portable implementation. + unsafe { + super::avx2::compress_block( + rounds, + &m, + h, + ((t[1] as u128) << 64) | (t[0] as u128), + if f { !0 } else { 0 }, + 0, + ); + } + return; + } + } + + // if avx2 is not available, use the fallback portable implementation + + let mut v = [0u64; 16]; + v[..h.len()].copy_from_slice(h); // First half from state. + v[h.len()..].copy_from_slice(&IV); // Second half from IV. + + v[12] ^= t[0]; + v[13] ^= t[1]; + + if f { + v[14] = !v[14] // Invert all bits if the last-block-flag is set. + } + for i in 0..rounds { + round(&mut v, &m, i); + } + + for i in 0..8 { + h[i] ^= v[i] ^ v[i + 8]; + } + } + + #[inline(always)] + fn round(v: &mut [u64; 16], m: &[u64; 16], r: usize) { + // Message word selection permutation for this round. + let s = &SIGMA[r % 10]; + // g1 + g(v, 0, 4, 8, 12, m[s[0]], m[s[1]]); + g(v, 1, 5, 9, 13, m[s[2]], m[s[3]]); + g(v, 2, 6, 10, 14, m[s[4]], m[s[5]]); + g(v, 3, 7, 11, 15, m[s[6]], m[s[7]]); + + // g2 + g(v, 0, 5, 10, 15, m[s[8]], m[s[9]]); + g(v, 1, 6, 11, 12, m[s[10]], m[s[11]]); + g(v, 2, 7, 8, 13, m[s[12]], m[s[13]]); + g(v, 3, 4, 9, 14, m[s[14]], m[s[15]]); + } +} + +// Adapted from https://github.com/rust-lang-nursery/stdsimd/pull/479. +macro_rules! 
_MM_SHUFFLE { + ($z:expr, $y:expr, $x:expr, $w:expr) => { + ($z << 6) | ($y << 4) | ($x << 2) | $w + }; +} + +/// Code adapted from https://github.com/oconnor663/blake2_simd/blob/82b3e2aee4d2384aabbeb146058301ff0dbd453f/blake2b/src/avx2.rs +#[cfg(all(target_feature = "avx2", feature = "std"))] +mod avx2 { + #[cfg(target_arch = "x86")] + use core::arch::x86::*; + #[cfg(target_arch = "x86_64")] + use core::arch::x86_64::*; + + use super::algo::IV; + use arrayref::{array_refs, mut_array_refs}; + + type Word = u64; + type Count = u128; + /// The number input bytes passed to each call to the compression function. Small benchmarks need + /// to use an even multiple of `BLOCKBYTES`, or else their apparent throughput will be low. + const BLOCKBYTES: usize = 16 * size_of::(); + + const DEGREE: usize = 4; + + /// Compress a block of data using the BLAKE2 algorithm. + #[inline(always)] + pub(crate) unsafe fn compress_block( + mut rounds: usize, + block: &[Word; 16], + words: &mut [Word; 8], + count: Count, + last_block: Word, + last_node: Word, + ) { + let (words_low, words_high) = mut_array_refs!(words, DEGREE, DEGREE); + let (iv_low, iv_high) = array_refs!(&IV, DEGREE, DEGREE); + let mut a = loadu(words_low); + let mut b = loadu(words_high); + let mut c = loadu(iv_low); + let flags = set4(count_low(count), count_high(count), last_block, last_node); + let mut d = xor(loadu(iv_high), flags); + + let block: &[u8; BLOCKBYTES] = std::mem::transmute(block); + let msg_chunks = array_refs!(block, 16, 16, 16, 16, 16, 16, 16, 16); + let m0 = _mm256_broadcastsi128_si256(loadu_128(msg_chunks.0)); + let m1 = _mm256_broadcastsi128_si256(loadu_128(msg_chunks.1)); + let m2 = _mm256_broadcastsi128_si256(loadu_128(msg_chunks.2)); + let m3 = _mm256_broadcastsi128_si256(loadu_128(msg_chunks.3)); + let m4 = _mm256_broadcastsi128_si256(loadu_128(msg_chunks.4)); + let m5 = _mm256_broadcastsi128_si256(loadu_128(msg_chunks.5)); + let m6 = _mm256_broadcastsi128_si256(loadu_128(msg_chunks.6)); + let 
m7 = _mm256_broadcastsi128_si256(loadu_128(msg_chunks.7)); + + let iv0 = a; + let iv1 = b; + let mut t0; + let mut t1; + let mut b0; + + loop { + if rounds == 0 { + break; + } + rounds -= 1; + + // round 1 + t0 = _mm256_unpacklo_epi64(m0, m1); + t1 = _mm256_unpacklo_epi64(m2, m3); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g1(&mut a, &mut b, &mut c, &mut d, &mut b0); + t0 = _mm256_unpackhi_epi64(m0, m1); + t1 = _mm256_unpackhi_epi64(m2, m3); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g2(&mut a, &mut b, &mut c, &mut d, &mut b0); + diagonalize(&mut a, &mut b, &mut c, &mut d); + t0 = _mm256_unpacklo_epi64(m7, m4); + t1 = _mm256_unpacklo_epi64(m5, m6); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g1(&mut a, &mut b, &mut c, &mut d, &mut b0); + t0 = _mm256_unpackhi_epi64(m7, m4); + t1 = _mm256_unpackhi_epi64(m5, m6); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g2(&mut a, &mut b, &mut c, &mut d, &mut b0); + undiagonalize(&mut a, &mut b, &mut c, &mut d); + + if rounds == 0 { + break; + } + rounds -= 1; + + // round 2 + t0 = _mm256_unpacklo_epi64(m7, m2); + t1 = _mm256_unpackhi_epi64(m4, m6); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g1(&mut a, &mut b, &mut c, &mut d, &mut b0); + t0 = _mm256_unpacklo_epi64(m5, m4); + t1 = _mm256_alignr_epi8(m3, m7, 8); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g2(&mut a, &mut b, &mut c, &mut d, &mut b0); + diagonalize(&mut a, &mut b, &mut c, &mut d); + t0 = _mm256_unpackhi_epi64(m2, m0); + t1 = _mm256_blend_epi32(m5, m0, 0x33); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g1(&mut a, &mut b, &mut c, &mut d, &mut b0); + t0 = _mm256_alignr_epi8(m6, m1, 8); + t1 = _mm256_blend_epi32(m3, m1, 0x33); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g2(&mut a, &mut b, &mut c, &mut d, &mut b0); + undiagonalize(&mut a, &mut b, &mut c, &mut d); + + if rounds == 0 { + break; + } + rounds -= 1; + + // round 3 + t0 = _mm256_alignr_epi8(m6, m5, 8); + t1 = _mm256_unpackhi_epi64(m2, m7); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g1(&mut a, &mut b, &mut c, &mut d, 
&mut b0); + t0 = _mm256_unpacklo_epi64(m4, m0); + t1 = _mm256_blend_epi32(m6, m1, 0x33); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g2(&mut a, &mut b, &mut c, &mut d, &mut b0); + diagonalize(&mut a, &mut b, &mut c, &mut d); + t0 = _mm256_alignr_epi8(m5, m4, 8); + t1 = _mm256_unpackhi_epi64(m1, m3); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g1(&mut a, &mut b, &mut c, &mut d, &mut b0); + t0 = _mm256_unpacklo_epi64(m2, m7); + t1 = _mm256_blend_epi32(m0, m3, 0x33); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g2(&mut a, &mut b, &mut c, &mut d, &mut b0); + undiagonalize(&mut a, &mut b, &mut c, &mut d); + + if rounds == 0 { + break; + } + rounds -= 1; + + // round 4 + t0 = _mm256_unpackhi_epi64(m3, m1); + t1 = _mm256_unpackhi_epi64(m6, m5); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g1(&mut a, &mut b, &mut c, &mut d, &mut b0); + t0 = _mm256_unpackhi_epi64(m4, m0); + t1 = _mm256_unpacklo_epi64(m6, m7); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g2(&mut a, &mut b, &mut c, &mut d, &mut b0); + diagonalize(&mut a, &mut b, &mut c, &mut d); + t0 = _mm256_alignr_epi8(m1, m7, 8); + t1 = _mm256_shuffle_epi32(m2, _MM_SHUFFLE!(1, 0, 3, 2)); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g1(&mut a, &mut b, &mut c, &mut d, &mut b0); + t0 = _mm256_unpacklo_epi64(m4, m3); + t1 = _mm256_unpacklo_epi64(m5, m0); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g2(&mut a, &mut b, &mut c, &mut d, &mut b0); + undiagonalize(&mut a, &mut b, &mut c, &mut d); + + if rounds == 0 { + break; + } + rounds -= 1; + + // round 5 + t0 = _mm256_unpackhi_epi64(m4, m2); + t1 = _mm256_unpacklo_epi64(m1, m5); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g1(&mut a, &mut b, &mut c, &mut d, &mut b0); + t0 = _mm256_blend_epi32(m3, m0, 0x33); + t1 = _mm256_blend_epi32(m7, m2, 0x33); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g2(&mut a, &mut b, &mut c, &mut d, &mut b0); + diagonalize(&mut a, &mut b, &mut c, &mut d); + t0 = _mm256_alignr_epi8(m7, m1, 8); + t1 = _mm256_alignr_epi8(m3, m5, 8); + b0 = _mm256_blend_epi32(t0, t1, 
0xF0); + g1(&mut a, &mut b, &mut c, &mut d, &mut b0); + t0 = _mm256_unpackhi_epi64(m6, m0); + t1 = _mm256_unpacklo_epi64(m6, m4); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g2(&mut a, &mut b, &mut c, &mut d, &mut b0); + undiagonalize(&mut a, &mut b, &mut c, &mut d); + + if rounds == 0 { + break; + } + rounds -= 1; + + // round 6 + t0 = _mm256_unpacklo_epi64(m1, m3); + t1 = _mm256_unpacklo_epi64(m0, m4); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g1(&mut a, &mut b, &mut c, &mut d, &mut b0); + t0 = _mm256_unpacklo_epi64(m6, m5); + t1 = _mm256_unpackhi_epi64(m5, m1); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g2(&mut a, &mut b, &mut c, &mut d, &mut b0); + diagonalize(&mut a, &mut b, &mut c, &mut d); + t0 = _mm256_alignr_epi8(m2, m0, 8); + t1 = _mm256_unpackhi_epi64(m3, m7); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g1(&mut a, &mut b, &mut c, &mut d, &mut b0); + t0 = _mm256_unpackhi_epi64(m4, m6); + t1 = _mm256_alignr_epi8(m7, m2, 8); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g2(&mut a, &mut b, &mut c, &mut d, &mut b0); + undiagonalize(&mut a, &mut b, &mut c, &mut d); + + if rounds == 0 { + break; + } + rounds -= 1; + + // round 7 + t0 = _mm256_blend_epi32(m0, m6, 0x33); + t1 = _mm256_unpacklo_epi64(m7, m2); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g1(&mut a, &mut b, &mut c, &mut d, &mut b0); + t0 = _mm256_unpackhi_epi64(m2, m7); + t1 = _mm256_alignr_epi8(m5, m6, 8); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g2(&mut a, &mut b, &mut c, &mut d, &mut b0); + diagonalize(&mut a, &mut b, &mut c, &mut d); + t0 = _mm256_unpacklo_epi64(m4, m0); + t1 = _mm256_blend_epi32(m4, m3, 0x33); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g1(&mut a, &mut b, &mut c, &mut d, &mut b0); + t0 = _mm256_unpackhi_epi64(m5, m3); + t1 = _mm256_shuffle_epi32(m1, _MM_SHUFFLE!(1, 0, 3, 2)); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g2(&mut a, &mut b, &mut c, &mut d, &mut b0); + undiagonalize(&mut a, &mut b, &mut c, &mut d); + + if rounds == 0 { + break; + } + rounds -= 1; + // round 8 + t0 = 
_mm256_unpackhi_epi64(m6, m3); + t1 = _mm256_blend_epi32(m1, m6, 0x33); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g1(&mut a, &mut b, &mut c, &mut d, &mut b0); + t0 = _mm256_alignr_epi8(m7, m5, 8); + t1 = _mm256_unpackhi_epi64(m0, m4); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g2(&mut a, &mut b, &mut c, &mut d, &mut b0); + diagonalize(&mut a, &mut b, &mut c, &mut d); + t0 = _mm256_blend_epi32(m2, m1, 0x33); + t1 = _mm256_alignr_epi8(m4, m7, 8); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g1(&mut a, &mut b, &mut c, &mut d, &mut b0); + t0 = _mm256_unpacklo_epi64(m5, m0); + t1 = _mm256_unpacklo_epi64(m2, m3); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g2(&mut a, &mut b, &mut c, &mut d, &mut b0); + undiagonalize(&mut a, &mut b, &mut c, &mut d); + + if rounds == 0 { + break; + } + rounds -= 1; + + // round 9 + t0 = _mm256_unpacklo_epi64(m3, m7); + t1 = _mm256_alignr_epi8(m0, m5, 8); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g1(&mut a, &mut b, &mut c, &mut d, &mut b0); + t0 = _mm256_unpackhi_epi64(m7, m4); + t1 = _mm256_alignr_epi8(m4, m1, 8); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g2(&mut a, &mut b, &mut c, &mut d, &mut b0); + diagonalize(&mut a, &mut b, &mut c, &mut d); + t0 = _mm256_unpacklo_epi64(m5, m6); + t1 = _mm256_unpackhi_epi64(m6, m0); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g1(&mut a, &mut b, &mut c, &mut d, &mut b0); + t0 = _mm256_alignr_epi8(m1, m2, 8); + t1 = _mm256_alignr_epi8(m2, m3, 8); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g2(&mut a, &mut b, &mut c, &mut d, &mut b0); + undiagonalize(&mut a, &mut b, &mut c, &mut d); + + if rounds == 0 { + break; + } + rounds -= 1; + + // round 10 + t0 = _mm256_unpacklo_epi64(m5, m4); + t1 = _mm256_unpackhi_epi64(m3, m0); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g1(&mut a, &mut b, &mut c, &mut d, &mut b0); + t0 = _mm256_unpacklo_epi64(m1, m2); + t1 = _mm256_blend_epi32(m2, m3, 0x33); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g2(&mut a, &mut b, &mut c, &mut d, &mut b0); + diagonalize(&mut a, &mut b, &mut 
c, &mut d); + t0 = _mm256_unpackhi_epi64(m6, m7); + t1 = _mm256_unpackhi_epi64(m4, m1); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g1(&mut a, &mut b, &mut c, &mut d, &mut b0); + t0 = _mm256_blend_epi32(m5, m0, 0x33); + t1 = _mm256_unpacklo_epi64(m7, m6); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g2(&mut a, &mut b, &mut c, &mut d, &mut b0); + undiagonalize(&mut a, &mut b, &mut c, &mut d); + + // last two rounds are removed + } + a = xor(a, c); + b = xor(b, d); + a = xor(a, iv0); + b = xor(b, iv1); + + storeu(a, words_low); + storeu(b, words_high); + } + + #[inline(always)] + pub(crate) fn count_low(count: Count) -> Word { + count as Word + } + + #[inline(always)] + pub(crate) fn count_high(count: Count) -> Word { + (count >> 8 * size_of::()) as Word + } + + #[inline(always)] + unsafe fn loadu(src: *const [Word; DEGREE]) -> __m256i { + // This is an unaligned load, so the pointer cast is allowed. + _mm256_loadu_si256(src as *const __m256i) + } + + #[inline(always)] + unsafe fn storeu(src: __m256i, dest: *mut [Word; DEGREE]) { + // This is an unaligned store, so the pointer cast is allowed. + _mm256_storeu_si256(dest as *mut __m256i, src) + } + + #[inline(always)] + unsafe fn loadu_128(mem_addr: &[u8; 16]) -> __m128i { + _mm_loadu_si128(mem_addr.as_ptr() as *const __m128i) + } + + #[inline(always)] + unsafe fn add(a: __m256i, b: __m256i) -> __m256i { + _mm256_add_epi64(a, b) + } + + #[inline(always)] + unsafe fn xor(a: __m256i, b: __m256i) -> __m256i { + _mm256_xor_si256(a, b) + } + + #[inline(always)] + unsafe fn set4(a: u64, b: u64, c: u64, d: u64) -> __m256i { + _mm256_setr_epi64x(a as i64, b as i64, c as i64, d as i64) + } + + // These rotations are the "simple version". For the "complicated version", see + // https://github.com/sneves/blake2-avx2/blob/b3723921f668df09ece52dcd225a36d4a4eea1d9/blake2b-common.h#L43-L46. + // For a discussion of the tradeoffs, see + // https://github.com/sneves/blake2-avx2/pull/5. 
In short: + // - Due to an LLVM bug (https://bugs.llvm.org/show_bug.cgi?id=44379), this + // version performs better on recent x86 chips. + // - LLVM is able to optimize this version to AVX-512 rotation instructions + // when those are enabled. + #[inline(always)] + unsafe fn rot32(x: __m256i) -> __m256i { + _mm256_or_si256(_mm256_srli_epi64(x, 32), _mm256_slli_epi64(x, 64 - 32)) + } + + #[inline(always)] + unsafe fn rot24(x: __m256i) -> __m256i { + _mm256_or_si256(_mm256_srli_epi64(x, 24), _mm256_slli_epi64(x, 64 - 24)) + } + + #[inline(always)] + unsafe fn rot16(x: __m256i) -> __m256i { + _mm256_or_si256(_mm256_srli_epi64(x, 16), _mm256_slli_epi64(x, 64 - 16)) + } + + #[inline(always)] + unsafe fn rot63(x: __m256i) -> __m256i { + _mm256_or_si256(_mm256_srli_epi64(x, 63), _mm256_slli_epi64(x, 64 - 63)) + } + + #[inline(always)] + unsafe fn g1( + a: &mut __m256i, + b: &mut __m256i, + c: &mut __m256i, + d: &mut __m256i, + m: &mut __m256i, + ) { + *a = add(*a, *m); + *a = add(*a, *b); + *d = xor(*d, *a); + *d = rot32(*d); + *c = add(*c, *d); + *b = xor(*b, *c); + *b = rot24(*b); + } + + #[inline(always)] + unsafe fn g2( + a: &mut __m256i, + b: &mut __m256i, + c: &mut __m256i, + d: &mut __m256i, + m: &mut __m256i, + ) { + *a = add(*a, *m); + *a = add(*a, *b); + *d = xor(*d, *a); + *d = rot16(*d); + *c = add(*c, *d); + *b = xor(*b, *c); + *b = rot63(*b); + } + + // Note the optimization here of leaving b as the unrotated row, rather than a. + // All the message loads below are adjusted to compensate for this. 
See + // discussion at https://github.com/sneves/blake2-avx2/pull/4 + #[inline(always)] + unsafe fn diagonalize(a: &mut __m256i, _b: &mut __m256i, c: &mut __m256i, d: &mut __m256i) { + *a = _mm256_permute4x64_epi64(*a, _MM_SHUFFLE!(2, 1, 0, 3)); + *d = _mm256_permute4x64_epi64(*d, _MM_SHUFFLE!(1, 0, 3, 2)); + *c = _mm256_permute4x64_epi64(*c, _MM_SHUFFLE!(0, 3, 2, 1)); + } + + #[inline(always)] + unsafe fn undiagonalize(a: &mut __m256i, _b: &mut __m256i, c: &mut __m256i, d: &mut __m256i) { + *a = _mm256_permute4x64_epi64(*a, _MM_SHUFFLE!(0, 3, 2, 1)); + *d = _mm256_permute4x64_epi64(*d, _MM_SHUFFLE!(1, 0, 3, 2)); + *c = _mm256_permute4x64_epi64(*c, _MM_SHUFFLE!(2, 1, 0, 3)); + } +} diff --git a/crates/precompile/src/bls12_381/arkworks.rs b/crates/precompile/src/crypto/bls12_381/arkworks.rs similarity index 97% rename from crates/precompile/src/bls12_381/arkworks.rs rename to crates/precompile/src/crypto/bls12_381/arkworks.rs index b8eb0a437a..018525d7de 100644 --- a/crates/precompile/src/bls12_381/arkworks.rs +++ b/crates/precompile/src/crypto/bls12_381/arkworks.rs @@ -1,8 +1,6 @@ use super::{G1Point, G2Point, PairingPair}; -use crate::{ - bls12_381_const::{FP_LENGTH, G1_LENGTH, G2_LENGTH, SCALAR_LENGTH}, - PrecompileError, -}; +use super::constants::{FP_LENGTH, G1_LENGTH, G2_LENGTH, SCALAR_LENGTH}; +use crate::PrecompileError; use ark_bls12_381::{Bls12_381, Fq, Fq2, Fr, G1Affine, G1Projective, G2Affine, G2Projective}; use ark_ec::{ hashing::{curve_maps::wb::WBMap, map_to_curve_hasher::MapToCurve}, @@ -360,7 +358,7 @@ fn pairing_check(pairs: &[(G1Affine, G2Affine)]) -> bool { /// pairing_check_bytes performs a pairing check on a list of G1 and G2 point pairs taking byte inputs. 
#[inline] -pub(super) fn pairing_check_bytes(pairs: &[PairingPair]) -> Result { +pub fn pairing_check_bytes(pairs: &[PairingPair]) -> Result { if pairs.is_empty() { return Ok(true); } @@ -405,7 +403,7 @@ pub(super) fn pairing_check_bytes(pairs: &[PairingPair]) -> Result Result<[u8; G1_LENGTH], PrecompileError> { @@ -426,7 +424,7 @@ pub(super) fn p1_add_affine_bytes( /// Performs point addition on two G2 points taking byte coordinates. #[inline] -pub(super) fn p2_add_affine_bytes( +pub fn p2_add_affine_bytes( a: G2Point, b: G2Point, ) -> Result<[u8; G2_LENGTH], PrecompileError> { @@ -447,7 +445,7 @@ pub(super) fn p2_add_affine_bytes( /// Maps a field element to a G1 point from bytes #[inline] -pub(super) fn map_fp_to_g1_bytes( +pub fn map_fp_to_g1_bytes( fp_bytes: &[u8; FP_LENGTH], ) -> Result<[u8; G1_LENGTH], PrecompileError> { let fp = read_fp(fp_bytes)?; @@ -457,7 +455,7 @@ pub(super) fn map_fp_to_g1_bytes( /// Maps field elements to a G2 point from bytes #[inline] -pub(super) fn map_fp2_to_g2_bytes( +pub fn map_fp2_to_g2_bytes( fp2_x: &[u8; FP_LENGTH], fp2_y: &[u8; FP_LENGTH], ) -> Result<[u8; G2_LENGTH], PrecompileError> { @@ -468,7 +466,7 @@ pub(super) fn map_fp2_to_g2_bytes( /// Performs multi-scalar multiplication (MSM) for G1 points taking byte inputs. #[inline] -pub(super) fn p1_msm_bytes( +pub fn p1_msm_bytes( point_scalar_pairs: impl Iterator>, ) -> Result<[u8; G1_LENGTH], PrecompileError> { let mut g1_points = Vec::new(); @@ -505,7 +503,7 @@ pub(super) fn p1_msm_bytes( /// Performs multi-scalar multiplication (MSM) for G2 points taking byte inputs. 
#[inline] -pub(super) fn p2_msm_bytes( +pub fn p2_msm_bytes( point_scalar_pairs: impl Iterator>, ) -> Result<[u8; G2_LENGTH], PrecompileError> { let mut g2_points = Vec::new(); diff --git a/crates/precompile/src/bls12_381/blst.rs b/crates/precompile/src/crypto/bls12_381/blst.rs similarity index 98% rename from crates/precompile/src/bls12_381/blst.rs rename to crates/precompile/src/crypto/bls12_381/blst.rs index 822eed35ba..3a90a818af 100644 --- a/crates/precompile/src/bls12_381/blst.rs +++ b/crates/precompile/src/crypto/bls12_381/blst.rs @@ -1,10 +1,8 @@ // This module contains a safe wrapper around the blst library. use super::{G1Point, G2Point, PairingPair}; -use crate::{ - bls12_381_const::{FP_LENGTH, G1_LENGTH, G2_LENGTH, SCALAR_LENGTH, SCALAR_LENGTH_BITS}, - PrecompileError, -}; +use super::constants::{FP_LENGTH, G1_LENGTH, G2_LENGTH, SCALAR_LENGTH, SCALAR_LENGTH_BITS}; +use crate::PrecompileError; use blst::{ blst_bendian_from_fp, blst_final_exp, blst_fp, blst_fp12, blst_fp12_is_one, blst_fp12_mul, blst_fp2, blst_fp_from_bendian, blst_map_to_g1, blst_map_to_g2, blst_miller_loop, blst_p1, @@ -622,7 +620,7 @@ fn is_valid_be(input: &[u8; 48]) -> bool { /// Performs point addition on two G1 points taking byte coordinates. #[inline] -pub(super) fn p1_add_affine_bytes( +pub fn p1_add_affine_bytes( a: G1Point, b: G1Point, ) -> Result<[u8; G1_LENGTH], crate::PrecompileError> { @@ -643,7 +641,7 @@ pub(super) fn p1_add_affine_bytes( /// Performs point addition on two G2 points taking byte coordinates. 
#[inline] -pub(super) fn p2_add_affine_bytes( +pub fn p2_add_affine_bytes( a: G2Point, b: G2Point, ) -> Result<[u8; G2_LENGTH], crate::PrecompileError> { @@ -664,7 +662,7 @@ pub(super) fn p2_add_affine_bytes( /// Maps a field element to a G1 point from bytes #[inline] -pub(super) fn map_fp_to_g1_bytes( +pub fn map_fp_to_g1_bytes( fp_bytes: &[u8; FP_LENGTH], ) -> Result<[u8; G1_LENGTH], crate::PrecompileError> { let fp = read_fp(fp_bytes)?; @@ -674,7 +672,7 @@ pub(super) fn map_fp_to_g1_bytes( /// Maps field elements to a G2 point from bytes #[inline] -pub(super) fn map_fp2_to_g2_bytes( +pub fn map_fp2_to_g2_bytes( fp2_x: &[u8; FP_LENGTH], fp2_y: &[u8; FP_LENGTH], ) -> Result<[u8; G2_LENGTH], crate::PrecompileError> { @@ -685,7 +683,7 @@ pub(super) fn map_fp2_to_g2_bytes( /// Performs multi-scalar multiplication (MSM) for G1 points taking byte inputs. #[inline] -pub(super) fn p1_msm_bytes( +pub fn p1_msm_bytes( point_scalar_pairs: impl Iterator< Item = Result<(G1Point, [u8; SCALAR_LENGTH]), crate::PrecompileError>, >, @@ -724,7 +722,7 @@ pub(super) fn p1_msm_bytes( /// Performs multi-scalar multiplication (MSM) for G2 points taking byte inputs. #[inline] -pub(super) fn p2_msm_bytes( +pub fn p2_msm_bytes( point_scalar_pairs: impl Iterator< Item = Result<(G2Point, [u8; SCALAR_LENGTH]), crate::PrecompileError>, >, @@ -763,7 +761,7 @@ pub(super) fn p2_msm_bytes( /// pairing_check_bytes performs a pairing check on a list of G1 and G2 point pairs taking byte inputs. #[inline] -pub(super) fn pairing_check_bytes(pairs: &[PairingPair]) -> Result { +pub fn pairing_check_bytes(pairs: &[PairingPair]) -> Result { if pairs.is_empty() { return Ok(true); } diff --git a/crates/precompile/src/crypto/bls12_381/constants.rs b/crates/precompile/src/crypto/bls12_381/constants.rs new file mode 100644 index 0000000000..f706c7d539 --- /dev/null +++ b/crates/precompile/src/crypto/bls12_381/constants.rs @@ -0,0 +1,19 @@ +//! 
Constants for BLS12-381 cryptographic operations + +/// Length of a field element in bytes +pub const FP_LENGTH: usize = 48; + +/// Length of a G1 point (x, y coordinates) +pub const G1_LENGTH: usize = 2 * FP_LENGTH; + +/// Length of a Fp2 element +pub const FP2_LENGTH: usize = 2 * FP_LENGTH; + +/// Length of a G2 point +pub const G2_LENGTH: usize = 2 * FP2_LENGTH; + +/// Length of a scalar field element +pub const SCALAR_LENGTH: usize = 32; + +/// Number of bits in a scalar field element +pub const SCALAR_LENGTH_BITS: usize = SCALAR_LENGTH * 8; \ No newline at end of file diff --git a/crates/precompile/src/crypto/bls12_381/mod.rs b/crates/precompile/src/crypto/bls12_381/mod.rs new file mode 100644 index 0000000000..7df00c93e2 --- /dev/null +++ b/crates/precompile/src/crypto/bls12_381/mod.rs @@ -0,0 +1,47 @@ +//! BLS12-381 cryptographic implementations + +pub mod constants; + +// Re-export type aliases used by implementations +pub use constants::FP_LENGTH; +/// G1 point represented as two field elements (x, y coordinates) +pub type G1Point = ([u8; FP_LENGTH], [u8; FP_LENGTH]); +/// G2 point represented as four field elements (x0, x1, y0, y1 coordinates) +pub type G2Point = ( + [u8; FP_LENGTH], + [u8; FP_LENGTH], + [u8; FP_LENGTH], + [u8; FP_LENGTH], +); +/// Pairing pair consisting of a G1 point and a G2 point +pub type PairingPair = (G1Point, G2Point); +/// G1 point paired with a scalar for multi-scalar multiplication +pub type G1PointScalarPair = (G1Point, [u8; constants::SCALAR_LENGTH]); +/// G2 point paired with a scalar for multi-scalar multiplication +pub type G2PointScalarPair = (G2Point, [u8; constants::SCALAR_LENGTH]); + +cfg_if::cfg_if! 
{ + if #[cfg(feature = "blst")]{ + mod blst; + pub use blst::{ + p1_add_affine_bytes, + p2_add_affine_bytes, + p1_msm_bytes as g1_multiexp, + p2_msm_bytes as g2_multiexp, + pairing_check_bytes, + map_fp_to_g1_bytes as fp_to_g1, + map_fp2_to_g2_bytes as fp2_to_g2 + }; + } else { + mod arkworks; + pub use arkworks::{ + p1_add_affine_bytes, + p2_add_affine_bytes, + p1_msm_bytes as g1_multiexp, + p2_msm_bytes as g2_multiexp, + pairing_check_bytes, + map_fp_to_g1_bytes as fp_to_g1, + map_fp2_to_g2_bytes as fp2_to_g2 + }; + } +} \ No newline at end of file diff --git a/crates/precompile/src/bn128/arkworks.rs b/crates/precompile/src/crypto/bn128/arkworks.rs similarity index 96% rename from crates/precompile/src/bn128/arkworks.rs rename to crates/precompile/src/crypto/bn128/arkworks.rs index 63a14a9946..7df9de1fc0 100644 --- a/crates/precompile/src/bn128/arkworks.rs +++ b/crates/precompile/src/crypto/bn128/arkworks.rs @@ -1,4 +1,4 @@ -use super::{FQ2_LEN, FQ_LEN, G1_LEN, SCALAR_LEN}; +use super::constants::{FQ2_LEN, FQ_LEN, G1_LEN, SCALAR_LEN}; use crate::PrecompileError; use std::vec::Vec; @@ -180,7 +180,7 @@ pub(super) fn read_scalar(input: &[u8]) -> Fr { /// Performs point addition on two G1 points. #[inline] -pub(super) fn g1_point_add(p1_bytes: &[u8], p2_bytes: &[u8]) -> Result<[u8; 64], PrecompileError> { +pub fn g1_point_add(p1_bytes: &[u8], p2_bytes: &[u8]) -> Result<[u8; 64], PrecompileError> { let p1 = read_g1_point(p1_bytes)?; let p2 = read_g1_point(p2_bytes)?; @@ -194,7 +194,7 @@ pub(super) fn g1_point_add(p1_bytes: &[u8], p2_bytes: &[u8]) -> Result<[u8; 64], /// Performs a G1 scalar multiplication. #[inline] -pub(super) fn g1_point_mul( +pub fn g1_point_mul( point_bytes: &[u8], fr_bytes: &[u8], ) -> Result<[u8; 64], PrecompileError> { @@ -215,7 +215,7 @@ pub(super) fn g1_point_mul( /// Note: If the input is empty, this function returns true. /// This is different to EIP2537 which disallows the empty input. 
#[inline] -pub(super) fn pairing_check(pairs: &[(&[u8], &[u8])]) -> Result { +pub fn pairing_check(pairs: &[(&[u8], &[u8])]) -> Result { let mut g1_points = Vec::with_capacity(pairs.len()); let mut g2_points = Vec::with_capacity(pairs.len()); diff --git a/crates/precompile/src/crypto/bn128/constants.rs b/crates/precompile/src/crypto/bn128/constants.rs new file mode 100644 index 0000000000..437675bc29 --- /dev/null +++ b/crates/precompile/src/crypto/bn128/constants.rs @@ -0,0 +1,13 @@ +//! Constants for BN128 cryptographic operations + +/// Field element length +pub const FQ_LEN: usize = 32; + +/// Scalar field element length +pub const SCALAR_LEN: usize = 32; + +/// Quadratic extension field element length (Fq2) +pub const FQ2_LEN: usize = 2 * FQ_LEN; + +/// G1 point length (x, y coordinates) +pub const G1_LEN: usize = 2 * FQ_LEN; \ No newline at end of file diff --git a/crates/precompile/src/crypto/bn128/mod.rs b/crates/precompile/src/crypto/bn128/mod.rs new file mode 100644 index 0000000000..2a15557e3f --- /dev/null +++ b/crates/precompile/src/crypto/bn128/mod.rs @@ -0,0 +1,13 @@ +//! BN128 cryptographic implementations + +pub mod constants; + +cfg_if::cfg_if! 
{ + if #[cfg(feature = "bn")]{ + mod substrate; + pub use substrate::{g1_point_add, g1_point_mul, pairing_check}; + } else { + mod arkworks; + pub use arkworks::{g1_point_add, g1_point_mul, pairing_check}; + } +} \ No newline at end of file diff --git a/crates/precompile/src/bn128/substrate.rs b/crates/precompile/src/crypto/bn128/substrate.rs similarity index 95% rename from crates/precompile/src/bn128/substrate.rs rename to crates/precompile/src/crypto/bn128/substrate.rs index 23938c79c3..8c30b59a24 100644 --- a/crates/precompile/src/bn128/substrate.rs +++ b/crates/precompile/src/crypto/bn128/substrate.rs @@ -1,4 +1,4 @@ -use super::{FQ2_LEN, FQ_LEN, G1_LEN, SCALAR_LEN}; +use super::constants::{FQ2_LEN, FQ_LEN, G1_LEN, SCALAR_LEN}; use crate::PrecompileError; use bn::{AffineG1, AffineG2, Fq, Fq2, Group, Gt, G1, G2}; @@ -150,7 +150,7 @@ pub(super) fn read_scalar(input: &[u8]) -> bn::Fr { /// Performs point addition on two G1 points. #[inline] -pub(super) fn g1_point_add(p1_bytes: &[u8], p2_bytes: &[u8]) -> Result<[u8; 64], PrecompileError> { +pub fn g1_point_add(p1_bytes: &[u8], p2_bytes: &[u8]) -> Result<[u8; 64], PrecompileError> { let p1 = read_g1_point(p1_bytes)?; let p2 = read_g1_point(p2_bytes)?; let result = p1 + p2; @@ -159,7 +159,7 @@ pub(super) fn g1_point_add(p1_bytes: &[u8], p2_bytes: &[u8]) -> Result<[u8; 64], /// Performs a G1 scalar multiplication. #[inline] -pub(super) fn g1_point_mul( +pub fn g1_point_mul( point_bytes: &[u8], fr_bytes: &[u8], ) -> Result<[u8; 64], PrecompileError> { @@ -175,7 +175,7 @@ pub(super) fn g1_point_mul( /// Note: If the input is empty, this function returns true. /// This is different to EIP2537 which disallows the empty input. 
#[inline] -pub(super) fn pairing_check(pairs: &[(&[u8], &[u8])]) -> Result { +pub fn pairing_check(pairs: &[(&[u8], &[u8])]) -> Result { let mut parsed_pairs = Vec::with_capacity(pairs.len()); for (g1_bytes, g2_bytes) in pairs { diff --git a/crates/precompile/src/crypto/hash/constants.rs b/crates/precompile/src/crypto/hash/constants.rs new file mode 100644 index 0000000000..3e52de738c --- /dev/null +++ b/crates/precompile/src/crypto/hash/constants.rs @@ -0,0 +1,7 @@ +//! Constants for hash functions + +/// SHA-256 output length in bytes +pub const SHA256_LENGTH: usize = 32; + +/// RIPEMD-160 output length in bytes +pub const RIPEMD160_LENGTH: usize = 20; \ No newline at end of file diff --git a/crates/precompile/src/crypto/hash/mod.rs b/crates/precompile/src/crypto/hash/mod.rs new file mode 100644 index 0000000000..2911103639 --- /dev/null +++ b/crates/precompile/src/crypto/hash/mod.rs @@ -0,0 +1,21 @@ +//! Hash function implementations + +pub mod constants; + +use sha2::Digest; + +/// Compute SHA-256 hash +pub fn sha256(input: &[u8]) -> [u8; constants::SHA256_LENGTH] { + let output = sha2::Sha256::digest(input); + output.into() +} + +/// Compute RIPEMD-160 hash (padded to 32 bytes) +pub fn ripemd160(input: &[u8]) -> [u8; 32] { + let mut hasher = ripemd::Ripemd160::new(); + hasher.update(input); + + let mut output = [0u8; 32]; + hasher.finalize_into((&mut output[12..]).into()); + output +} \ No newline at end of file diff --git a/crates/precompile/src/crypto/kzg/mod.rs b/crates/precompile/src/crypto/kzg/mod.rs new file mode 100644 index 0000000000..a5e5158f22 --- /dev/null +++ b/crates/precompile/src/crypto/kzg/mod.rs @@ -0,0 +1,40 @@ +//! KZG (Kate-Zaverucha-Goldberg) point evaluation + +cfg_if::cfg_if! { + if #[cfg(feature = "c-kzg")] { + use c_kzg::{Bytes32, Bytes48}; + } else if #[cfg(feature = "kzg-rs")] { + use kzg_rs::{Bytes32, Bytes48, KzgProof}; + } +} + +/// Verify KZG proof. 
+#[inline] +pub fn verify_kzg_proof( + commitment: &[u8; 48], + z: &[u8; 32], + y: &[u8; 32], + proof: &[u8; 48], +) -> bool { + cfg_if::cfg_if! { + if #[cfg(feature = "c-kzg")] { + let kzg_settings = c_kzg::ethereum_kzg_settings(8); + kzg_settings.verify_kzg_proof( + &Bytes48::from(*commitment), + &Bytes32::from(*z), + &Bytes32::from(*y), + &Bytes48::from(*proof) + ).unwrap_or(false) + } else if #[cfg(feature = "kzg-rs")] { + let env = kzg_rs::EnvKzgSettings::default(); + let kzg_settings = env.get(); + KzgProof::verify_kzg_proof( + Bytes48::from(*commitment), + Bytes32::from(*z), + Bytes32::from(*y), + Bytes48::from(*proof), + kzg_settings + ).unwrap_or(false) + } + } +} \ No newline at end of file diff --git a/crates/precompile/src/crypto/mod.rs b/crates/precompile/src/crypto/mod.rs new file mode 100644 index 0000000000..cde1cea6bc --- /dev/null +++ b/crates/precompile/src/crypto/mod.rs @@ -0,0 +1,29 @@ +//! Cryptographic backend implementations for precompiles +//! +//! This module contains pure cryptographic implementations used by various precompiles. +//! The precompile logic (addresses, gas costs, input parsing) remains in the parent modules. + +/// BN128 elliptic curve operations +pub mod bn128; + +/// BLS12-381 elliptic curve operations +pub mod bls12_381; + +/// Blake2 compression function +pub mod blake2; + +/// Hash functions (SHA-256, RIPEMD-160) +pub mod hash; + +/// KZG point evaluation +#[cfg(any(feature = "c-kzg", feature = "kzg-rs"))] +pub mod kzg; + +/// Modular exponentiation +pub mod modexp; + +/// secp256k1 elliptic curve operations +pub mod secp256k1; + +/// secp256r1 (P-256) elliptic curve operations +pub mod secp256r1; \ No newline at end of file diff --git a/crates/precompile/src/crypto/modexp/mod.rs b/crates/precompile/src/crypto/modexp/mod.rs new file mode 100644 index 0000000000..28689d0110 --- /dev/null +++ b/crates/precompile/src/crypto/modexp/mod.rs @@ -0,0 +1,28 @@ +//! 
Modular exponentiation implementations + +use std::vec::Vec; + +#[cfg(feature = "gmp")] +/// GMP-based modular exponentiation implementation +pub fn modexp(base: &[u8], exponent: &[u8], modulus: &[u8]) -> Vec { + use rug::{integer::Order::Msf, Integer}; + // Convert byte slices to GMP integers + let base_int = Integer::from_digits(base, Msf); + let exp_int = Integer::from_digits(exponent, Msf); + let mod_int = Integer::from_digits(modulus, Msf); + + // Perform modular exponentiation using GMP's pow_mod + let result = base_int.pow_mod(&exp_int, &mod_int).unwrap_or_default(); + + // Convert result back to bytes + let byte_count = result.significant_bits().div_ceil(8); + let mut output = vec![0u8; byte_count as usize]; + result.write_digits(&mut output, Msf); + output +} + +#[cfg(not(feature = "gmp"))] +/// Aurora engine modular exponentiation implementation +pub fn modexp(base: &[u8], exponent: &[u8], modulus: &[u8]) -> Vec { + aurora_engine_modexp::modexp(base, exponent, modulus) +} \ No newline at end of file diff --git a/crates/precompile/src/secp256k1/bitcoin_secp256k1.rs b/crates/precompile/src/crypto/secp256k1/bitcoin_secp256k1.rs similarity index 100% rename from crates/precompile/src/secp256k1/bitcoin_secp256k1.rs rename to crates/precompile/src/crypto/secp256k1/bitcoin_secp256k1.rs diff --git a/crates/precompile/src/crypto/secp256k1/constants.rs b/crates/precompile/src/crypto/secp256k1/constants.rs new file mode 100644 index 0000000000..7aafa7ec3f --- /dev/null +++ b/crates/precompile/src/crypto/secp256k1/constants.rs @@ -0,0 +1,7 @@ +//! 
Constants for secp256k1 cryptographic operations + +/// Length of the message hash (32 bytes) +pub const MESSAGE_HASH_LENGTH: usize = 32; + +/// Length of the signature (64 bytes: r || s) +pub const SIGNATURE_LENGTH: usize = 64; \ No newline at end of file diff --git a/crates/precompile/src/secp256k1/k256.rs b/crates/precompile/src/crypto/secp256k1/k256.rs similarity index 100% rename from crates/precompile/src/secp256k1/k256.rs rename to crates/precompile/src/crypto/secp256k1/k256.rs diff --git a/crates/precompile/src/crypto/secp256k1/mod.rs b/crates/precompile/src/crypto/secp256k1/mod.rs new file mode 100644 index 0000000000..98b22cb03b --- /dev/null +++ b/crates/precompile/src/crypto/secp256k1/mod.rs @@ -0,0 +1,16 @@ +//! secp256k1 cryptographic implementations + +pub mod constants; + +cfg_if::cfg_if! { + if #[cfg(feature = "secp256k1")]{ + mod bitcoin_secp256k1; + pub use bitcoin_secp256k1::ecrecover; + } else if #[cfg(feature = "libsecp256k1")]{ + mod parity_libsecp256k1; + pub use parity_libsecp256k1::ecrecover; + } else { + mod k256; + pub use k256::ecrecover; + } +} \ No newline at end of file diff --git a/crates/precompile/src/secp256k1/parity_libsecp256k1.rs b/crates/precompile/src/crypto/secp256k1/parity_libsecp256k1.rs similarity index 100% rename from crates/precompile/src/secp256k1/parity_libsecp256k1.rs rename to crates/precompile/src/crypto/secp256k1/parity_libsecp256k1.rs diff --git a/crates/precompile/src/crypto/secp256r1/constants.rs b/crates/precompile/src/crypto/secp256r1/constants.rs new file mode 100644 index 0000000000..24bff6de11 --- /dev/null +++ b/crates/precompile/src/crypto/secp256r1/constants.rs @@ -0,0 +1,10 @@ +//! 
Constants for secp256r1 (P-256) operations + +/// Length of the message hash (32 bytes) +pub const MESSAGE_HASH_LENGTH: usize = 32; + +/// Length of the signature (64 bytes: r || s) +pub const SIGNATURE_LENGTH: usize = 64; + +/// Length of the uncompressed public key (65 bytes: 0x04 || x || y) +pub const PUBKEY_LENGTH: usize = 65; \ No newline at end of file diff --git a/crates/precompile/src/crypto/secp256r1/mod.rs b/crates/precompile/src/crypto/secp256r1/mod.rs new file mode 100644 index 0000000000..08ca51d759 --- /dev/null +++ b/crates/precompile/src/crypto/secp256r1/mod.rs @@ -0,0 +1,27 @@ +//! secp256r1 (P-256) signature verification + +pub mod constants; + +use p256::ecdsa::{signature::hazmat::PrehashVerifier, Signature, VerifyingKey}; + +/// Verify a secp256r1 signature +/// +/// # Arguments +/// * `msg` - The message hash (32 bytes) +/// * `sig` - The signature (64 bytes: r || s) +/// * `pk` - The uncompressed public key (65 bytes: 0x04 || x || y) +/// +/// # Returns +/// `Some(())` if the signature is valid, `None` otherwise +pub fn verify_signature( + msg: &[u8; constants::MESSAGE_HASH_LENGTH], + sig: &[u8; constants::SIGNATURE_LENGTH], + pk: &[u8; constants::PUBKEY_LENGTH], +) -> Option<()> { + // Can fail only if the input is not exact length. + let signature = Signature::from_slice(sig).ok()?; + // Can fail if the input is not valid, so we have to propagate the error. + let public_key = VerifyingKey::from_sec1_bytes(pk).ok()?; + + public_key.verify_prehash(msg, &signature).ok() +} \ No newline at end of file diff --git a/crates/precompile/src/hash.rs b/crates/precompile/src/hash.rs index 58286d6485..a7e36dab5c 100644 --- a/crates/precompile/src/hash.rs +++ b/crates/precompile/src/hash.rs @@ -2,7 +2,6 @@ //! 
More details in [`sha256_run`] and [`ripemd160_run`] use super::calc_linear_cost_u32; use crate::{PrecompileError, PrecompileOutput, PrecompileResult, PrecompileWithAddress}; -use sha2::Digest; /// SHA-256 precompile pub const SHA256: PrecompileWithAddress = @@ -23,7 +22,7 @@ pub fn sha256_run(input: &[u8], gas_limit: u64) -> PrecompileResult { if cost > gas_limit { Err(PrecompileError::OutOfGas) } else { - let output = sha2::Sha256::digest(input); + let output = crate::crypto::hash::sha256(input); Ok(PrecompileOutput::new(cost, output.to_vec().into())) } } @@ -39,11 +38,7 @@ pub fn ripemd160_run(input: &[u8], gas_limit: u64) -> PrecompileResult { if gas_used > gas_limit { Err(PrecompileError::OutOfGas) } else { - let mut hasher = ripemd::Ripemd160::new(); - hasher.update(input); - - let mut output = [0u8; 32]; - hasher.finalize_into((&mut output[12..]).into()); + let output = crate::crypto::hash::ripemd160(input); Ok(PrecompileOutput::new(gas_used, output.to_vec().into())) } } diff --git a/crates/precompile/src/kzg_point_evaluation.rs b/crates/precompile/src/kzg_point_evaluation.rs index 9fb761cd7b..a28c386d9e 100644 --- a/crates/precompile/src/kzg_point_evaluation.rs +++ b/crates/precompile/src/kzg_point_evaluation.rs @@ -1,13 +1,6 @@ //! KZG point evaluation precompile added in [`EIP-4844`](https://eips.ethereum.org/EIPS/eip-4844) //! For more details check [`run`] function. use crate::{Address, PrecompileError, PrecompileOutput, PrecompileResult, PrecompileWithAddress}; -cfg_if::cfg_if! 
{ - if #[cfg(feature = "c-kzg")] { - use c_kzg::{Bytes32, Bytes48}; - } else if #[cfg(feature = "kzg-rs")] { - use kzg_rs::{Bytes32, Bytes48, KzgProof}; - } -} use primitives::hex_literal::hex; use sha2::{Digest, Sha256}; @@ -59,7 +52,7 @@ pub fn run(input: &[u8], gas_limit: u64) -> PrecompileResult { let z = input[32..64].try_into().unwrap(); let y = input[64..96].try_into().unwrap(); let proof = input[144..192].try_into().unwrap(); - if !verify_kzg_proof(commitment, z, y, proof) { + if !crate::crypto::kzg::verify_kzg_proof(commitment, z, y, proof) { return Err(PrecompileError::BlobVerifyKzgProofFailed); } @@ -75,49 +68,6 @@ pub fn kzg_to_versioned_hash(commitment: &[u8]) -> [u8; 32] { hash } -/// Verify KZG proof. -#[inline] -pub fn verify_kzg_proof( - commitment: &[u8; 48], - z: &[u8; 32], - y: &[u8; 32], - proof: &[u8; 48], -) -> bool { - cfg_if::cfg_if! { - if #[cfg(feature = "c-kzg")] { - let kzg_settings = c_kzg::ethereum_kzg_settings(8); - kzg_settings.verify_kzg_proof(as_bytes48(commitment), as_bytes32(z), as_bytes32(y), as_bytes48(proof)).unwrap_or(false) - } else if #[cfg(feature = "kzg-rs")] { - let env = kzg_rs::EnvKzgSettings::default(); - let kzg_settings = env.get(); - KzgProof::verify_kzg_proof(as_bytes48(commitment), as_bytes32(z), as_bytes32(y), as_bytes48(proof), kzg_settings).unwrap_or(false) - } - } -} - -/// Convert a slice to an array of a specific size. -#[inline] -#[track_caller] -fn as_array(bytes: &[u8]) -> &[u8; N] { - bytes.try_into().expect("slice with incorrect length") -} - -/// Convert a slice to a 32 byte big endian array. -#[inline] -#[track_caller] -fn as_bytes32(bytes: &[u8]) -> &Bytes32 { - // SAFETY: `#[repr(C)] Bytes32([u8; 32])` - unsafe { &*as_array::<32>(bytes).as_ptr().cast() } -} - -/// Convert a slice to a 48 byte big endian array. 
-#[inline] -#[track_caller] -fn as_bytes48(bytes: &[u8]) -> &Bytes48 { - // SAFETY: `#[repr(C)] Bytes48([u8; 48])` - unsafe { &*as_array::<48>(bytes).as_ptr().cast() } -} - #[cfg(test)] mod tests { use super::*; diff --git a/crates/precompile/src/lib.rs b/crates/precompile/src/lib.rs index 61079a1528..44b54692c6 100644 --- a/crates/precompile/src/lib.rs +++ b/crates/precompile/src/lib.rs @@ -8,6 +8,8 @@ #[cfg(not(feature = "std"))] extern crate alloc as std; +pub mod crypto; + pub mod blake2; pub mod bls12_381; pub mod bls12_381_const; diff --git a/crates/precompile/src/modexp.rs b/crates/precompile/src/modexp.rs index 08300abe3c..1686acef2d 100644 --- a/crates/precompile/src/modexp.rs +++ b/crates/precompile/src/modexp.rs @@ -6,7 +6,6 @@ use crate::{ }; use core::cmp::{max, min}; use primitives::{eip7823, Bytes, U256}; -use std::vec::Vec; /// `modexp` precompile with BYZANTIUM gas rules. pub const BYZANTIUM: PrecompileWithAddress = @@ -19,30 +18,6 @@ pub const BERLIN: PrecompileWithAddress = /// `modexp` precompile with OSAKA gas rules. 
pub const OSAKA: PrecompileWithAddress = PrecompileWithAddress(crate::u64_to_address(5), osaka_run); -#[cfg(feature = "gmp")] -/// GMP-based modular exponentiation implementation -fn modexp(base: &[u8], exponent: &[u8], modulus: &[u8]) -> Vec { - use rug::{integer::Order::Msf, Integer}; - // Convert byte slices to GMP integers - let base_int = Integer::from_digits(base, Msf); - let exp_int = Integer::from_digits(exponent, Msf); - let mod_int = Integer::from_digits(modulus, Msf); - - // Perform modular exponentiation using GMP's pow_mod - let result = base_int.pow_mod(&exp_int, &mod_int).unwrap_or_default(); - - // Convert result back to bytes - let byte_count = result.significant_bits().div_ceil(8); - let mut output = vec![0u8; byte_count as usize]; - result.write_digits(&mut output, Msf); - output -} - -#[cfg(not(feature = "gmp"))] -fn modexp(base: &[u8], exponent: &[u8], modulus: &[u8]) -> Vec { - aurora_engine_modexp::modexp(base, exponent, modulus) -} - /// See: /// See: pub fn byzantium_run(input: &[u8], gas_limit: u64) -> PrecompileResult { @@ -158,7 +133,7 @@ where debug_assert_eq!(modulus.len(), mod_len); // Call the modexp. - let output = modexp(base, exponent, modulus); + let output = crate::crypto::modexp::modexp(base, exponent, modulus); // Left pad the result to modulus length. bytes will always by less or equal to modulus length. Ok(PrecompileOutput::new( diff --git a/crates/precompile/src/secp256k1.rs b/crates/precompile/src/secp256k1.rs index 2ac987af13..2ff2e778b5 100644 --- a/crates/precompile/src/secp256k1.rs +++ b/crates/precompile/src/secp256k1.rs @@ -13,11 +13,6 @@ //! //! Output format: //! 
[32 bytes for recovered address] -#[cfg(feature = "secp256k1")] -pub mod bitcoin_secp256k1; -pub mod k256; -#[cfg(feature = "libsecp256k1")] -pub mod parity_libsecp256k1; use crate::{ utilities::right_pad, PrecompileError, PrecompileOutput, PrecompileResult, @@ -54,22 +49,11 @@ pub fn ec_recover_run(input: &[u8], gas_limit: u64) -> PrecompileResult { } fn ecrecover_bytes(sig: [u8; 64], recid: u8, msg: [u8; 32]) -> Option<[u8; 32]> { - let sig = B512::from_slice(&sig); - let msg = B256::from_slice(&msg); - - match ecrecover(&sig, recid, &msg) { - Ok(address) => Some(address.0), + let sig_b512 = B512::from(sig); + let msg_b256 = B256::from(msg); + + match crate::crypto::secp256k1::ecrecover(&sig_b512, recid, &msg_b256) { + Ok(result) => Some(result.0), Err(_) => None, } } - -// Select the correct implementation based on the enabled features. -cfg_if::cfg_if! { - if #[cfg(feature = "secp256k1")] { - pub use bitcoin_secp256k1::ecrecover; - } else if #[cfg(feature = "libsecp256k1")] { - pub use parity_libsecp256k1::ecrecover; - } else { - pub use k256::ecrecover; - } -} diff --git a/crates/precompile/src/secp256r1.rs b/crates/precompile/src/secp256r1.rs index 99318e2dad..86c381ee9a 100644 --- a/crates/precompile/src/secp256r1.rs +++ b/crates/precompile/src/secp256r1.rs @@ -9,7 +9,6 @@ use crate::{ u64_to_address, PrecompileError, PrecompileOutput, PrecompileResult, PrecompileWithAddress, }; -use p256::ecdsa::{signature::hazmat::PrehashVerifier, Signature, VerifyingKey}; use primitives::{alloy_primitives::B512, Bytes, B256}; /// Address of secp256r1 precompile. @@ -91,16 +90,7 @@ pub fn verify_impl(input: &[u8]) -> Option<()> { uncompressed_pk[0] = 0x04; uncompressed_pk[1..].copy_from_slice(pk); - verify_signature(msg.0, sig.0, uncompressed_pk) -} - -fn verify_signature(msg: [u8; 32], sig: [u8; 64], uncompressed_pk: [u8; 65]) -> Option<()> { - // Can fail only if the input is not exact length. 
- let signature = Signature::from_slice(&sig).ok()?; - // Can fail if the input is not valid, so we have to propagate the error. - let public_key = VerifyingKey::from_sec1_bytes(&uncompressed_pk).ok()?; - - public_key.verify_prehash(&msg, &signature).ok() + crate::crypto::secp256r1::verify_signature(&msg.0, &sig.0, &uncompressed_pk) } #[cfg(test)] From 498f84123174896db1679493189ccc819fa55cf9 Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Mon, 21 Jul 2025 18:46:55 +0100 Subject: [PATCH 02/16] fmt --- .../precompile/src/crypto/bls12_381/arkworks.rs | 16 ++++------------ crates/precompile/src/crypto/bls12_381/blst.rs | 2 +- .../precompile/src/crypto/bls12_381/constants.rs | 2 +- crates/precompile/src/crypto/bls12_381/mod.rs | 2 +- crates/precompile/src/crypto/bn128/arkworks.rs | 5 +---- crates/precompile/src/crypto/bn128/constants.rs | 2 +- crates/precompile/src/crypto/bn128/mod.rs | 2 +- crates/precompile/src/crypto/bn128/substrate.rs | 5 +---- crates/precompile/src/crypto/hash/constants.rs | 2 +- crates/precompile/src/crypto/hash/mod.rs | 4 ++-- crates/precompile/src/crypto/kzg/mod.rs | 8 ++++---- crates/precompile/src/crypto/mod.rs | 2 +- crates/precompile/src/crypto/modexp/mod.rs | 2 +- .../precompile/src/crypto/secp256k1/constants.rs | 2 +- crates/precompile/src/crypto/secp256k1/mod.rs | 2 +- .../precompile/src/crypto/secp256r1/constants.rs | 2 +- crates/precompile/src/crypto/secp256r1/mod.rs | 4 ++-- crates/precompile/src/secp256k1.rs | 2 +- 18 files changed, 26 insertions(+), 40 deletions(-) diff --git a/crates/precompile/src/crypto/bls12_381/arkworks.rs b/crates/precompile/src/crypto/bls12_381/arkworks.rs index 018525d7de..28af49f215 100644 --- a/crates/precompile/src/crypto/bls12_381/arkworks.rs +++ b/crates/precompile/src/crypto/bls12_381/arkworks.rs @@ -1,5 +1,5 @@ -use super::{G1Point, G2Point, PairingPair}; use super::constants::{FP_LENGTH, G1_LENGTH, G2_LENGTH, SCALAR_LENGTH}; +use super::{G1Point, G2Point, PairingPair}; use 
crate::PrecompileError; use ark_bls12_381::{Bls12_381, Fq, Fq2, Fr, G1Affine, G1Projective, G2Affine, G2Projective}; use ark_ec::{ @@ -403,10 +403,7 @@ pub fn pairing_check_bytes(pairs: &[PairingPair]) -> Result Result<[u8; G1_LENGTH], PrecompileError> { +pub fn p1_add_affine_bytes(a: G1Point, b: G1Point) -> Result<[u8; G1_LENGTH], PrecompileError> { let (a_x, a_y) = a; let (b_x, b_y) = b; // Parse first point @@ -424,10 +421,7 @@ pub fn p1_add_affine_bytes( /// Performs point addition on two G2 points taking byte coordinates. #[inline] -pub fn p2_add_affine_bytes( - a: G2Point, - b: G2Point, -) -> Result<[u8; G2_LENGTH], PrecompileError> { +pub fn p2_add_affine_bytes(a: G2Point, b: G2Point) -> Result<[u8; G2_LENGTH], PrecompileError> { let (a_x_0, a_x_1, a_y_0, a_y_1) = a; let (b_x_0, b_x_1, b_y_0, b_y_1) = b; // Parse first point @@ -445,9 +439,7 @@ pub fn p2_add_affine_bytes( /// Maps a field element to a G1 point from bytes #[inline] -pub fn map_fp_to_g1_bytes( - fp_bytes: &[u8; FP_LENGTH], -) -> Result<[u8; G1_LENGTH], PrecompileError> { +pub fn map_fp_to_g1_bytes(fp_bytes: &[u8; FP_LENGTH]) -> Result<[u8; G1_LENGTH], PrecompileError> { let fp = read_fp(fp_bytes)?; let result = map_fp_to_g1(&fp); Ok(encode_g1_point(&result)) diff --git a/crates/precompile/src/crypto/bls12_381/blst.rs b/crates/precompile/src/crypto/bls12_381/blst.rs index 3a90a818af..b3c2913c3b 100644 --- a/crates/precompile/src/crypto/bls12_381/blst.rs +++ b/crates/precompile/src/crypto/bls12_381/blst.rs @@ -1,7 +1,7 @@ // This module contains a safe wrapper around the blst library. 
-use super::{G1Point, G2Point, PairingPair}; use super::constants::{FP_LENGTH, G1_LENGTH, G2_LENGTH, SCALAR_LENGTH, SCALAR_LENGTH_BITS}; +use super::{G1Point, G2Point, PairingPair}; use crate::PrecompileError; use blst::{ blst_bendian_from_fp, blst_final_exp, blst_fp, blst_fp12, blst_fp12_is_one, blst_fp12_mul, diff --git a/crates/precompile/src/crypto/bls12_381/constants.rs b/crates/precompile/src/crypto/bls12_381/constants.rs index f706c7d539..9d76feb476 100644 --- a/crates/precompile/src/crypto/bls12_381/constants.rs +++ b/crates/precompile/src/crypto/bls12_381/constants.rs @@ -16,4 +16,4 @@ pub const G2_LENGTH: usize = 2 * FP2_LENGTH; pub const SCALAR_LENGTH: usize = 32; /// Number of bits in a scalar field element -pub const SCALAR_LENGTH_BITS: usize = SCALAR_LENGTH * 8; \ No newline at end of file +pub const SCALAR_LENGTH_BITS: usize = SCALAR_LENGTH * 8; diff --git a/crates/precompile/src/crypto/bls12_381/mod.rs b/crates/precompile/src/crypto/bls12_381/mod.rs index 7df00c93e2..c69cca0d61 100644 --- a/crates/precompile/src/crypto/bls12_381/mod.rs +++ b/crates/precompile/src/crypto/bls12_381/mod.rs @@ -44,4 +44,4 @@ cfg_if::cfg_if! { map_fp2_to_g2_bytes as fp2_to_g2 }; } -} \ No newline at end of file +} diff --git a/crates/precompile/src/crypto/bn128/arkworks.rs b/crates/precompile/src/crypto/bn128/arkworks.rs index 7df9de1fc0..8a857d95d8 100644 --- a/crates/precompile/src/crypto/bn128/arkworks.rs +++ b/crates/precompile/src/crypto/bn128/arkworks.rs @@ -194,10 +194,7 @@ pub fn g1_point_add(p1_bytes: &[u8], p2_bytes: &[u8]) -> Result<[u8; 64], Precom /// Performs a G1 scalar multiplication. 
#[inline] -pub fn g1_point_mul( - point_bytes: &[u8], - fr_bytes: &[u8], -) -> Result<[u8; 64], PrecompileError> { +pub fn g1_point_mul(point_bytes: &[u8], fr_bytes: &[u8]) -> Result<[u8; 64], PrecompileError> { let p = read_g1_point(point_bytes)?; let fr = read_scalar(fr_bytes); diff --git a/crates/precompile/src/crypto/bn128/constants.rs b/crates/precompile/src/crypto/bn128/constants.rs index 437675bc29..d78d493daa 100644 --- a/crates/precompile/src/crypto/bn128/constants.rs +++ b/crates/precompile/src/crypto/bn128/constants.rs @@ -10,4 +10,4 @@ pub const SCALAR_LEN: usize = 32; pub const FQ2_LEN: usize = 2 * FQ_LEN; /// G1 point length (x, y coordinates) -pub const G1_LEN: usize = 2 * FQ_LEN; \ No newline at end of file +pub const G1_LEN: usize = 2 * FQ_LEN; diff --git a/crates/precompile/src/crypto/bn128/mod.rs b/crates/precompile/src/crypto/bn128/mod.rs index 2a15557e3f..6484259ec6 100644 --- a/crates/precompile/src/crypto/bn128/mod.rs +++ b/crates/precompile/src/crypto/bn128/mod.rs @@ -10,4 +10,4 @@ cfg_if::cfg_if! { mod arkworks; pub use arkworks::{g1_point_add, g1_point_mul, pairing_check}; } -} \ No newline at end of file +} diff --git a/crates/precompile/src/crypto/bn128/substrate.rs b/crates/precompile/src/crypto/bn128/substrate.rs index 8c30b59a24..54c7387f47 100644 --- a/crates/precompile/src/crypto/bn128/substrate.rs +++ b/crates/precompile/src/crypto/bn128/substrate.rs @@ -159,10 +159,7 @@ pub fn g1_point_add(p1_bytes: &[u8], p2_bytes: &[u8]) -> Result<[u8; 64], Precom /// Performs a G1 scalar multiplication. 
#[inline] -pub fn g1_point_mul( - point_bytes: &[u8], - fr_bytes: &[u8], -) -> Result<[u8; 64], PrecompileError> { +pub fn g1_point_mul(point_bytes: &[u8], fr_bytes: &[u8]) -> Result<[u8; 64], PrecompileError> { let p = read_g1_point(point_bytes)?; let fr = read_scalar(fr_bytes); let result = p * fr; diff --git a/crates/precompile/src/crypto/hash/constants.rs b/crates/precompile/src/crypto/hash/constants.rs index 3e52de738c..69383df5af 100644 --- a/crates/precompile/src/crypto/hash/constants.rs +++ b/crates/precompile/src/crypto/hash/constants.rs @@ -4,4 +4,4 @@ pub const SHA256_LENGTH: usize = 32; /// RIPEMD-160 output length in bytes -pub const RIPEMD160_LENGTH: usize = 20; \ No newline at end of file +pub const RIPEMD160_LENGTH: usize = 20; diff --git a/crates/precompile/src/crypto/hash/mod.rs b/crates/precompile/src/crypto/hash/mod.rs index 2911103639..1a8c0df796 100644 --- a/crates/precompile/src/crypto/hash/mod.rs +++ b/crates/precompile/src/crypto/hash/mod.rs @@ -14,8 +14,8 @@ pub fn sha256(input: &[u8]) -> [u8; constants::SHA256_LENGTH] { pub fn ripemd160(input: &[u8]) -> [u8; 32] { let mut hasher = ripemd::Ripemd160::new(); hasher.update(input); - + let mut output = [0u8; 32]; hasher.finalize_into((&mut output[12..]).into()); output -} \ No newline at end of file +} diff --git a/crates/precompile/src/crypto/kzg/mod.rs b/crates/precompile/src/crypto/kzg/mod.rs index a5e5158f22..fad58082ad 100644 --- a/crates/precompile/src/crypto/kzg/mod.rs +++ b/crates/precompile/src/crypto/kzg/mod.rs @@ -20,9 +20,9 @@ pub fn verify_kzg_proof( if #[cfg(feature = "c-kzg")] { let kzg_settings = c_kzg::ethereum_kzg_settings(8); kzg_settings.verify_kzg_proof( - &Bytes48::from(*commitment), - &Bytes32::from(*z), - &Bytes32::from(*y), + &Bytes48::from(*commitment), + &Bytes32::from(*z), + &Bytes32::from(*y), &Bytes48::from(*proof) ).unwrap_or(false) } else if #[cfg(feature = "kzg-rs")] { @@ -37,4 +37,4 @@ pub fn verify_kzg_proof( ).unwrap_or(false) } } -} \ No newline at end of 
file +} diff --git a/crates/precompile/src/crypto/mod.rs b/crates/precompile/src/crypto/mod.rs index cde1cea6bc..020f346c38 100644 --- a/crates/precompile/src/crypto/mod.rs +++ b/crates/precompile/src/crypto/mod.rs @@ -26,4 +26,4 @@ pub mod modexp; pub mod secp256k1; /// secp256r1 (P-256) elliptic curve operations -pub mod secp256r1; \ No newline at end of file +pub mod secp256r1; diff --git a/crates/precompile/src/crypto/modexp/mod.rs b/crates/precompile/src/crypto/modexp/mod.rs index 28689d0110..c76174f2c2 100644 --- a/crates/precompile/src/crypto/modexp/mod.rs +++ b/crates/precompile/src/crypto/modexp/mod.rs @@ -25,4 +25,4 @@ pub fn modexp(base: &[u8], exponent: &[u8], modulus: &[u8]) -> Vec { /// Aurora engine modular exponentiation implementation pub fn modexp(base: &[u8], exponent: &[u8], modulus: &[u8]) -> Vec { aurora_engine_modexp::modexp(base, exponent, modulus) -} \ No newline at end of file +} diff --git a/crates/precompile/src/crypto/secp256k1/constants.rs b/crates/precompile/src/crypto/secp256k1/constants.rs index 7aafa7ec3f..e970d20621 100644 --- a/crates/precompile/src/crypto/secp256k1/constants.rs +++ b/crates/precompile/src/crypto/secp256k1/constants.rs @@ -4,4 +4,4 @@ pub const MESSAGE_HASH_LENGTH: usize = 32; /// Length of the signature (64 bytes: r || s) -pub const SIGNATURE_LENGTH: usize = 64; \ No newline at end of file +pub const SIGNATURE_LENGTH: usize = 64; diff --git a/crates/precompile/src/crypto/secp256k1/mod.rs b/crates/precompile/src/crypto/secp256k1/mod.rs index 98b22cb03b..e2f0d91d81 100644 --- a/crates/precompile/src/crypto/secp256k1/mod.rs +++ b/crates/precompile/src/crypto/secp256k1/mod.rs @@ -13,4 +13,4 @@ cfg_if::cfg_if! 
{ mod k256; pub use k256::ecrecover; } -} \ No newline at end of file +} diff --git a/crates/precompile/src/crypto/secp256r1/constants.rs b/crates/precompile/src/crypto/secp256r1/constants.rs index 24bff6de11..9ed75b8cb9 100644 --- a/crates/precompile/src/crypto/secp256r1/constants.rs +++ b/crates/precompile/src/crypto/secp256r1/constants.rs @@ -7,4 +7,4 @@ pub const MESSAGE_HASH_LENGTH: usize = 32; pub const SIGNATURE_LENGTH: usize = 64; /// Length of the uncompressed public key (65 bytes: 0x04 || x || y) -pub const PUBKEY_LENGTH: usize = 65; \ No newline at end of file +pub const PUBKEY_LENGTH: usize = 65; diff --git a/crates/precompile/src/crypto/secp256r1/mod.rs b/crates/precompile/src/crypto/secp256r1/mod.rs index 08ca51d759..1ea44e137c 100644 --- a/crates/precompile/src/crypto/secp256r1/mod.rs +++ b/crates/precompile/src/crypto/secp256r1/mod.rs @@ -5,7 +5,7 @@ pub mod constants; use p256::ecdsa::{signature::hazmat::PrehashVerifier, Signature, VerifyingKey}; /// Verify a secp256r1 signature -/// +/// /// # Arguments /// * `msg` - The message hash (32 bytes) /// * `sig` - The signature (64 bytes: r || s) @@ -24,4 +24,4 @@ pub fn verify_signature( let public_key = VerifyingKey::from_sec1_bytes(pk).ok()?; public_key.verify_prehash(msg, &signature).ok() -} \ No newline at end of file +} diff --git a/crates/precompile/src/secp256k1.rs b/crates/precompile/src/secp256k1.rs index 2ff2e778b5..7291435211 100644 --- a/crates/precompile/src/secp256k1.rs +++ b/crates/precompile/src/secp256k1.rs @@ -51,7 +51,7 @@ pub fn ec_recover_run(input: &[u8], gas_limit: u64) -> PrecompileResult { fn ecrecover_bytes(sig: [u8; 64], recid: u8, msg: [u8; 32]) -> Option<[u8; 32]> { let sig_b512 = B512::from(sig); let msg_b256 = B256::from(msg); - + match crate::crypto::secp256k1::ecrecover(&sig_b512, recid, &msg_b256) { Ok(result) => Some(result.0), Err(_) => None, From be9fa694f8c3e4f136938fafaacc015314800c66 Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Mon, 21 Jul 2025 
18:54:06 +0100 Subject: [PATCH 03/16] nit --- crates/precompile/src/secp256k1.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/crates/precompile/src/secp256k1.rs b/crates/precompile/src/secp256k1.rs index 7291435211..891dfb0846 100644 --- a/crates/precompile/src/secp256k1.rs +++ b/crates/precompile/src/secp256k1.rs @@ -49,11 +49,11 @@ pub fn ec_recover_run(input: &[u8], gas_limit: u64) -> PrecompileResult { } fn ecrecover_bytes(sig: [u8; 64], recid: u8, msg: [u8; 32]) -> Option<[u8; 32]> { - let sig_b512 = B512::from(sig); - let msg_b256 = B256::from(msg); + let sig = B512::from(sig); + let msg = B256::from(msg); - match crate::crypto::secp256k1::ecrecover(&sig_b512, recid, &msg_b256) { - Ok(result) => Some(result.0), + match crate::crypto::secp256k1::ecrecover(&sig, recid, &msg) { + Ok(address) => Some(address.0), Err(_) => None, } } From 6f026f2e28818ebfca456bc35ac893ebab7304a6 Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Mon, 21 Jul 2025 18:57:39 +0100 Subject: [PATCH 04/16] remove export alias --- crates/precompile/src/bls12_381/g1_msm.rs | 2 +- crates/precompile/src/bls12_381/g2_msm.rs | 2 +- crates/precompile/src/bls12_381/map_fp2_to_g2.rs | 2 +- crates/precompile/src/bls12_381/map_fp_to_g1.rs | 2 +- crates/precompile/src/crypto/bls12_381/mod.rs | 16 ++++++++-------- 5 files changed, 12 insertions(+), 12 deletions(-) diff --git a/crates/precompile/src/bls12_381/g1_msm.rs b/crates/precompile/src/bls12_381/g1_msm.rs index c03d732ddb..47e9f3f0c0 100644 --- a/crates/precompile/src/bls12_381/g1_msm.rs +++ b/crates/precompile/src/bls12_381/g1_msm.rs @@ -46,7 +46,7 @@ pub fn g1_msm(input: &[u8], gas_limit: u64) -> PrecompileResult { Ok((point, scalar_array)) }); - let unpadded_result = crate::crypto::bls12_381::g1_multiexp(valid_pairs_iter)?; + let unpadded_result = crate::crypto::bls12_381::p1_msm_bytes(valid_pairs_iter)?; // Pad the result for EVM compatibility let padded_result = pad_g1_point(&unpadded_result); diff --git 
a/crates/precompile/src/bls12_381/g2_msm.rs b/crates/precompile/src/bls12_381/g2_msm.rs index 6cfcbe0f60..726e3282d6 100644 --- a/crates/precompile/src/bls12_381/g2_msm.rs +++ b/crates/precompile/src/bls12_381/g2_msm.rs @@ -46,7 +46,7 @@ pub fn g2_msm(input: &[u8], gas_limit: u64) -> PrecompileResult { Ok((point, scalar_array)) }); - let unpadded_result = crate::crypto::bls12_381::g2_multiexp(valid_pairs_iter)?; + let unpadded_result = crate::crypto::bls12_381::p2_msm_bytes(valid_pairs_iter)?; // Pad the result for EVM compatibility let padded_result = pad_g2_point(&unpadded_result); diff --git a/crates/precompile/src/bls12_381/map_fp2_to_g2.rs b/crates/precompile/src/bls12_381/map_fp2_to_g2.rs index eb2424ade6..3a38a35d6d 100644 --- a/crates/precompile/src/bls12_381/map_fp2_to_g2.rs +++ b/crates/precompile/src/bls12_381/map_fp2_to_g2.rs @@ -30,7 +30,7 @@ pub fn map_fp2_to_g2(input: &[u8], gas_limit: u64) -> PrecompileResult { let input_p0_y = remove_fp_padding(&input[PADDED_FP_LENGTH..PADDED_FP2_LENGTH])?; // Get unpadded result from crypto backend - let unpadded_result = crate::crypto::bls12_381::fp2_to_g2(input_p0_x, input_p0_y)?; + let unpadded_result = crate::crypto::bls12_381::map_fp2_to_g2_bytes(input_p0_x, input_p0_y)?; // Pad the result for EVM compatibility let padded_result = pad_g2_point(&unpadded_result); diff --git a/crates/precompile/src/bls12_381/map_fp_to_g1.rs b/crates/precompile/src/bls12_381/map_fp_to_g1.rs index ce7d91f399..dede1947e3 100644 --- a/crates/precompile/src/bls12_381/map_fp_to_g1.rs +++ b/crates/precompile/src/bls12_381/map_fp_to_g1.rs @@ -25,7 +25,7 @@ pub fn map_fp_to_g1(input: &[u8], gas_limit: u64) -> PrecompileResult { let input_p0 = remove_fp_padding(input)?; // Get unpadded result from crypto backend - let unpadded_result = crate::crypto::bls12_381::fp_to_g1(input_p0)?; + let unpadded_result = crate::crypto::bls12_381::map_fp_to_g1_bytes(input_p0)?; // Pad the result for EVM compatibility let padded_result = 
pad_g1_point(&unpadded_result); diff --git a/crates/precompile/src/crypto/bls12_381/mod.rs b/crates/precompile/src/crypto/bls12_381/mod.rs index c69cca0d61..a9f6af9b2d 100644 --- a/crates/precompile/src/crypto/bls12_381/mod.rs +++ b/crates/precompile/src/crypto/bls12_381/mod.rs @@ -26,22 +26,22 @@ cfg_if::cfg_if! { pub use blst::{ p1_add_affine_bytes, p2_add_affine_bytes, - p1_msm_bytes as g1_multiexp, - p2_msm_bytes as g2_multiexp, + p1_msm_bytes, + p2_msm_bytes, pairing_check_bytes, - map_fp_to_g1_bytes as fp_to_g1, - map_fp2_to_g2_bytes as fp2_to_g2 + map_fp_to_g1_bytes, + map_fp2_to_g2_bytes }; } else { mod arkworks; pub use arkworks::{ p1_add_affine_bytes, p2_add_affine_bytes, - p1_msm_bytes as g1_multiexp, - p2_msm_bytes as g2_multiexp, + p1_msm_bytes, + p2_msm_bytes, pairing_check_bytes, - map_fp_to_g1_bytes as fp_to_g1, - map_fp2_to_g2_bytes as fp2_to_g2 + map_fp_to_g1_bytes, + map_fp2_to_g2_bytes }; } } From 64ae5798e556734ae70eaf83c3327d4a994a2d2d Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Mon, 21 Jul 2025 18:57:55 +0100 Subject: [PATCH 05/16] remove cryptography specific benchmark --- crates/precompile/bench/blake2.rs | 26 -------------------------- 1 file changed, 26 deletions(-) diff --git a/crates/precompile/bench/blake2.rs b/crates/precompile/bench/blake2.rs index 053552fad4..5d3b6744be 100644 --- a/crates/precompile/bench/blake2.rs +++ b/crates/precompile/bench/blake2.rs @@ -87,30 +87,4 @@ pub fn add_benches(group: &mut BenchmarkGroup<'_, criterion::measurement::WallTi black_box(blake2::run(black_box(input), u64::MAX).unwrap()); }); }); - - // Benchmark just the compression function with different round counts - group.bench_function("blake2/compress_12_rounds", |b| { - let h = [ - 0x6a09e667f3bcc908u64, - 0xbb67ae8584caa73bu64, - 0x3c6ef372fe94f82bu64, - 0xa54ff53a5f1d36f1u64, - 0x510e527fade682d1u64, - 0x9b05688c2b3e6c1fu64, - 0x1f83d9abfb41bd6bu64, - 0x5be0cd19137e2179u64, - ]; - let m = [0u64; 16]; - let t = [0u64, 0u64]; - 
b.iter(|| { - let mut h_copy = h; - blake2::algo::compress( - black_box(12), - &mut h_copy, - black_box(m), - black_box(t), - black_box(false), - ); - }); - }); } From a6eca5d896f5868cca883af1415ce9133c6bae52 Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Mon, 21 Jul 2025 19:00:41 +0100 Subject: [PATCH 06/16] add note about padding --- crates/precompile/src/crypto/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/precompile/src/crypto/mod.rs b/crates/precompile/src/crypto/mod.rs index 020f346c38..ee25bf37b6 100644 --- a/crates/precompile/src/crypto/mod.rs +++ b/crates/precompile/src/crypto/mod.rs @@ -1,7 +1,7 @@ //! Cryptographic backend implementations for precompiles //! //! This module contains pure cryptographic implementations used by various precompiles. -//! The precompile logic (addresses, gas costs, input parsing) remains in the parent modules. +//! The precompile and Ethereum specific logic (addresses, gas costs, input parsing, evm padding) remains in the parent modules. 
/// BN128 elliptic curve operations pub mod bn128; From 6b6838e04509aff5bc41b8491b32d285d6146860 Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Mon, 21 Jul 2025 19:02:43 +0100 Subject: [PATCH 07/16] add inline(always) for hash functions --- crates/precompile/src/crypto/hash/mod.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/crates/precompile/src/crypto/hash/mod.rs b/crates/precompile/src/crypto/hash/mod.rs index 1a8c0df796..9fa2eff7f6 100644 --- a/crates/precompile/src/crypto/hash/mod.rs +++ b/crates/precompile/src/crypto/hash/mod.rs @@ -5,12 +5,14 @@ pub mod constants; use sha2::Digest; /// Compute SHA-256 hash +#[inline(always)] pub fn sha256(input: &[u8]) -> [u8; constants::SHA256_LENGTH] { let output = sha2::Sha256::digest(input); output.into() } /// Compute RIPEMD-160 hash (padded to 32 bytes) +#[inline(always)] pub fn ripemd160(input: &[u8]) -> [u8; 32] { let mut hasher = ripemd::Ripemd160::new(); hasher.update(input); From 5caec666e1a396a6314499a22993b6b8de2e3f50 Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Mon, 21 Jul 2025 19:15:50 +0100 Subject: [PATCH 08/16] copy as_array --- crates/precompile/src/crypto/kzg/mod.rs | 38 ++++++++++++++++--------- 1 file changed, 25 insertions(+), 13 deletions(-) diff --git a/crates/precompile/src/crypto/kzg/mod.rs b/crates/precompile/src/crypto/kzg/mod.rs index fad58082ad..9236739431 100644 --- a/crates/precompile/src/crypto/kzg/mod.rs +++ b/crates/precompile/src/crypto/kzg/mod.rs @@ -19,22 +19,34 @@ pub fn verify_kzg_proof( cfg_if::cfg_if! 
{ if #[cfg(feature = "c-kzg")] { let kzg_settings = c_kzg::ethereum_kzg_settings(8); - kzg_settings.verify_kzg_proof( - &Bytes48::from(*commitment), - &Bytes32::from(*z), - &Bytes32::from(*y), - &Bytes48::from(*proof) - ).unwrap_or(false) + kzg_settings.verify_kzg_proof(as_bytes48(commitment), as_bytes32(z), as_bytes32(y), as_bytes48(proof)).unwrap_or(false) } else if #[cfg(feature = "kzg-rs")] { let env = kzg_rs::EnvKzgSettings::default(); let kzg_settings = env.get(); - KzgProof::verify_kzg_proof( - Bytes48::from(*commitment), - Bytes32::from(*z), - Bytes32::from(*y), - Bytes48::from(*proof), - kzg_settings - ).unwrap_or(false) + KzgProof::verify_kzg_proof(as_bytes48(commitment), as_bytes32(z), as_bytes32(y), as_bytes48(proof), kzg_settings).unwrap_or(false) } } } + +/// Convert a slice to an array of a specific size. +#[inline] +#[track_caller] +fn as_array(bytes: &[u8]) -> &[u8; N] { + bytes.try_into().expect("slice with incorrect length") +} + +/// Convert a slice to a 32 byte big endian array. +#[inline] +#[track_caller] +fn as_bytes32(bytes: &[u8]) -> &Bytes32 { + // SAFETY: `#[repr(C)] Bytes32([u8; 32])` + unsafe { &*as_array::<32>(bytes).as_ptr().cast() } +} + +/// Convert a slice to a 48 byte big endian array. 
+#[inline] +#[track_caller] +fn as_bytes48(bytes: &[u8]) -> &Bytes48 { + // SAFETY: `#[repr(C)] Bytes48([u8; 48])` + unsafe { &*as_array::<48>(bytes).as_ptr().cast() } +} From 81643c9a8d685942bec61062b9f86801f4f96f56 Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Mon, 21 Jul 2025 19:44:53 +0100 Subject: [PATCH 09/16] move cfg to crypto specific sub-modules --- crates/precompile/src/crypto/blake2/mod.rs | 4 ++ crates/precompile/src/crypto/bls12_381/mod.rs | 10 ++++ crates/precompile/src/crypto/bn128/mod.rs | 10 ++++ crates/precompile/src/crypto/kzg/mod.rs | 4 ++ crates/precompile/src/crypto/modexp/mod.rs | 47 ++++++++++--------- crates/precompile/src/crypto/secp256k1/mod.rs | 11 +++++ crates/precompile/src/lib.rs | 31 ------------ 7 files changed, 65 insertions(+), 52 deletions(-) diff --git a/crates/precompile/src/crypto/blake2/mod.rs b/crates/precompile/src/crypto/blake2/mod.rs index 867c061423..c4dd95b8bd 100644 --- a/crates/precompile/src/crypto/blake2/mod.rs +++ b/crates/precompile/src/crypto/blake2/mod.rs @@ -1,5 +1,9 @@ //! Blake2 cryptographic implementations +// Silence arrayref when AVX2 is not available +#[cfg(not(all(target_feature = "avx2", feature = "std")))] +use arrayref as _; + // Re-export the main compress function for external use pub use algo::compress; diff --git a/crates/precompile/src/crypto/bls12_381/mod.rs b/crates/precompile/src/crypto/bls12_381/mod.rs index a9f6af9b2d..bc36b466f6 100644 --- a/crates/precompile/src/crypto/bls12_381/mod.rs +++ b/crates/precompile/src/crypto/bls12_381/mod.rs @@ -2,6 +2,16 @@ pub mod constants; +// silence arkworks-bls12-381 lint as blst will be used as default if both are enabled. +cfg_if::cfg_if! 
{ + if #[cfg(feature = "blst")]{ + use ark_bls12_381 as _; + use ark_ff as _; + use ark_ec as _; + use ark_serialize as _; + } +} + // Re-export type aliases used by implementations pub use constants::FP_LENGTH; /// G1 point represented as two field elements (x, y coordinates) diff --git a/crates/precompile/src/crypto/bn128/mod.rs b/crates/precompile/src/crypto/bn128/mod.rs index 6484259ec6..4e735b19a3 100644 --- a/crates/precompile/src/crypto/bn128/mod.rs +++ b/crates/precompile/src/crypto/bn128/mod.rs @@ -2,6 +2,16 @@ pub mod constants; +// silence arkworks lint as bn impl will be used as default if both are enabled. +cfg_if::cfg_if! { + if #[cfg(feature = "bn")]{ + use ark_bn254 as _; + use ark_ff as _; + use ark_ec as _; + use ark_serialize as _; + } +} + cfg_if::cfg_if! { if #[cfg(feature = "bn")]{ mod substrate; diff --git a/crates/precompile/src/crypto/kzg/mod.rs b/crates/precompile/src/crypto/kzg/mod.rs index 9236739431..f620e30c77 100644 --- a/crates/precompile/src/crypto/kzg/mod.rs +++ b/crates/precompile/src/crypto/kzg/mod.rs @@ -1,5 +1,9 @@ //! KZG (Kate-Zaverucha-Goldberg) point evaluation +#[cfg(all(feature = "c-kzg", feature = "kzg-rs"))] +// silence kzg-rs lint as c-kzg will be used as default if both are enabled. +use kzg_rs as _; + cfg_if::cfg_if! 
{ if #[cfg(feature = "c-kzg")] { use c_kzg::{Bytes32, Bytes48}; diff --git a/crates/precompile/src/crypto/modexp/mod.rs b/crates/precompile/src/crypto/modexp/mod.rs index c76174f2c2..c871e3f966 100644 --- a/crates/precompile/src/crypto/modexp/mod.rs +++ b/crates/precompile/src/crypto/modexp/mod.rs @@ -2,27 +2,32 @@ use std::vec::Vec; -#[cfg(feature = "gmp")] -/// GMP-based modular exponentiation implementation -pub fn modexp(base: &[u8], exponent: &[u8], modulus: &[u8]) -> Vec { - use rug::{integer::Order::Msf, Integer}; - // Convert byte slices to GMP integers - let base_int = Integer::from_digits(base, Msf); - let exp_int = Integer::from_digits(exponent, Msf); - let mod_int = Integer::from_digits(modulus, Msf); +cfg_if::cfg_if! { + if #[cfg(feature = "gmp")] { + // Silence aurora-engine-modexp when gmp is enabled + use aurora_engine_modexp as _; + + /// GMP-based modular exponentiation implementation + pub fn modexp(base: &[u8], exponent: &[u8], modulus: &[u8]) -> Vec { + use rug::{integer::Order::Msf, Integer}; + // Convert byte slices to GMP integers + let base_int = Integer::from_digits(base, Msf); + let exp_int = Integer::from_digits(exponent, Msf); + let mod_int = Integer::from_digits(modulus, Msf); - // Perform modular exponentiation using GMP's pow_mod - let result = base_int.pow_mod(&exp_int, &mod_int).unwrap_or_default(); + // Perform modular exponentiation using GMP's pow_mod + let result = base_int.pow_mod(&exp_int, &mod_int).unwrap_or_default(); - // Convert result back to bytes - let byte_count = result.significant_bits().div_ceil(8); - let mut output = vec![0u8; byte_count as usize]; - result.write_digits(&mut output, Msf); - output -} - -#[cfg(not(feature = "gmp"))] -/// Aurora engine modular exponentiation implementation -pub fn modexp(base: &[u8], exponent: &[u8], modulus: &[u8]) -> Vec { - aurora_engine_modexp::modexp(base, exponent, modulus) + // Convert result back to bytes + let byte_count = result.significant_bits().div_ceil(8); + let mut 
output = vec![0u8; byte_count as usize]; + result.write_digits(&mut output, Msf); + output + } + } else { + /// Aurora engine modular exponentiation implementation + pub fn modexp(base: &[u8], exponent: &[u8], modulus: &[u8]) -> Vec { + aurora_engine_modexp::modexp(base, exponent, modulus) + } + } } diff --git a/crates/precompile/src/crypto/secp256k1/mod.rs b/crates/precompile/src/crypto/secp256k1/mod.rs index e2f0d91d81..4d1a7e078f 100644 --- a/crates/precompile/src/crypto/secp256k1/mod.rs +++ b/crates/precompile/src/crypto/secp256k1/mod.rs @@ -2,13 +2,24 @@ pub mod constants; +// Select and silence unused dependencies based on feature selection cfg_if::cfg_if! { if #[cfg(feature = "secp256k1")]{ mod bitcoin_secp256k1; pub use bitcoin_secp256k1::ecrecover; + + // k256 is unused when secp256k1 is selected + use k256 as _; + + // libsecp256k1 is also unused when secp256k1 is selected + #[cfg(feature = "libsecp256k1")] + use libsecp256k1 as _; } else if #[cfg(feature = "libsecp256k1")]{ mod parity_libsecp256k1; pub use parity_libsecp256k1::ecrecover; + + // k256 is unused when libsecp256k1 is selected + use k256 as _; } else { mod k256; pub use k256::ecrecover; diff --git a/crates/precompile/src/lib.rs b/crates/precompile/src/lib.rs index 44b54692c6..6b3a337dea 100644 --- a/crates/precompile/src/lib.rs +++ b/crates/precompile/src/lib.rs @@ -27,37 +27,6 @@ pub mod utilities; pub use interface::*; -// silence arkworks lint as bn impl will be used as default if both are enabled. -cfg_if::cfg_if! { - if #[cfg(feature = "bn")]{ - use ark_bn254 as _; - use ark_ff as _; - use ark_ec as _; - use ark_serialize as _; - } -} - -#[cfg(not(target_feature = "avx2"))] -use arrayref as _; - -#[cfg(all(feature = "c-kzg", feature = "kzg-rs"))] -// silence kzg-rs lint as c-kzg will be used as default if both are enabled. -use kzg_rs as _; - -// silence arkworks-bls12-381 lint as blst will be used as default if both are enabled. -cfg_if::cfg_if! 
{ - if #[cfg(feature = "blst")]{ - use ark_bls12_381 as _; - use ark_ff as _; - use ark_ec as _; - use ark_serialize as _; - } -} - -// silence aurora-engine-modexp if gmp is enabled -#[cfg(feature = "gmp")] -use aurora_engine_modexp as _; - use cfg_if::cfg_if; use core::hash::Hash; use once_cell::race::OnceBox; From 2e9c3777378756aa067bc0a700557a087f6ed955 Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Mon, 21 Jul 2025 19:47:31 +0100 Subject: [PATCH 10/16] move doc comment to crypto specific module --- crates/precompile/src/crypto/secp256k1/mod.rs | 8 ++++++++ crates/precompile/src/secp256k1.rs | 9 ++------- 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/crates/precompile/src/crypto/secp256k1/mod.rs b/crates/precompile/src/crypto/secp256k1/mod.rs index 4d1a7e078f..82a6bfbd5d 100644 --- a/crates/precompile/src/crypto/secp256k1/mod.rs +++ b/crates/precompile/src/crypto/secp256k1/mod.rs @@ -1,4 +1,12 @@ //! secp256k1 cryptographic implementations +//! +//! Depending on enabled features, it will use different implementations of `ecrecover`: +//! * [`k256`](https://crates.io/crates/k256) - uses maintained pure rust lib `k256`, it is perfect use for no_std environments. +//! * [`secp256k1`](https://crates.io/crates/secp256k1) - uses `bitcoin_secp256k1` lib, it is a C implementation of secp256k1 used in bitcoin core. +//! It is faster than k256 and enabled by default and in std environment. +//! * [`libsecp256k1`](https://crates.io/crates/libsecp256k1) - is made from parity in pure rust, it is alternative for k256. +//! +//! Order of preference is `secp256k1` -> `k256` -> `libsecp256k1`. Where if no features are enabled, it will use `k256`. pub mod constants; diff --git a/crates/precompile/src/secp256k1.rs b/crates/precompile/src/secp256k1.rs index 891dfb0846..a8e41b6a94 100644 --- a/crates/precompile/src/secp256k1.rs +++ b/crates/precompile/src/secp256k1.rs @@ -1,12 +1,7 @@ //! `ecrecover` precompile. //! -//! 
Depending on enabled features, it will use different implementations of `ecrecover`. -//! * [`k256`](https://crates.io/crates/k256) - uses maintained pure rust lib `k256`, it is perfect use for no_std environments. -//! * [`secp256k1`](https://crates.io/crates/secp256k1) - uses `bitcoin_secp256k1` lib, it is a C implementation of secp256k1 used in bitcoin core. -//! It is faster than k256 and enabled by default and in std environment. -//! * [`libsecp256k1`](https://crates.io/crates/libsecp256k1) - is made from parity in pure rust, it is alternative for k256. -//! -//! Order of preference is `secp256k1` -> `k256` -> `libsecp256k1`. Where if no features are enabled, it will use `k256`. +//! The implementation uses the `secp256k1` curve to recover the public key from a signature. +//! See [`crypto::secp256k1`](crate::crypto::secp256k1) for the underlying implementations. //! //! Input format: //! [32 bytes for message][64 bytes for signature][1 byte for recovery id] From 716de372351c7a8ca6332176ad70eb9bb7ef9078 Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Mon, 21 Jul 2025 19:48:13 +0100 Subject: [PATCH 11/16] fmt --- crates/precompile/src/crypto/modexp/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/precompile/src/crypto/modexp/mod.rs b/crates/precompile/src/crypto/modexp/mod.rs index c871e3f966..362aaeeeb5 100644 --- a/crates/precompile/src/crypto/modexp/mod.rs +++ b/crates/precompile/src/crypto/modexp/mod.rs @@ -6,7 +6,7 @@ cfg_if::cfg_if! 
{ if #[cfg(feature = "gmp")] { // Silence aurora-engine-modexp when gmp is enabled use aurora_engine_modexp as _; - + /// GMP-based modular exponentiation implementation pub fn modexp(base: &[u8], exponent: &[u8], modulus: &[u8]) -> Vec { use rug::{integer::Order::Msf, Integer}; From acbbc5bdd46d6beefe537f16895111b4664c9046 Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Mon, 21 Jul 2025 20:11:03 +0100 Subject: [PATCH 12/16] remove constants file --- crates/precompile/src/crypto/blake2/constants.rs | 7 ------- 1 file changed, 7 deletions(-) delete mode 100644 crates/precompile/src/crypto/blake2/constants.rs diff --git a/crates/precompile/src/crypto/blake2/constants.rs b/crates/precompile/src/crypto/blake2/constants.rs deleted file mode 100644 index 7b825df187..0000000000 --- a/crates/precompile/src/crypto/blake2/constants.rs +++ /dev/null @@ -1,7 +0,0 @@ -//! Constants for Blake2 compression - -/// Length of the Blake2 state vector -pub const STATE_LENGTH: usize = 8; - -/// Length of the Blake2 message block in bytes -pub const MESSAGE_LENGTH: usize = 128; \ No newline at end of file From 17c3d7ecbd6d2a8b310f0786f4d2d6cf6a9f9d52 Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Mon, 21 Jul 2025 20:43:31 +0100 Subject: [PATCH 13/16] remove mod.rs and use folder_name.rs --- .../src/{crypto/mod.rs => crypto.rs} | 0 .../src/crypto/{blake2/mod.rs => blake2.rs} | 0 .../crypto/{bls12_381/mod.rs => bls12_381.rs} | 52 +++++++++---------- .../src/crypto/{bn128/mod.rs => bn128.rs} | 20 +++---- .../src/crypto/{hash/mod.rs => hash.rs} | 10 +++- .../precompile/src/crypto/hash/constants.rs | 7 --- .../src/crypto/{kzg/mod.rs => kzg.rs} | 0 crates/precompile/src/crypto/modexp.rs | 32 ++++++++++++ crates/precompile/src/crypto/modexp/mod.rs | 33 ------------ .../crypto/{secp256k1/mod.rs => secp256k1.rs} | 0 .../crypto/{secp256r1/mod.rs => secp256r1.rs} | 13 ++++- .../src/crypto/secp256r1/constants.rs | 10 ---- 12 files changed, 89 insertions(+), 88 deletions(-) 
rename crates/precompile/src/{crypto/mod.rs => crypto.rs} (100%) rename crates/precompile/src/crypto/{blake2/mod.rs => blake2.rs} (100%) rename crates/precompile/src/crypto/{bls12_381/mod.rs => bls12_381.rs} (100%) rename crates/precompile/src/crypto/{bn128/mod.rs => bn128.rs} (100%) rename crates/precompile/src/crypto/{hash/mod.rs => hash.rs} (69%) delete mode 100644 crates/precompile/src/crypto/hash/constants.rs rename crates/precompile/src/crypto/{kzg/mod.rs => kzg.rs} (100%) create mode 100644 crates/precompile/src/crypto/modexp.rs delete mode 100644 crates/precompile/src/crypto/modexp/mod.rs rename crates/precompile/src/crypto/{secp256k1/mod.rs => secp256k1.rs} (100%) rename crates/precompile/src/crypto/{secp256r1/mod.rs => secp256r1.rs} (70%) delete mode 100644 crates/precompile/src/crypto/secp256r1/constants.rs diff --git a/crates/precompile/src/crypto/mod.rs b/crates/precompile/src/crypto.rs similarity index 100% rename from crates/precompile/src/crypto/mod.rs rename to crates/precompile/src/crypto.rs diff --git a/crates/precompile/src/crypto/blake2/mod.rs b/crates/precompile/src/crypto/blake2.rs similarity index 100% rename from crates/precompile/src/crypto/blake2/mod.rs rename to crates/precompile/src/crypto/blake2.rs diff --git a/crates/precompile/src/crypto/bls12_381/mod.rs b/crates/precompile/src/crypto/bls12_381.rs similarity index 100% rename from crates/precompile/src/crypto/bls12_381/mod.rs rename to crates/precompile/src/crypto/bls12_381.rs index bc36b466f6..c50bad0ef5 100644 --- a/crates/precompile/src/crypto/bls12_381/mod.rs +++ b/crates/precompile/src/crypto/bls12_381.rs @@ -2,6 +2,32 @@ pub mod constants; +cfg_if::cfg_if! 
{ + if #[cfg(feature = "blst")]{ + mod blst; + pub use blst::{ + p1_add_affine_bytes, + p2_add_affine_bytes, + p1_msm_bytes, + p2_msm_bytes, + pairing_check_bytes, + map_fp_to_g1_bytes, + map_fp2_to_g2_bytes + }; + } else { + mod arkworks; + pub use arkworks::{ + p1_add_affine_bytes, + p2_add_affine_bytes, + p1_msm_bytes, + p2_msm_bytes, + pairing_check_bytes, + map_fp_to_g1_bytes, + map_fp2_to_g2_bytes + }; + } +} + // silence arkworks-bls12-381 lint as blst will be used as default if both are enabled. cfg_if::cfg_if! { if #[cfg(feature = "blst")]{ @@ -29,29 +55,3 @@ pub type PairingPair = (G1Point, G2Point); pub type G1PointScalarPair = (G1Point, [u8; constants::SCALAR_LENGTH]); /// G2 point paired with a scalar for multi-scalar multiplication pub type G2PointScalarPair = (G2Point, [u8; constants::SCALAR_LENGTH]); - -cfg_if::cfg_if! { - if #[cfg(feature = "blst")]{ - mod blst; - pub use blst::{ - p1_add_affine_bytes, - p2_add_affine_bytes, - p1_msm_bytes, - p2_msm_bytes, - pairing_check_bytes, - map_fp_to_g1_bytes, - map_fp2_to_g2_bytes - }; - } else { - mod arkworks; - pub use arkworks::{ - p1_add_affine_bytes, - p2_add_affine_bytes, - p1_msm_bytes, - p2_msm_bytes, - pairing_check_bytes, - map_fp_to_g1_bytes, - map_fp2_to_g2_bytes - }; - } -} diff --git a/crates/precompile/src/crypto/bn128/mod.rs b/crates/precompile/src/crypto/bn128.rs similarity index 100% rename from crates/precompile/src/crypto/bn128/mod.rs rename to crates/precompile/src/crypto/bn128.rs index 4e735b19a3..51c280b345 100644 --- a/crates/precompile/src/crypto/bn128/mod.rs +++ b/crates/precompile/src/crypto/bn128.rs @@ -2,16 +2,6 @@ pub mod constants; -// silence arkworks lint as bn impl will be used as default if both are enabled. -cfg_if::cfg_if! { - if #[cfg(feature = "bn")]{ - use ark_bn254 as _; - use ark_ff as _; - use ark_ec as _; - use ark_serialize as _; - } -} - cfg_if::cfg_if! { if #[cfg(feature = "bn")]{ mod substrate; @@ -21,3 +11,13 @@ cfg_if::cfg_if! 
{ pub use arkworks::{g1_point_add, g1_point_mul, pairing_check}; } } + +// silence arkworks lint as bn impl will be used as default if both are enabled. +cfg_if::cfg_if! { + if #[cfg(feature = "bn")]{ + use ark_bn254 as _; + use ark_ff as _; + use ark_ec as _; + use ark_serialize as _; + } +} diff --git a/crates/precompile/src/crypto/hash/mod.rs b/crates/precompile/src/crypto/hash.rs similarity index 69% rename from crates/precompile/src/crypto/hash/mod.rs rename to crates/precompile/src/crypto/hash.rs index 9fa2eff7f6..043223da20 100644 --- a/crates/precompile/src/crypto/hash/mod.rs +++ b/crates/precompile/src/crypto/hash.rs @@ -1,6 +1,14 @@ //! Hash function implementations -pub mod constants; +pub mod constants { + //! Constants for hash functions + + /// SHA-256 output length in bytes + pub const SHA256_LENGTH: usize = 32; + + /// RIPEMD-160 output length in bytes + pub const RIPEMD160_LENGTH: usize = 20; +} use sha2::Digest; diff --git a/crates/precompile/src/crypto/hash/constants.rs b/crates/precompile/src/crypto/hash/constants.rs deleted file mode 100644 index 69383df5af..0000000000 --- a/crates/precompile/src/crypto/hash/constants.rs +++ /dev/null @@ -1,7 +0,0 @@ -//! Constants for hash functions - -/// SHA-256 output length in bytes -pub const SHA256_LENGTH: usize = 32; - -/// RIPEMD-160 output length in bytes -pub const RIPEMD160_LENGTH: usize = 20; diff --git a/crates/precompile/src/crypto/kzg/mod.rs b/crates/precompile/src/crypto/kzg.rs similarity index 100% rename from crates/precompile/src/crypto/kzg/mod.rs rename to crates/precompile/src/crypto/kzg.rs diff --git a/crates/precompile/src/crypto/modexp.rs b/crates/precompile/src/crypto/modexp.rs new file mode 100644 index 0000000000..c7fa1466e7 --- /dev/null +++ b/crates/precompile/src/crypto/modexp.rs @@ -0,0 +1,32 @@ +//! 
Modular exponentiation implementations + +use std::vec::Vec; + +// silence aurora-engine-modexp if gmp is enabled +#[cfg(feature = "gmp")] +use aurora_engine_modexp as _; + +#[cfg(feature = "gmp")] +/// GMP-based modular exponentiation implementation +pub fn modexp(base: &[u8], exponent: &[u8], modulus: &[u8]) -> Vec { + use rug::{integer::Order::Msf, Integer}; + // Convert byte slices to GMP integers + let base_int = Integer::from_digits(base, Msf); + let exp_int = Integer::from_digits(exponent, Msf); + let mod_int = Integer::from_digits(modulus, Msf); + + // Perform modular exponentiation using GMP's pow_mod + let result = base_int.pow_mod(&exp_int, &mod_int).unwrap_or_default(); + + // Convert result back to bytes + let byte_count = result.significant_bits().div_ceil(8); + let mut output = vec![0u8; byte_count as usize]; + result.write_digits(&mut output, Msf); + output +} + +#[cfg(not(feature = "gmp"))] +/// Aurora engine modular exponentiation implementation +pub fn modexp(base: &[u8], exponent: &[u8], modulus: &[u8]) -> Vec { + aurora_engine_modexp::modexp(base, exponent, modulus) +} diff --git a/crates/precompile/src/crypto/modexp/mod.rs b/crates/precompile/src/crypto/modexp/mod.rs deleted file mode 100644 index 362aaeeeb5..0000000000 --- a/crates/precompile/src/crypto/modexp/mod.rs +++ /dev/null @@ -1,33 +0,0 @@ -//! Modular exponentiation implementations - -use std::vec::Vec; - -cfg_if::cfg_if! 
{ - if #[cfg(feature = "gmp")] { - // Silence aurora-engine-modexp when gmp is enabled - use aurora_engine_modexp as _; - - /// GMP-based modular exponentiation implementation - pub fn modexp(base: &[u8], exponent: &[u8], modulus: &[u8]) -> Vec { - use rug::{integer::Order::Msf, Integer}; - // Convert byte slices to GMP integers - let base_int = Integer::from_digits(base, Msf); - let exp_int = Integer::from_digits(exponent, Msf); - let mod_int = Integer::from_digits(modulus, Msf); - - // Perform modular exponentiation using GMP's pow_mod - let result = base_int.pow_mod(&exp_int, &mod_int).unwrap_or_default(); - - // Convert result back to bytes - let byte_count = result.significant_bits().div_ceil(8); - let mut output = vec![0u8; byte_count as usize]; - result.write_digits(&mut output, Msf); - output - } - } else { - /// Aurora engine modular exponentiation implementation - pub fn modexp(base: &[u8], exponent: &[u8], modulus: &[u8]) -> Vec { - aurora_engine_modexp::modexp(base, exponent, modulus) - } - } -} diff --git a/crates/precompile/src/crypto/secp256k1/mod.rs b/crates/precompile/src/crypto/secp256k1.rs similarity index 100% rename from crates/precompile/src/crypto/secp256k1/mod.rs rename to crates/precompile/src/crypto/secp256k1.rs diff --git a/crates/precompile/src/crypto/secp256r1/mod.rs b/crates/precompile/src/crypto/secp256r1.rs similarity index 70% rename from crates/precompile/src/crypto/secp256r1/mod.rs rename to crates/precompile/src/crypto/secp256r1.rs index 1ea44e137c..b966db9470 100644 --- a/crates/precompile/src/crypto/secp256r1/mod.rs +++ b/crates/precompile/src/crypto/secp256r1.rs @@ -1,6 +1,17 @@ //! secp256r1 (P-256) signature verification -pub mod constants; +pub mod constants { + //! 
Constants for secp256r1 (P-256) operations + + /// Length of the message hash (32 bytes) + pub const MESSAGE_HASH_LENGTH: usize = 32; + + /// Length of the signature (64 bytes: r || s) + pub const SIGNATURE_LENGTH: usize = 64; + + /// Length of the uncompressed public key (65 bytes: 0x04 || x || y) + pub const PUBKEY_LENGTH: usize = 65; +} use p256::ecdsa::{signature::hazmat::PrehashVerifier, Signature, VerifyingKey}; diff --git a/crates/precompile/src/crypto/secp256r1/constants.rs b/crates/precompile/src/crypto/secp256r1/constants.rs deleted file mode 100644 index 9ed75b8cb9..0000000000 --- a/crates/precompile/src/crypto/secp256r1/constants.rs +++ /dev/null @@ -1,10 +0,0 @@ -//! Constants for secp256r1 (P-256) operations - -/// Length of the message hash (32 bytes) -pub const MESSAGE_HASH_LENGTH: usize = 32; - -/// Length of the signature (64 bytes: r || s) -pub const SIGNATURE_LENGTH: usize = 64; - -/// Length of the uncompressed public key (65 bytes: 0x04 || x || y) -pub const PUBKEY_LENGTH: usize = 65; From 4a7b8b673f0fa827834993e32ef9ea5a38f42d12 Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Tue, 22 Jul 2025 15:44:38 +0100 Subject: [PATCH 14/16] add CryptoProvider trait --- crates/precompile/src/crypto.rs | 276 ++++++++++++++++++++++++++++++++ 1 file changed, 276 insertions(+) diff --git a/crates/precompile/src/crypto.rs b/crates/precompile/src/crypto.rs index ee25bf37b6..51e99fc9ce 100644 --- a/crates/precompile/src/crypto.rs +++ b/crates/precompile/src/crypto.rs @@ -3,6 +3,11 @@ //! This module contains pure cryptographic implementations used by various precompiles. //! The precompile and Ethereum specific logic (addresses, gas costs, input parsing, evm padding) remains in the parent modules. 
+use crate::PrecompileError; +use once_cell::race::OnceBox; +use std::boxed::Box; +use std::vec::Vec; + /// BN128 elliptic curve operations pub mod bn128; @@ -27,3 +32,274 @@ pub mod secp256k1; /// secp256r1 (P-256) elliptic curve operations pub mod secp256r1; + +// Import constants and types needed by the trait +use bls12_381::constants::{FP_LENGTH, G1_LENGTH, G2_LENGTH, SCALAR_LENGTH}; +use bls12_381::{G1Point, G2Point, PairingPair}; +use primitives::{alloy_primitives::B512, B256}; + +/// Trait for cryptographic operations used by precompiles. +pub trait CryptoProvider: Send + Sync + 'static { + /// BN128 elliptic curve addition. + fn bn128_g1_add(&self, p1_bytes: &[u8], p2_bytes: &[u8]) -> Result<[u8; 64], PrecompileError>; + + /// BN128 elliptic curve scalar multiplication. + fn bn128_g1_mul( + &self, + point_bytes: &[u8], + fr_bytes: &[u8], + ) -> Result<[u8; 64], PrecompileError>; + + /// BN128 pairing check. + fn bn128_pairing_check(&self, pairs: &[(&[u8], &[u8])]) -> Result; + + /// BLS12-381 G1 point addition. + fn bls12_381_g1_add(&self, a: G1Point, b: G1Point) -> Result<[u8; G1_LENGTH], PrecompileError>; + + /// BLS12-381 G2 point addition. + fn bls12_381_g2_add(&self, a: G2Point, b: G2Point) -> Result<[u8; G2_LENGTH], PrecompileError>; + + /// BLS12-381 G1 multi-scalar multiplication. + fn bls12_381_g1_msm( + &self, + points_scalars: Box< + dyn Iterator> + '_, + >, + ) -> Result<[u8; G1_LENGTH], PrecompileError>; + + /// BLS12-381 G2 multi-scalar multiplication. + fn bls12_381_g2_msm( + &self, + points_scalars: Box< + dyn Iterator> + '_, + >, + ) -> Result<[u8; G2_LENGTH], PrecompileError>; + + /// BLS12-381 pairing check. + fn bls12_381_pairing_check(&self, pairs: &[PairingPair]) -> Result; + + /// BLS12-381 map field element to G1. + fn bls12_381_fp_to_g1( + &self, + fp_bytes: &[u8; FP_LENGTH], + ) -> Result<[u8; G1_LENGTH], PrecompileError>; + + /// BLS12-381 map field element to G2. 
+ fn bls12_381_fp2_to_g2( + &self, + fp2_x: &[u8; FP_LENGTH], + fp2_y: &[u8; FP_LENGTH], + ) -> Result<[u8; G2_LENGTH], PrecompileError>; + + /// KZG point evaluation. + #[cfg(any(feature = "c-kzg", feature = "kzg-rs"))] + fn verify_kzg_proof( + &self, + commitment: &[u8; 48], + z: &[u8; 32], + y: &[u8; 32], + proof: &[u8; 48], + ) -> bool; + + /// secp256k1 ECDSA signature recovery. + fn ecrecover(&self, sig: &B512, recid: u8, msg: &B256) -> Option; + + /// secp256r1 (P-256) signature verification. + fn secp256r1_verify_signature( + &self, + msg: &[u8; secp256r1::constants::MESSAGE_HASH_LENGTH], + sig: &[u8; secp256r1::constants::SIGNATURE_LENGTH], + pk: &[u8; secp256r1::constants::PUBKEY_LENGTH], + ) -> Option<()>; + + /// Modular exponentiation. + /// + /// Computes base^exponent mod modulus. + /// + /// # Arguments + /// * `base` - The base value + /// * `exponent` - The exponent value + /// * `modulus` - The modulus value + /// + /// # Returns + /// The result of the modular exponentiation. + fn modexp(&self, base: &[u8], exponent: &[u8], modulus: &[u8]) -> Vec; + + /// SHA-256 hash function. + /// + /// Computes the SHA-256 hash of the input data. + /// + /// # Arguments + /// * `input` - The input data to hash + /// + /// # Returns + /// The SHA-256 hash as 32 bytes. + fn sha256(&self, input: &[u8]) -> [u8; 32]; + + /// RIPEMD-160 hash function. + /// + /// Computes the RIPEMD-160 hash of the input data. + /// + /// # Arguments + /// * `input` - The input data to hash + /// + /// # Returns + /// The RIPEMD-160 hash as 32 bytes (20 bytes hash + 12 bytes zero padding). + fn ripemd160(&self, input: &[u8]) -> [u8; 32]; + + /// Blake2 compression function. 
+    fn blake2_compress(&self, rounds: usize, h: &mut [u64; 8], m: [u64; 16], t: [u64; 2], f: bool);
+}
+
+/// Default crypto provider using the existing implementations
+#[derive(Debug, Clone)]
+pub struct DefaultCryptoProvider;
+
+impl CryptoProvider for DefaultCryptoProvider {
+    fn bn128_g1_add(&self, p1_bytes: &[u8], p2_bytes: &[u8]) -> Result<[u8; 64], PrecompileError> {
+        bn128::g1_point_add(p1_bytes, p2_bytes)
+    }
+
+    fn bn128_g1_mul(
+        &self,
+        point_bytes: &[u8],
+        fr_bytes: &[u8],
+    ) -> Result<[u8; 64], PrecompileError> {
+        bn128::g1_point_mul(point_bytes, fr_bytes)
+    }
+
+    fn bn128_pairing_check(&self, pairs: &[(&[u8], &[u8])]) -> Result<bool, PrecompileError> {
+        bn128::pairing_check(pairs)
+    }
+
+    fn bls12_381_g1_add(&self, a: G1Point, b: G1Point) -> Result<[u8; G1_LENGTH], PrecompileError> {
+        bls12_381::p1_add_affine_bytes(a, b)
+    }
+
+    fn bls12_381_g2_add(&self, a: G2Point, b: G2Point) -> Result<[u8; G2_LENGTH], PrecompileError> {
+        bls12_381::p2_add_affine_bytes(a, b)
+    }
+
+    fn bls12_381_g1_msm(
+        &self,
+        points_scalars: Box<
+            dyn Iterator<Item = Result<(G1Point, [u8; SCALAR_LENGTH]), PrecompileError>> + '_,
+        >,
+    ) -> Result<[u8; G1_LENGTH], PrecompileError> {
+        bls12_381::p1_msm_bytes(points_scalars)
+    }
+
+    fn bls12_381_g2_msm(
+        &self,
+        points_scalars: Box<
+            dyn Iterator<Item = Result<(G2Point, [u8; SCALAR_LENGTH]), PrecompileError>> + '_,
+        >,
+    ) -> Result<[u8; G2_LENGTH], PrecompileError> {
+        bls12_381::p2_msm_bytes(points_scalars)
+    }
+
+    fn bls12_381_pairing_check(&self, pairs: &[PairingPair]) -> Result<bool, PrecompileError> {
+        bls12_381::pairing_check_bytes(pairs)
+    }
+
+    fn bls12_381_fp_to_g1(
+        &self,
+        fp_bytes: &[u8; FP_LENGTH],
+    ) -> Result<[u8; G1_LENGTH], PrecompileError> {
+        bls12_381::map_fp_to_g1_bytes(fp_bytes)
+    }
+
+    fn bls12_381_fp2_to_g2(
+        &self,
+        fp2_x: &[u8; FP_LENGTH],
+        fp2_y: &[u8; FP_LENGTH],
+    ) -> Result<[u8; G2_LENGTH], PrecompileError> {
+        bls12_381::map_fp2_to_g2_bytes(fp2_x, fp2_y)
+    }
+
+    #[cfg(any(feature = "c-kzg", feature = "kzg-rs"))]
+    fn verify_kzg_proof(
+        &self,
+        commitment: &[u8; 48],
+        z: &[u8; 32],
+        y: &[u8; 32],
+        proof: &[u8; 48],
+    ) -> bool {
+        kzg::verify_kzg_proof(commitment, z, y, proof)
+    }
+
+    fn ecrecover(&self, sig: &B512, recid: u8, msg: &B256) -> Option<B256> {
+        secp256k1::ecrecover(sig, recid, msg).ok()
+    }
+
+    fn secp256r1_verify_signature(
+        &self,
+        msg: &[u8; secp256r1::constants::MESSAGE_HASH_LENGTH],
+        sig: &[u8; secp256r1::constants::SIGNATURE_LENGTH],
+        pk: &[u8; secp256r1::constants::PUBKEY_LENGTH],
+    ) -> Option<()> {
+        secp256r1::verify_signature(msg, sig, pk)
+    }
+
+    fn modexp(&self, base: &[u8], exponent: &[u8], modulus: &[u8]) -> Vec<u8> {
+        modexp::modexp(base, exponent, modulus)
+    }
+
+    fn sha256(&self, input: &[u8]) -> [u8; 32] {
+        hash::sha256(input)
+    }
+
+    fn ripemd160(&self, input: &[u8]) -> [u8; 32] {
+        hash::ripemd160(input)
+    }
+
+    fn blake2_compress(&self, rounds: usize, h: &mut [u64; 8], m: [u64; 16], t: [u64; 2], f: bool) {
+        blake2::compress(rounds, h, m, t, f);
+    }
+}
+
+/// Global crypto provider instance
+static PROVIDER: OnceBox<Box<dyn CryptoProvider>> = OnceBox::new();
+
+/// Install a custom crypto provider globally.
+///
+/// # Arguments
+/// * `provider` - The crypto provider implementation to use
+///
+/// # Returns
+/// `true` if the provider was installed successfully, `false` if a provider was already installed.
+///
+/// # Example
+/// ```ignore
+/// use revm_precompile::crypto::{install_provider, CryptoProvider};
+///
+/// struct MyProvider;
+/// impl CryptoProvider for MyProvider {
+///     // ... implementation
+/// }
+///
+/// if !install_provider(MyProvider) {
+///     println!("Provider already installed");
+/// }
+/// ```
+pub fn install_provider<P: CryptoProvider>(provider: P) -> bool {
+    PROVIDER.set(Box::new(Box::new(provider))).is_ok()
+}
+
+/// Get the installed crypto provider, or the default if none is installed.
+pub fn get_provider() -> &'static dyn CryptoProvider { + PROVIDER + .get_or_init(|| Box::new(Box::new(DefaultCryptoProvider))) + .as_ref() +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_default_provider() { + let result = get_provider().sha256(b"test"); + assert_eq!(result.len(), 32); + } +} From 11e712638edbef77edd39db39e480490619ebc69 Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Tue, 22 Jul 2025 15:45:05 +0100 Subject: [PATCH 15/16] use default impl from trait --- crates/precompile/src/blake2.rs | 2 +- crates/precompile/src/bls12_381/g1_add.rs | 2 +- crates/precompile/src/bls12_381/g1_msm.rs | 3 ++- crates/precompile/src/bls12_381/g2_add.rs | 2 +- crates/precompile/src/bls12_381/g2_msm.rs | 3 ++- crates/precompile/src/bls12_381/map_fp2_to_g2.rs | 3 ++- crates/precompile/src/bls12_381/map_fp_to_g1.rs | 2 +- crates/precompile/src/bls12_381/pairing.rs | 2 +- crates/precompile/src/bn128.rs | 6 +++--- crates/precompile/src/hash.rs | 4 ++-- crates/precompile/src/kzg_point_evaluation.rs | 2 +- crates/precompile/src/modexp.rs | 2 +- crates/precompile/src/secp256k1.rs | 7 +++---- crates/precompile/src/secp256r1.rs | 2 +- 14 files changed, 22 insertions(+), 20 deletions(-) diff --git a/crates/precompile/src/blake2.rs b/crates/precompile/src/blake2.rs index 7de50f9925..4bbedb2468 100644 --- a/crates/precompile/src/blake2.rs +++ b/crates/precompile/src/blake2.rs @@ -53,7 +53,7 @@ pub fn run(input: &[u8], gas_limit: u64) -> PrecompileResult { let t_0 = u64::from_le_bytes(input[196..204].try_into().unwrap()); let t_1 = u64::from_le_bytes(input[204..212].try_into().unwrap()); - crate::crypto::blake2::compress(rounds, &mut h, m, [t_0, t_1], f); + crate::crypto::get_provider().blake2_compress(rounds, &mut h, m, [t_0, t_1], f); let mut out = [0u8; 64]; for (i, h) in (0..64).step_by(8).zip(h.iter()) { diff --git a/crates/precompile/src/bls12_381/g1_add.rs b/crates/precompile/src/bls12_381/g1_add.rs index b43e09c3f5..fd5c9af9ab 100644 --- 
a/crates/precompile/src/bls12_381/g1_add.rs +++ b/crates/precompile/src/bls12_381/g1_add.rs @@ -33,7 +33,7 @@ pub fn g1_add(input: &[u8], gas_limit: u64) -> PrecompileResult { let b = (*b_x, *b_y); // Get unpadded result from crypto backend - let unpadded_result = crate::crypto::bls12_381::p1_add_affine_bytes(a, b)?; + let unpadded_result = crate::crypto::get_provider().bls12_381_g1_add(a, b)?; // Pad the result for EVM compatibility let padded_result = pad_g1_point(&unpadded_result); diff --git a/crates/precompile/src/bls12_381/g1_msm.rs b/crates/precompile/src/bls12_381/g1_msm.rs index 47e9f3f0c0..441884d2d7 100644 --- a/crates/precompile/src/bls12_381/g1_msm.rs +++ b/crates/precompile/src/bls12_381/g1_msm.rs @@ -46,7 +46,8 @@ pub fn g1_msm(input: &[u8], gas_limit: u64) -> PrecompileResult { Ok((point, scalar_array)) }); - let unpadded_result = crate::crypto::bls12_381::p1_msm_bytes(valid_pairs_iter)?; + let unpadded_result = + crate::crypto::get_provider().bls12_381_g1_msm(Box::new(valid_pairs_iter))?; // Pad the result for EVM compatibility let padded_result = pad_g1_point(&unpadded_result); diff --git a/crates/precompile/src/bls12_381/g2_add.rs b/crates/precompile/src/bls12_381/g2_add.rs index 7e1bbfc148..9eafc4ff34 100644 --- a/crates/precompile/src/bls12_381/g2_add.rs +++ b/crates/precompile/src/bls12_381/g2_add.rs @@ -34,7 +34,7 @@ pub fn g2_add(input: &[u8], gas_limit: u64) -> PrecompileResult { let b = (*b_x_0, *b_x_1, *b_y_0, *b_y_1); // Get unpadded result from crypto backend - let unpadded_result = crate::crypto::bls12_381::p2_add_affine_bytes(a, b)?; + let unpadded_result = crate::crypto::get_provider().bls12_381_g2_add(a, b)?; // Pad the result for EVM compatibility let padded_result = pad_g2_point(&unpadded_result); diff --git a/crates/precompile/src/bls12_381/g2_msm.rs b/crates/precompile/src/bls12_381/g2_msm.rs index 726e3282d6..7cfdf9d26b 100644 --- a/crates/precompile/src/bls12_381/g2_msm.rs +++ b/crates/precompile/src/bls12_381/g2_msm.rs @@ 
-46,7 +46,8 @@ pub fn g2_msm(input: &[u8], gas_limit: u64) -> PrecompileResult { Ok((point, scalar_array)) }); - let unpadded_result = crate::crypto::bls12_381::p2_msm_bytes(valid_pairs_iter)?; + let unpadded_result = + crate::crypto::get_provider().bls12_381_g2_msm(Box::new(valid_pairs_iter))?; // Pad the result for EVM compatibility let padded_result = pad_g2_point(&unpadded_result); diff --git a/crates/precompile/src/bls12_381/map_fp2_to_g2.rs b/crates/precompile/src/bls12_381/map_fp2_to_g2.rs index 3a38a35d6d..77df5245d4 100644 --- a/crates/precompile/src/bls12_381/map_fp2_to_g2.rs +++ b/crates/precompile/src/bls12_381/map_fp2_to_g2.rs @@ -30,7 +30,8 @@ pub fn map_fp2_to_g2(input: &[u8], gas_limit: u64) -> PrecompileResult { let input_p0_y = remove_fp_padding(&input[PADDED_FP_LENGTH..PADDED_FP2_LENGTH])?; // Get unpadded result from crypto backend - let unpadded_result = crate::crypto::bls12_381::map_fp2_to_g2_bytes(input_p0_x, input_p0_y)?; + let unpadded_result = + crate::crypto::get_provider().bls12_381_fp2_to_g2(input_p0_x, input_p0_y)?; // Pad the result for EVM compatibility let padded_result = pad_g2_point(&unpadded_result); diff --git a/crates/precompile/src/bls12_381/map_fp_to_g1.rs b/crates/precompile/src/bls12_381/map_fp_to_g1.rs index dede1947e3..16bdef5cc8 100644 --- a/crates/precompile/src/bls12_381/map_fp_to_g1.rs +++ b/crates/precompile/src/bls12_381/map_fp_to_g1.rs @@ -25,7 +25,7 @@ pub fn map_fp_to_g1(input: &[u8], gas_limit: u64) -> PrecompileResult { let input_p0 = remove_fp_padding(input)?; // Get unpadded result from crypto backend - let unpadded_result = crate::crypto::bls12_381::map_fp_to_g1_bytes(input_p0)?; + let unpadded_result = crate::crypto::get_provider().bls12_381_fp_to_g1(input_p0)?; // Pad the result for EVM compatibility let padded_result = pad_g1_point(&unpadded_result); diff --git a/crates/precompile/src/bls12_381/pairing.rs b/crates/precompile/src/bls12_381/pairing.rs index 92ccb11e8e..7ccb51e9b4 100644 --- 
a/crates/precompile/src/bls12_381/pairing.rs +++ b/crates/precompile/src/bls12_381/pairing.rs @@ -52,7 +52,7 @@ pub fn pairing(input: &[u8], gas_limit: u64) -> PrecompileResult { pairs.push(((*a_x, *a_y), (*b_x_0, *b_x_1, *b_y_0, *b_y_1))); } - let result = crate::crypto::bls12_381::pairing_check_bytes(&pairs)?; + let result = crate::crypto::get_provider().bls12_381_pairing_check(&pairs)?; let result = if result { 1 } else { 0 }; Ok(PrecompileOutput::new( diff --git a/crates/precompile/src/bn128.rs b/crates/precompile/src/bn128.rs index 7643dee140..c9260bc75a 100644 --- a/crates/precompile/src/bn128.rs +++ b/crates/precompile/src/bn128.rs @@ -148,7 +148,7 @@ pub fn run_add(input: &[u8], gas_cost: u64, gas_limit: u64) -> PrecompileResult let p1_bytes = &input[..G1_LEN]; let p2_bytes = &input[G1_LEN..]; - let output = crate::crypto::bn128::g1_point_add(p1_bytes, p2_bytes)?; + let output = crate::crypto::get_provider().bn128_g1_add(p1_bytes, p2_bytes)?; Ok(PrecompileOutput::new(gas_cost, output.into())) } @@ -163,7 +163,7 @@ pub fn run_mul(input: &[u8], gas_cost: u64, gas_limit: u64) -> PrecompileResult let point_bytes = &input[..G1_LEN]; let scalar_bytes = &input[G1_LEN..G1_LEN + SCALAR_LEN]; - let output = crate::crypto::bn128::g1_point_mul(point_bytes, scalar_bytes)?; + let output = crate::crypto::get_provider().bn128_g1_mul(point_bytes, scalar_bytes)?; Ok(PrecompileOutput::new(gas_cost, output.into())) } @@ -202,7 +202,7 @@ pub fn run_pair( points.push((encoded_g1_element, encoded_g2_element)); } - let pairing_result = crate::crypto::bn128::pairing_check(&points)?; + let pairing_result = crate::crypto::get_provider().bn128_pairing_check(&points)?; Ok(PrecompileOutput::new( gas_used, bool_to_bytes32(pairing_result), diff --git a/crates/precompile/src/hash.rs b/crates/precompile/src/hash.rs index a7e36dab5c..5447663539 100644 --- a/crates/precompile/src/hash.rs +++ b/crates/precompile/src/hash.rs @@ -22,7 +22,7 @@ pub fn sha256_run(input: &[u8], gas_limit: u64) -> 
PrecompileResult { if cost > gas_limit { Err(PrecompileError::OutOfGas) } else { - let output = crate::crypto::hash::sha256(input); + let output = crate::crypto::get_provider().sha256(input); Ok(PrecompileOutput::new(cost, output.to_vec().into())) } } @@ -38,7 +38,7 @@ pub fn ripemd160_run(input: &[u8], gas_limit: u64) -> PrecompileResult { if gas_used > gas_limit { Err(PrecompileError::OutOfGas) } else { - let output = crate::crypto::hash::ripemd160(input); + let output = crate::crypto::get_provider().ripemd160(input); Ok(PrecompileOutput::new(gas_used, output.to_vec().into())) } } diff --git a/crates/precompile/src/kzg_point_evaluation.rs b/crates/precompile/src/kzg_point_evaluation.rs index a28c386d9e..c510911518 100644 --- a/crates/precompile/src/kzg_point_evaluation.rs +++ b/crates/precompile/src/kzg_point_evaluation.rs @@ -52,7 +52,7 @@ pub fn run(input: &[u8], gas_limit: u64) -> PrecompileResult { let z = input[32..64].try_into().unwrap(); let y = input[64..96].try_into().unwrap(); let proof = input[144..192].try_into().unwrap(); - if !crate::crypto::kzg::verify_kzg_proof(commitment, z, y, proof) { + if !crate::crypto::get_provider().verify_kzg_proof(commitment, z, y, proof) { return Err(PrecompileError::BlobVerifyKzgProofFailed); } diff --git a/crates/precompile/src/modexp.rs b/crates/precompile/src/modexp.rs index 1686acef2d..5ea407a75b 100644 --- a/crates/precompile/src/modexp.rs +++ b/crates/precompile/src/modexp.rs @@ -133,7 +133,7 @@ where debug_assert_eq!(modulus.len(), mod_len); // Call the modexp. - let output = crate::crypto::modexp::modexp(base, exponent, modulus); + let output = crate::crypto::get_provider().modexp(base, exponent, modulus); // Left pad the result to modulus length. bytes will always by less or equal to modulus length. 
Ok(PrecompileOutput::new( diff --git a/crates/precompile/src/secp256k1.rs b/crates/precompile/src/secp256k1.rs index a8e41b6a94..8f1c59a247 100644 --- a/crates/precompile/src/secp256k1.rs +++ b/crates/precompile/src/secp256k1.rs @@ -47,8 +47,7 @@ fn ecrecover_bytes(sig: [u8; 64], recid: u8, msg: [u8; 32]) -> Option<[u8; 32]> let sig = B512::from(sig); let msg = B256::from(msg); - match crate::crypto::secp256k1::ecrecover(&sig, recid, &msg) { - Ok(address) => Some(address.0), - Err(_) => None, - } + crate::crypto::get_provider() + .ecrecover(&sig, recid, &msg) + .map(|address| address.0) } diff --git a/crates/precompile/src/secp256r1.rs b/crates/precompile/src/secp256r1.rs index 86c381ee9a..e60b2db228 100644 --- a/crates/precompile/src/secp256r1.rs +++ b/crates/precompile/src/secp256r1.rs @@ -90,7 +90,7 @@ pub fn verify_impl(input: &[u8]) -> Option<()> { uncompressed_pk[0] = 0x04; uncompressed_pk[1..].copy_from_slice(pk); - crate::crypto::secp256r1::verify_signature(&msg.0, &sig.0, &uncompressed_pk) + crate::crypto::get_provider().secp256r1_verify_signature(&msg.0, &sig.0, &uncompressed_pk) } #[cfg(test)] From 3d00601df95ae699ecf8f344abb59a841d562e59 Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Wed, 23 Jul 2025 13:40:49 +0100 Subject: [PATCH 16/16] box --- crates/precompile/src/bls12_381/g1_msm.rs | 1 + crates/precompile/src/bls12_381/g2_msm.rs | 1 + 2 files changed, 2 insertions(+) diff --git a/crates/precompile/src/bls12_381/g1_msm.rs b/crates/precompile/src/bls12_381/g1_msm.rs index 441884d2d7..33ec02d4fa 100644 --- a/crates/precompile/src/bls12_381/g1_msm.rs +++ b/crates/precompile/src/bls12_381/g1_msm.rs @@ -7,6 +7,7 @@ use crate::bls12_381_const::{ }; use crate::bls12_381_utils::msm_required_gas; use crate::{PrecompileError, PrecompileOutput, PrecompileResult, PrecompileWithAddress}; +use std::boxed::Box; /// [EIP-2537](https://eips.ethereum.org/EIPS/eip-2537#specification) BLS12_G1MSM precompile. 
pub const PRECOMPILE: PrecompileWithAddress = PrecompileWithAddress(G1_MSM_ADDRESS, g1_msm); diff --git a/crates/precompile/src/bls12_381/g2_msm.rs b/crates/precompile/src/bls12_381/g2_msm.rs index 7cfdf9d26b..d1ac3c5a6b 100644 --- a/crates/precompile/src/bls12_381/g2_msm.rs +++ b/crates/precompile/src/bls12_381/g2_msm.rs @@ -7,6 +7,7 @@ use crate::bls12_381_const::{ }; use crate::bls12_381_utils::msm_required_gas; use crate::{PrecompileError, PrecompileOutput, PrecompileResult, PrecompileWithAddress}; +use std::boxed::Box; /// [EIP-2537](https://eips.ethereum.org/EIPS/eip-2537#specification) BLS12_G2MSM precompile. pub const PRECOMPILE: PrecompileWithAddress = PrecompileWithAddress(G2_MSM_ADDRESS, g2_msm);