Skip to content

Commit

Permalink
Optimize POW read (#3035)
Browse files Browse the repository at this point in the history
* Optimize POW read

This functionality is used to deserialize header (from network or from
DB), it was taking up to 40% cpu time during initial header sync. This
PR brings it down to 3-4%. Function read_number would look better as
closure; unfortunately the compiler doesn't inline it in this case, so
it would be 2x slower.

* Remove unused code
  • Loading branch information
hashmap authored Sep 10, 2019
1 parent 80a8f76 commit d90b1c2
Show file tree
Hide file tree
Showing 3 changed files with 31 additions and 27 deletions.
4 changes: 2 additions & 2 deletions core/src/global.rs
Original file line number Diff line number Diff line change
Expand Up @@ -42,10 +42,10 @@ use crate::util::RwLock;
pub const PROTOCOL_VERSION: u32 = 1;

/// Automated testing edge_bits
pub const AUTOMATED_TESTING_MIN_EDGE_BITS: u8 = 9;
pub const AUTOMATED_TESTING_MIN_EDGE_BITS: u8 = 10;

/// Automated testing proof size
pub const AUTOMATED_TESTING_PROOF_SIZE: usize = 4;
pub const AUTOMATED_TESTING_PROOF_SIZE: usize = 8;

/// User testing edge_bits
pub const USER_TESTING_MIN_EDGE_BITS: u8 = 15;
Expand Down
42 changes: 23 additions & 19 deletions core/src/pow/types.rs
Original file line number Diff line number Diff line change
Expand Up @@ -391,6 +391,23 @@ impl Proof {
}
}

/// Extracts `bit_count` bits starting at absolute bit offset `bit_start`
/// from a little-endian bit stream, where bit `p` of the stream lives at
/// `bits[p / 8] & (1 << (p % 8))`.
///
/// Returns 0 when `bit_count` is 0. When the 8-byte window containing
/// `bit_start` would run past the end of `bits`, the window is shifted back
/// so it stays in bounds (assumes `bits.len() >= 8` — a shorter buffer
/// panics on the subtraction/slice below; TODO confirm callers guarantee
/// this).
///
/// NOTE: `#[inline(always)]` is deliberate — this sits on the hot header
/// deserialization path and the non-inlined form measured ~2x slower.
#[inline(always)]
fn read_number(bits: &[u8], bit_start: usize, bit_count: usize) -> u64 {
    if bit_count == 0 {
        return 0;
    }
    // Start of the 8-byte window containing `bit_start`, clamped so the
    // window never runs past the end of the buffer.
    let mut byte_start = bit_start / 8;
    if byte_start + 8 > bits.len() {
        byte_start = bits.len() - 8;
    }
    let mut buf: [u8; 8] = [0; 8];
    buf.copy_from_slice(&bits[byte_start..byte_start + 8]);
    // The stream is little-endian, so read the window directly as a
    // little-endian u64 (equivalent to the original reverse + from_be_bytes,
    // without the byte swap).
    let window = u64::from_le_bytes(buf);
    // Bit offset of `bit_start` within the 64-bit window.
    let offset = bit_start - byte_start * 8;
    // Shift the wanted field to the top to drop higher bits, then back down
    // to drop lower bits, leaving exactly `bit_count` bits.
    (window << (64 - offset - bit_count)) >> (64 - bit_count)
}

impl Readable for Proof {
fn read(reader: &mut dyn Reader) -> Result<Proof, ser::Error> {
let edge_bits = reader.read_u8()?;
Expand All @@ -405,24 +422,15 @@ impl Readable for Proof {
let bytes_len = BitVec::bytes_len(bits_len);
let bits = reader.read_fixed_bytes(bytes_len)?;

// set our nonces from what we read in the bitvec
let bitvec = BitVec { bits };
for n in 0..global::proofsize() {
let mut nonce = 0;
for bit in 0..nonce_bits {
if bitvec.bit_at(n * nonce_bits + (bit as usize)) {
nonce |= 1 << bit;
}
}
nonces.push(nonce);
nonces.push(read_number(&bits, n * nonce_bits, nonce_bits));
}

// check the last bits of the last byte are zeroed, we don't use them but
// still better to enforce to avoid any malleability
for n in bits_len..(bytes_len * 8) {
if bitvec.bit_at(n) {
return Err(ser::Error::CorruptedData);
}
//// check the last bits of the last byte are zeroed, we don't use them but
//// still better to enforce to avoid any malleability
let end_of_data = global::proofsize() * nonce_bits;
if read_number(&bits, end_of_data, bytes_len * 8 - end_of_data) != 0 {
return Err(ser::Error::CorruptedData);
}

Ok(Proof { edge_bits, nonces })
Expand Down Expand Up @@ -469,8 +477,4 @@ impl BitVec {
/// Sets the bit at absolute position `pos`, little-endian bit order:
/// bit `pos` lives in byte `pos / 8` at in-byte offset `pos % 8`.
fn set_bit_at(&mut self, pos: usize) {
    let byte = pos / 8;
    let mask: u8 = 1 << (pos % 8);
    self.bits[byte] |= mask;
}

fn bit_at(&self, pos: usize) -> bool {
self.bits[pos / 8] & (1 << (pos % 8) as u8) != 0
}
}
12 changes: 6 additions & 6 deletions core/tests/block.rs
Original file line number Diff line number Diff line change
Expand Up @@ -269,7 +269,7 @@ fn empty_block_serialized_size() {
let b = new_block(vec![], &keychain, &builder, &prev, &key_id);
let mut vec = Vec::new();
ser::serialize_default(&mut vec, &b).expect("serialization failed");
let target_len = 1_107;
let target_len = 1_112;
assert_eq!(vec.len(), target_len);
}

Expand All @@ -284,7 +284,7 @@ fn block_single_tx_serialized_size() {
let b = new_block(vec![&tx1], &keychain, &builder, &prev, &key_id);
let mut vec = Vec::new();
ser::serialize_default(&mut vec, &b).expect("serialization failed");
let target_len = 2_689;
let target_len = 2_694;
assert_eq!(vec.len(), target_len);
}

Expand All @@ -299,7 +299,7 @@ fn empty_compact_block_serialized_size() {
let cb: CompactBlock = b.into();
let mut vec = Vec::new();
ser::serialize_default(&mut vec, &cb).expect("serialization failed");
let target_len = 1_115;
let target_len = 1_120;
assert_eq!(vec.len(), target_len);
}

Expand All @@ -315,7 +315,7 @@ fn compact_block_single_tx_serialized_size() {
let cb: CompactBlock = b.into();
let mut vec = Vec::new();
ser::serialize_default(&mut vec, &cb).expect("serialization failed");
let target_len = 1_121;
let target_len = 1_126;
assert_eq!(vec.len(), target_len);
}

Expand All @@ -335,7 +335,7 @@ fn block_10_tx_serialized_size() {
let b = new_block(txs.iter().collect(), &keychain, &builder, &prev, &key_id);
let mut vec = Vec::new();
ser::serialize_default(&mut vec, &b).expect("serialization failed");
let target_len = 16_927;
let target_len = 16_932;
assert_eq!(vec.len(), target_len,);
}

Expand All @@ -356,7 +356,7 @@ fn compact_block_10_tx_serialized_size() {
let cb: CompactBlock = b.into();
let mut vec = Vec::new();
ser::serialize_default(&mut vec, &cb).expect("serialization failed");
let target_len = 1_175;
let target_len = 1_180;
assert_eq!(vec.len(), target_len,);
}

Expand Down

0 comments on commit d90b1c2

Please sign in to comment.