refactor apply_inputs and support converting block for v2 compatibility #3409

Merged · 6 commits · Jul 31, 2020
121 changes: 95 additions & 26 deletions chain/src/chain.rs
@@ -19,7 +19,7 @@ use crate::core::core::hash::{Hash, Hashed, ZERO_HASH};
use crate::core::core::merkle_proof::MerkleProof;
use crate::core::core::verifier_cache::VerifierCache;
use crate::core::core::{
Block, BlockHeader, BlockSums, Committed, KernelFeatures, Output, OutputIdentifier,
Block, BlockHeader, BlockSums, Committed, Inputs, KernelFeatures, Output, OutputIdentifier,
Transaction, TxKernel,
};
use crate::core::global;
@@ -271,6 +271,41 @@ impl Chain {
res
}

/// We plan to support receiving blocks with CommitOnly inputs,
/// while still relaying blocks with FeaturesAndCommit inputs to peers,
/// so we need a way to convert blocks from CommitOnly to FeaturesAndCommit.
/// Validating the inputs against the utxo_view lets us look up the corresponding outputs.
fn convert_block_v2(&self, block: Block) -> Result<Block, Error> {
debug!(
"convert_block_v2: {} at {}",
block.header.hash(),
block.header.height
);

if block.inputs().is_empty() {
return Ok(Block {
header: block.header,
body: block.body.replace_inputs(Inputs::FeaturesAndCommit(vec![])),
});
}

let mut header_pmmr = self.header_pmmr.write();
let mut txhashset = self.txhashset.write();
let outputs =
txhashset::extending_readonly(&mut header_pmmr, &mut txhashset, |ext, batch| {
let previous_header = batch.get_previous_header(&block.header)?;
pipe::rewind_and_apply_fork(&previous_header, ext, batch)?;
ext.extension
.utxo_view(ext.header_extension)
.validate_inputs(block.inputs(), batch)
})?;
let outputs: Vec<_> = outputs.into_iter().map(|(out, _)| out).collect();
Ok(Block {
header: block.header,
body: block.body.replace_inputs(outputs.into()),
})
}
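
For context, the conversion above maps between two ways a block body can describe its inputs. The following is a self-contained, simplified sketch of the idea only; `Commitment`, `OutputIdentifier`, `Inputs`, and the variant payloads here are stand-ins and do not match grin's real core types in detail. Commit-only inputs are resolved against a UTXO lookup to recover the full features-and-commit form.

```rust
use std::collections::HashMap;

// Hypothetical, simplified stand-ins for the real core types.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct Commitment(u64);

#[derive(Clone, Copy, Debug)]
enum OutputFeatures {
    Plain,
    Coinbase,
}

#[derive(Clone, Copy, Debug)]
struct OutputIdentifier {
    features: OutputFeatures,
    commit: Commitment,
}

// Two ways a block can list its inputs, mirroring the v3/v2 split.
enum Inputs {
    CommitOnly(Vec<Commitment>),
    FeaturesAndCommit(Vec<OutputIdentifier>),
}

/// Resolve commit-only inputs against a UTXO lookup so the block can be
/// relayed to peers that expect the full features-and-commit representation.
fn convert_inputs(
    inputs: Inputs,
    utxo: &HashMap<Commitment, OutputIdentifier>,
) -> Result<Inputs, String> {
    match inputs {
        // Already in the richer form, nothing to do.
        Inputs::FeaturesAndCommit(_) => Ok(inputs),
        // Look each commitment up; fail if any input is not in the utxo.
        Inputs::CommitOnly(commits) => {
            let outputs: Vec<OutputIdentifier> = commits
                .iter()
                .map(|c| {
                    utxo.get(c)
                        .copied()
                        .ok_or_else(|| format!("input not found in utxo: {:?}", c))
                })
                .collect::<Result<Vec<_>, String>>()?;
            Ok(Inputs::FeaturesAndCommit(outputs))
        }
    }
}

fn main() {
    let mut utxo = HashMap::new();
    utxo.insert(
        Commitment(1),
        OutputIdentifier {
            features: OutputFeatures::Plain,
            commit: Commitment(1),
        },
    );
    let converted = convert_inputs(Inputs::CommitOnly(vec![Commitment(1)]), &utxo).unwrap();
    match converted {
        Inputs::FeaturesAndCommit(v) => assert_eq!(v.len(), 1),
        _ => unreachable!(),
    }
}
```

In the diff above, the lookup role is played by the utxo_view obtained after rewind_and_apply_fork to the block's previous header, and the resulting Vec of outputs is turned back into block inputs via `outputs.into()`.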

fn determine_status(&self, head: Option<Tip>, prev_head: Tip, fork_point: Tip) -> BlockStatus {
// If head is updated then we are either "next" block or we just experienced a "reorg" to new head.
// Otherwise this is a "fork" off the main chain.
@@ -291,15 +326,69 @@
}
}

/// Quick check for "known" duplicate block up to and including current chain head.
fn is_known(&self, header: &BlockHeader) -> Result<(), Error> {
let head = self.head()?;
if head.hash() == header.hash() {
return Err(ErrorKind::Unfit("duplicate block".into()).into());
}
if header.total_difficulty() <= head.total_difficulty {
if self.block_exists(header.hash())? {
return Err(ErrorKind::Unfit("duplicate block".into()).into());
}
}
Ok(())
}

// Check if the provided block is an orphan.
// If the block is an orphan, add it to our orphan block pool for deferred processing.
fn check_orphan(&self, block: &Block, opts: Options) -> Result<(), Error> {
if self.block_exists(block.header.prev_hash)? {
return Ok(());
}

let block_hash = block.hash();
let orphan = Orphan {
block: block.clone(),
opts,
added: Instant::now(),
};
self.orphans.add(orphan);

debug!(
"is_orphan: {:?}, # orphans {}{}",
block_hash,
self.orphans.len(),
if self.orphans.len_evicted() > 0 {
format!(", # evicted {}", self.orphans.len_evicted())
} else {
String::new()
},
);

Err(ErrorKind::Orphan.into())
}

/// Attempt to add a new block to the chain.
/// Returns the new chain head if the block has been added to the longest chain,
/// or None if it has been added to a fork (orphan blocks result in an error).
fn process_block_single(&self, b: Block, opts: Options) -> Result<Option<Tip>, Error> {
// Check if we already know about this block.
self.is_known(&b.header)?;

// Process the header first.
// If invalid then fail early.
// If valid then continue with block processing with header_head committed to db etc.
self.process_block_header(&b.header, opts)?;

// Check if this block is an orphan.
// Only do this once we know the header PoW is valid.
self.check_orphan(&b, opts)?;

// Convert the block to FeaturesAndCommit inputs.
// At this point we know the block is not an orphan and its header is valid.
let b = self.convert_block_v2(b)?;

let (maybe_new_head, prev_head) = {
let mut header_pmmr = self.header_pmmr.write();
let mut txhashset = self.txhashset.write();
@@ -331,28 +420,6 @@
Ok(head)
}
Err(e) => match e.kind() {
ErrorKind::Orphan => {
let block_hash = b.hash();
let orphan = Orphan {
block: b,
opts: opts,
added: Instant::now(),
};

self.orphans.add(orphan);

debug!(
"process_block: orphan: {:?}, # orphans {}{}",
block_hash,
self.orphans.len(),
if self.orphans.len_evicted() > 0 {
format!(", # evicted {}", self.orphans.len_evicted())
} else {
String::new()
},
);
Err(ErrorKind::Orphan.into())
}
ErrorKind::Unfit(ref msg) => {
debug!(
"Block {} at {} is unfit at this time: {}",
@@ -544,7 +611,8 @@
let header_pmmr = self.header_pmmr.read();
let txhashset = self.txhashset.read();
txhashset::utxo_view(&header_pmmr, &txhashset, |utxo, batch| {
utxo.validate_tx(tx, batch)
utxo.validate_tx(tx, batch)?;
Ok(())
})
}
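
The `Ok(())` added here suggests that `validate_tx` on the utxo view no longer returns unit: consistent with the `validate_utxo` change in pipe.rs below, it now hands back the resolved inputs, while this public chain method keeps its existing `Result<(), Error>` signature by discarding them. A minimal, self-contained sketch of that pattern, using made-up stand-in types rather than the real ones:

```rust
#[derive(Debug, PartialEq)]
struct ResolvedInput(u64);

// Stand-in for the utxo-view call whose return type widened from () to a Vec
// of resolved inputs (in the real code: Vec<(OutputIdentifier, CommitPos)>).
fn validate_inputs(commits: &[u64]) -> Result<Vec<ResolvedInput>, String> {
    if commits.is_empty() {
        return Err("no inputs".to_string());
    }
    Ok(commits.iter().map(|&c| ResolvedInput(c)).collect())
}

// Stand-in for the public wrapper that keeps its original unit return type:
// `?` still propagates errors, and `Ok(())` discards the resolved inputs.
fn validate_tx(commits: &[u64]) -> Result<(), String> {
    validate_inputs(commits)?;
    Ok(())
}

fn main() {
    assert!(validate_tx(&[1, 2, 3]).is_ok());
    assert!(validate_tx(&[]).is_err());
}
```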

@@ -617,7 +685,7 @@
let prev_root = header_extension.root()?;

// Apply the latest block to the chain state via the extension.
extension.apply_block(b, batch)?;
extension.apply_block(b, header_extension, batch)?;

Ok((prev_root, extension.roots()?, extension.sizes()))
})?;
@@ -1576,7 +1644,8 @@ fn setup_head(
};
}
txhashset::extending(header_pmmr, txhashset, &mut batch, |ext, batch| {
ext.extension.apply_block(&genesis, batch)
ext.extension
.apply_block(&genesis, ext.header_extension, batch)
})?;

// Save the block_sums to the db for use later.
28 changes: 9 additions & 19 deletions chain/src/pipe.rs
@@ -18,14 +18,13 @@ use crate::core::consensus;
use crate::core::core::hash::Hashed;
use crate::core::core::verifier_cache::VerifierCache;
use crate::core::core::Committed;
use crate::core::core::{Block, BlockHeader, BlockSums};
use crate::core::core::{Block, BlockHeader, BlockSums, OutputIdentifier};
use crate::core::pow;
use crate::error::{Error, ErrorKind};
use crate::store;
use crate::txhashset;
use crate::types::{Options, Tip};
use crate::types::{CommitPos, Options, Tip};
use crate::util::RwLock;
use grin_store;
use std::sync::Arc;

/// Contextual information required to process a new block and either reject or
@@ -104,17 +103,9 @@ pub fn process_block(
// want to do this now and not later during header validation.
validate_pow_only(&b.header, ctx)?;

// Get previous header from the db and check we have the corresponding full block.
let prev = prev_header_store(&b.header, &mut ctx.batch)?;

// Block is an orphan if we do not know about the previous full block.
// Skip this check if we have just processed the previous block
// or the full txhashset state (fast sync) at the previous block height.
{
let is_next = b.header.prev_hash == head.last_block_h;
if !is_next && !ctx.batch.block_exists(&prev.hash())? {
return Err(ErrorKind::Orphan.into());
}
}
ctx.batch.block_exists(&prev.hash())?;

// Process the header for the block.
// Note: We still want to process the full block if we have seen this header before
@@ -319,10 +310,7 @@ fn prev_header_store(
header: &BlockHeader,
batch: &mut store::Batch<'_>,
) -> Result<BlockHeader, Error> {
let prev = batch.get_previous_header(&header).map_err(|e| match e {
grin_store::Error::NotFoundErr(_) => ErrorKind::Orphan,
_ => ErrorKind::StoreErr(e, "check prev header".into()),
})?;
let prev = batch.get_previous_header(&header)?;
Ok(prev)
}

@@ -454,7 +442,8 @@ fn apply_block_to_txhashset(
ext: &mut txhashset::ExtensionPair<'_>,
batch: &store::Batch<'_>,
) -> Result<(), Error> {
ext.extension.apply_block(block, batch)?;
ext.extension
.apply_block(block, ext.header_extension, batch)?;
ext.extension.validate_roots(&block.header)?;
ext.extension.validate_sizes(&block.header)?;
Ok(())
@@ -600,11 +589,12 @@ pub fn rewind_and_apply_fork(
Ok(fork_point)
}

/// Validate the block inputs against the utxo, returning the outputs being spent.
fn validate_utxo(
block: &Block,
ext: &mut txhashset::ExtensionPair<'_>,
batch: &store::Batch<'_>,
) -> Result<(), Error> {
) -> Result<Vec<(OutputIdentifier, CommitPos)>, Error> {
let extension = &ext.extension;
let header_extension = &ext.header_extension;
extension
17 changes: 6 additions & 11 deletions chain/src/store.rs
@@ -125,7 +125,7 @@ impl ChainStore {
/// Get PMMR pos for the given output commitment.
pub fn get_output_pos(&self, commit: &Commitment) -> Result<u64, Error> {
match self.get_output_pos_height(commit)? {
Some((pos, _)) => Ok(pos),
Some(pos) => Ok(pos.pos),
None => Err(Error::NotFoundErr(format!(
"Output position for: {:?}",
commit
@@ -134,7 +134,7 @@
}

/// Get PMMR pos and block height for the given output commitment.
pub fn get_output_pos_height(&self, commit: &Commitment) -> Result<Option<(u64, u64)>, Error> {
pub fn get_output_pos_height(&self, commit: &Commitment) -> Result<Option<CommitPos>, Error> {
self.db.get_ser(&to_key(OUTPUT_POS_PREFIX, commit))
}
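
The index entry is now a single `CommitPos` value rather than a bare `(pos, height)` tuple, which is why the caller above reads `pos.pos` instead of destructuring. A minimal sketch of the assumed shape (the `pos` field name is confirmed by the diff; the real struct is the `CommitPos` imported from crate::types in pipe.rs above, and its `height` field name is assumed here):

```rust
/// Assumed shape of the value stored in the output_pos index.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct CommitPos {
    /// Position of the output in the output MMR.
    pos: u64,
    /// Height of the block that added the output.
    height: u64,
}

fn main() {
    // Old style: a bare tuple, read positionally and easy to mix up.
    let old: Option<(u64, u64)> = Some((42, 7));
    let old_pos = old.map(|(pos, _)| pos);

    // New style: a named struct, read by field.
    let new: Option<CommitPos> = Some(CommitPos { pos: 42, height: 7 });
    let new_pos = new.map(|p| p.pos);

    assert_eq!(old_pos, new_pos);
}
```

The same struct is what `save_output_pos_height` below now takes in place of separate `pos` and `height` arguments.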

@@ -258,14 +258,9 @@ impl<'a> Batch<'a> {
}

/// Save output_pos and block height to index.
pub fn save_output_pos_height(
&self,
commit: &Commitment,
pos: u64,
height: u64,
) -> Result<(), Error> {
pub fn save_output_pos_height(&self, commit: &Commitment, pos: CommitPos) -> Result<(), Error> {
self.db
.put_ser(&to_key(OUTPUT_POS_PREFIX, commit)[..], &(pos, height))
.put_ser(&to_key(OUTPUT_POS_PREFIX, commit)[..], &pos)
}

/// Delete the output_pos index entry for a spent output.
@@ -290,7 +285,7 @@ impl<'a> Batch<'a> {
/// Get output_pos from index.
pub fn get_output_pos(&self, commit: &Commitment) -> Result<u64, Error> {
match self.get_output_pos_height(commit)? {
Some((pos, _)) => Ok(pos),
Some(pos) => Ok(pos.pos),
None => Err(Error::NotFoundErr(format!(
"Output position for: {:?}",
commit
@@ -299,7 +294,7 @@
}

/// Get output_pos and block height from index.
pub fn get_output_pos_height(&self, commit: &Commitment) -> Result<Option<(u64, u64)>, Error> {
pub fn get_output_pos_height(&self, commit: &Commitment) -> Result<Option<CommitPos>, Error> {
self.db.get_ser(&to_key(OUTPUT_POS_PREFIX, commit))
}
