Skip to content

Commit

Permalink
Merge remote-tracking branch 'upstream/milestone/2.x.x'
Browse files Browse the repository at this point in the history
  • Loading branch information
antiochp committed Jul 24, 2019
2 parents eff2472 + 16af577 commit 441e846
Show file tree
Hide file tree
Showing 81 changed files with 1,537 additions and 1,308 deletions.
19 changes: 4 additions & 15 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

1 change: 1 addition & 0 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,7 @@ failure_derive = "0.1"

grin_api = { path = "./api", version = "2.0.1-beta.1" }
grin_config = { path = "./config", version = "2.0.1-beta.1" }
grin_chain = { path = "./chain", version = "2.0.1-beta.1" }
grin_core = { path = "./core", version = "2.0.1-beta.1" }
grin_keychain = { path = "./keychain", version = "2.0.1-beta.1" }
grin_p2p = { path = "./p2p", version = "2.0.1-beta.1" }
Expand Down
13 changes: 6 additions & 7 deletions api/src/handlers/pool_api.rs
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@
use super::utils::w;
use crate::core::core::hash::Hashed;
use crate::core::core::Transaction;
use crate::core::ser;
use crate::core::ser::{self, ProtocolVersion};
use crate::pool;
use crate::rest::*;
use crate::router::{Handler, ResponseFuture};
Expand Down Expand Up @@ -64,7 +64,6 @@ impl PoolPushHandler {

let fluff = params.get("fluff").is_some();
let pool_arc = match w(&self.tx_pool) {
//w(&self.tx_pool).clone();
Ok(p) => p,
Err(e) => return Box::new(err(e)),
};
Expand All @@ -76,14 +75,14 @@ impl PoolPushHandler {
.map_err(|e| ErrorKind::RequestError(format!("Bad request: {}", e)).into())
})
.and_then(move |tx_bin| {
ser::deserialize(&mut &tx_bin[..])
// TODO - pass protocol version in via the api call?
let version = ProtocolVersion::local();

ser::deserialize(&mut &tx_bin[..], version)
.map_err(|e| ErrorKind::RequestError(format!("Bad request: {}", e)).into())
})
.and_then(move |tx: Transaction| {
let source = pool::TxSource {
debug_name: "push-api".to_string(),
identifier: "?.?.?.?".to_string(),
};
let source = pool::TxSource::PushApi;
info!(
"Pushing transaction {} to pool (inputs: {}, outputs: {}, kernels: {})",
tx.hash(),
Expand Down
4 changes: 2 additions & 2 deletions api/src/handlers/transactions_api.rs
Original file line number Diff line number Diff line change
Expand Up @@ -54,15 +54,15 @@ impl TxHashSetHandler {
Ok(TxHashSetNode::get_last_n_output(w(&self.chain)?, distance))
}

// gets last n outputs inserted in to the tree
// gets last n rangeproofs inserted in to the tree
fn get_last_n_rangeproof(&self, distance: u64) -> Result<Vec<TxHashSetNode>, Error> {
	// Resolve the weak chain reference first, then query the MMR.
	let chain = w(&self.chain)?;
	Ok(TxHashSetNode::get_last_n_rangeproof(chain, distance))
}

// gets last n outputs inserted in to the tree
// gets last n kernels inserted in to the tree
fn get_last_n_kernel(&self, distance: u64) -> Result<Vec<TxHashSetNode>, Error> {
	// Resolve the weak chain reference first, then query the MMR.
	let chain = w(&self.chain)?;
	Ok(TxHashSetNode::get_last_n_kernel(chain, distance))
}
Expand Down
2 changes: 1 addition & 1 deletion api/src/types.rs
Original file line number Diff line number Diff line change
Expand Up @@ -83,7 +83,7 @@ pub struct Status {
impl Status {
pub fn from_tip_and_peers(current_tip: chain::Tip, connections: u32) -> Status {
Status {
protocol_version: p2p::msg::ProtocolVersion::default().into(),
protocol_version: ser::ProtocolVersion::local().into(),
user_agent: p2p::msg::USER_AGENT.to_string(),
connections: connections,
tip: Tip::from_tip(current_tip),
Expand Down
26 changes: 23 additions & 3 deletions chain/src/chain.rs
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ use crate::core::core::{
};
use crate::core::global;
use crate::core::pow;
use crate::core::ser::{Readable, StreamingReader};
use crate::core::ser::{ProtocolVersion, Readable, StreamingReader};
use crate::error::{Error, ErrorKind};
use crate::pipe;
use crate::store;
Expand Down Expand Up @@ -386,7 +386,6 @@ impl Chain {
verifier_cache: self.verifier_cache.clone(),
txhashset,
batch,
orphans: self.orphans.clone(),
})
}

Expand Down Expand Up @@ -647,7 +646,7 @@ impl Chain {
/// TODO - Write this data to disk and validate the rebuilt kernel MMR.
pub fn kernel_data_write(&self, reader: &mut Read) -> Result<(), Error> {
let mut count = 0;
let mut stream = StreamingReader::new(reader, Duration::from_secs(1));
let mut stream = StreamingReader::new(reader, ProtocolVersion::local());
while let Ok(_kernel) = TxKernelEntry::read(&mut stream) {
count += 1;
}
Expand Down Expand Up @@ -685,6 +684,27 @@ impl Chain {
))
}

/// Header of the txhashset archive we currently offer to syncing peers.
///
/// Parallel txhashset download requires every peer to serve a bit-identical
/// archive, which means all peers must compact/rewind to the same header.
/// Rather than serving arbitrary heights, peers agree on a fixed archive
/// interval (compaction is expensive, so roughly every 12 hours) and only
/// serve the archive at the most recent interval boundary, offset back by
/// the state-sync threshold.
pub fn txhashset_archive_header(&self) -> Result<BlockHeader, Error> {
	let body_head = self.head()?;
	let sync_threshold = global::state_sync_threshold() as u64;
	let archive_interval = global::txhashset_archive_interval();

	// Step back by the sync threshold, then round down to the
	// nearest archive-interval boundary.
	let mut txhashset_height = body_head.height.saturating_sub(sync_threshold);
	txhashset_height -= txhashset_height % archive_interval;

	debug!(
		"txhashset_archive_header: body_head - {}, {}, txhashset height - {}",
		body_head.last_block_h, body_head.height, txhashset_height,
	);

	self.get_header_by_height(txhashset_height)
}

// Special handling to make sure the whole kernel set matches each of its
// roots in each block header, without truncation. We go back header by
// header, rewind and check each root. This fixes a potential weakness in
Expand Down
3 changes: 3 additions & 0 deletions chain/src/error.rs
Original file line number Diff line number Diff line change
Expand Up @@ -140,6 +140,9 @@ pub enum ErrorKind {
/// Internal Roaring Bitmap error
#[fail(display = "Roaring Bitmap error")]
Bitmap,
/// Error during chain sync
#[fail(display = "Sync error")]
SyncError(String),
}

impl Display for Error {
Expand Down
4 changes: 3 additions & 1 deletion chain/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -45,4 +45,6 @@ pub mod types;
pub use crate::chain::{Chain, MAX_ORPHAN_SIZE};
pub use crate::error::{Error, ErrorKind};
pub use crate::store::ChainStore;
pub use crate::types::{BlockStatus, ChainAdapter, Options, Tip, TxHashsetWriteStatus};
pub use crate::types::{
BlockStatus, ChainAdapter, Options, SyncState, SyncStatus, Tip, TxHashsetWriteStatus,
};
41 changes: 9 additions & 32 deletions chain/src/pipe.rs
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,6 @@

//! Implementation of the chain block acceptance (or refusal) pipeline.
use crate::chain::OrphanBlockPool;
use crate::core::consensus;
use crate::core::core::hash::Hashed;
use crate::core::core::verifier_cache::VerifierCache;
Expand Down Expand Up @@ -45,8 +44,6 @@ pub struct BlockContext<'a> {
pub batch: store::Batch<'a>,
/// The verifier cache (caching verifier for rangeproofs and kernel signatures)
pub verifier_cache: Arc<RwLock<dyn VerifierCache>>,
/// Recent orphan blocks to avoid double-processing
pub orphans: Arc<OrphanBlockPool>,
}

/// Process a block header as part of processing a full block.
Expand Down Expand Up @@ -75,10 +72,9 @@ fn process_header_for_block(

// Check if we already know about this block for various reasons
// from cheapest to most expensive (delay hitting the db until last).
fn check_known(block: &Block, ctx: &mut BlockContext<'_>) -> Result<(), Error> {
check_known_head(&block.header, ctx)?;
check_known_orphans(&block.header, ctx)?;
check_known_store(&block.header, ctx)?;
fn check_known(header: &BlockHeader, ctx: &mut BlockContext<'_>) -> Result<(), Error> {
	// Cheapest check first (in-memory head), db lookup last.
	check_known_head(header, ctx)?;
	check_known_store(header, ctx)
}

Expand All @@ -99,7 +95,7 @@ pub fn process_block(b: &Block, ctx: &mut BlockContext<'_>) -> Result<Option<Tip
);

// Check if we have already processed this block previously.
check_known(b, ctx)?;
check_known(&b.header, ctx)?;

// Delay hitting the db for current chain head until we know
// this block is not already known.
Expand Down Expand Up @@ -260,19 +256,11 @@ pub fn process_block_header(header: &BlockHeader, ctx: &mut BlockContext<'_>) ->
header.height,
); // keep this

check_header_known(header, ctx)?;
validate_header(header, ctx)?;
Ok(())
}
// Check if this header is already "known" from processing a previous block.
// Note: We are looking for a full block based on this header, not just the header itself.
check_known(header, ctx)?;

/// Quick in-memory check to fast-reject any block header we've already handled
/// recently. Keeps duplicates from the network in check.
/// ctx here is specific to the header_head (tip of the header chain)
fn check_header_known(header: &BlockHeader, ctx: &mut BlockContext<'_>) -> Result<(), Error> {
let header_head = ctx.batch.header_head()?;
if header.hash() == header_head.last_block_h || header.hash() == header_head.prev_block_h {
return Err(ErrorKind::Unfit("header already known".to_string()).into());
}
validate_header(header, ctx)?;
Ok(())
}

Expand All @@ -288,15 +276,6 @@ fn check_known_head(header: &BlockHeader, ctx: &mut BlockContext<'_>) -> Result<
Ok(())
}

/// Fast-reject a block that is already sitting in the orphan pool.
fn check_known_orphans(header: &BlockHeader, ctx: &mut BlockContext<'_>) -> Result<(), Error> {
	match ctx.orphans.contains(&header.hash()) {
		// Already queued as an orphan: refuse it as "unfit" to avoid re-processing.
		true => Err(ErrorKind::Unfit("already known in orphans".to_string()).into()),
		false => Ok(()),
	}
}

// Check if this block is in the store already.
fn check_known_store(header: &BlockHeader, ctx: &mut BlockContext<'_>) -> Result<(), Error> {
match ctx.batch.block_exists(&header.hash()) {
Expand Down Expand Up @@ -450,10 +429,8 @@ fn verify_coinbase_maturity(block: &Block, ext: &txhashset::Extension<'_>) -> Re
/// based on block_sums of previous block, accounting for the inputs|outputs|kernels
/// of the new block.
fn verify_block_sums(b: &Block, ext: &mut txhashset::Extension<'_>) -> Result<(), Error> {
// TODO - this is 2 db calls, can we optimize this?
// Retrieve the block_sums for the previous block.
let prev = ext.batch.get_previous_header(&b.header)?;
let block_sums = ext.batch.get_block_sums(&prev.hash())?;
let block_sums = ext.batch.get_block_sums(&b.header.prev_hash)?;

// Overage is based purely on the new block.
// Previous block_sums have taken all previous overage into account.
Expand Down
Loading

0 comments on commit 441e846

Please sign in to comment.