Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[DNM] kernel data file download #2743

Closed
wants to merge 11 commits into from
3 changes: 3 additions & 0 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

5 changes: 5 additions & 0 deletions api/src/handlers.rs
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,7 @@ use self::pool_api::PoolInfoHandler;
use self::pool_api::PoolPushHandler;
use self::server_api::IndexHandler;
use self::server_api::StatusHandler;
use self::server_api::KernelDownloadHandler;
use self::transactions_api::TxHashSetHandler;
use crate::auth::{BasicAuthMiddleware, GRIN_BASIC_REALM};
use crate::chain;
Expand Down Expand Up @@ -135,6 +136,9 @@ pub fn build_router(
chain: Arc::downgrade(&chain),
peers: Arc::downgrade(&peers),
};
let kernel_download_handler = KernelDownloadHandler {
peers: Arc::downgrade(&peers),
};
let txhashset_handler = TxHashSetHandler {
chain: Arc::downgrade(&chain),
};
Expand Down Expand Up @@ -165,6 +169,7 @@ pub fn build_router(
router.add_route("/v1/chain/validate", Arc::new(chain_validation_handler))?;
router.add_route("/v1/txhashset/*", Arc::new(txhashset_handler))?;
router.add_route("/v1/status", Arc::new(status_handler))?;
router.add_route("/v1/kerneldownload", Arc::new(kernel_download_handler))?;
router.add_route("/v1/pool", Arc::new(pool_info_handler))?;
router.add_route("/v1/pool/push", Arc::new(pool_push_handler))?;
router.add_route("/v1/peers/all", Arc::new(peers_all_handler))?;
Expand Down
39 changes: 38 additions & 1 deletion api/src/handlers/server_api.rs
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ use crate::rest::*;
use crate::router::{Handler, ResponseFuture};
use crate::types::*;
use crate::web::*;
use hyper::{Body, Request};
use hyper::{Body, Request, StatusCode};
use std::sync::Weak;

// RESTful index of available api endpoints
Expand All @@ -36,6 +36,43 @@ impl Handler for IndexHandler {
}
}

/// Kernel download handler.
/// Requests the full kernel data file from our most-work peer.
/// POST /v1/kerneldownload
pub struct KernelDownloadHandler {
// Weak reference to the peer set; upgraded per-request so the handler
// does not keep the p2p layer alive.
pub peers: Weak<p2p::Peers>,
}

impl Handler for KernelDownloadHandler {
	/// Handle POST /v1/kerneldownload.
	/// Picks our most-work peer and asks it for its kernel data.
	/// Responds 200 with an empty JSON body once the request has been sent
	/// (the data itself arrives asynchronously via the p2p layer), or 500 if
	/// we have no peers or sending the request fails.
	fn post(&self, _req: Request<Body>) -> ResponseFuture {
		if let Some(peer) = w_fut!(&self.peers).most_work_peer() {
			match peer.send_kernel_data_request() {
				Ok(_) => response(StatusCode::OK, "{}"),
				Err(e) => response(
					StatusCode::INTERNAL_SERVER_ERROR,
					format!("requesting kernel data from peer failed: {:?}", e),
				),
			}
		} else {
			// No connected peers to request from; surface this as a server error.
			response(
				StatusCode::INTERNAL_SERVER_ERROR,
				"requesting kernel data from peer failed (no peers)".to_owned(),
			)
		}
	}
}

/// Status handler. Post a summary of the server status
/// GET /v1/status
pub struct StatusHandler {
Expand Down
1 change: 1 addition & 0 deletions chain/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@ croaring = "0.3"
log = "0.4"
serde = "1"
serde_derive = "1"
tempfile = "3.0.5"
chrono = "0.4.4"
lru-cache = "0.1"
lazy_static = "1"
Expand Down
87 changes: 80 additions & 7 deletions chain/src/chain.rs
Original file line number Diff line number Diff line change
Expand Up @@ -17,31 +17,38 @@

use crate::core::core::hash::{Hash, Hashed, ZERO_HASH};
use crate::core::core::merkle_proof::MerkleProof;
use crate::core::core::pmmr::PMMR;
use crate::core::core::verifier_cache::VerifierCache;
use crate::core::core::{
Block, BlockHeader, BlockSums, Committed, Output, OutputIdentifier, Transaction, TxKernelEntry,
Block, BlockHeader, BlockSums, Committed, Output, OutputIdentifier, Transaction, TxKernel,
TxKernelEntry,
};
use crate::core::global;
use crate::core::pow;
use crate::core::ser::{Readable, StreamingReader};
use crate::error::{Error, ErrorKind};
use crate::lmdb;
use crate::pipe;
use crate::store;
use crate::txhashset;
use crate::txhashset::RebuildableKernelView;
use crate::txhashset::TxHashSet;
use crate::types::{
BlockStatus, ChainAdapter, NoStatus, Options, Tip, TxHashSetRoots, TxHashsetWriteStatus,
};
use crate::util::secp::pedersen::{Commitment, RangeProof};
use crate::util::{Mutex, RwLock, StopState};
use grin_store::pmmr::{PMMRBackend, PMMR_FILES};
use grin_store::Error::NotFoundErr;
use std::collections::HashMap;
use std::env;
use std::fs::File;
use std::io::Read;
use std::path::PathBuf;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::time::{Duration, Instant};
use tempfile;

/// Orphan pool size is limited by MAX_ORPHAN_SIZE
pub const MAX_ORPHAN_SIZE: usize = 200;
Expand Down Expand Up @@ -672,6 +679,11 @@ impl Chain {
self.txhashset.read().roots()
}

/// Provides a file for reading the current kernel data, obtained via a
/// rewindable view on the current txhashset.
pub fn kernel_data_read(&self) -> Result<File, Error> {
// Hold a read lock on the txhashset for the duration of the view.
let txhashset = self.txhashset.read();
txhashset::rewindable_kernel_view(&txhashset, |view| view.kernel_data_read())
}

/// Provides a reading view into the current txhashset state as well as
/// the required indexes for a consumer to rewind to a consistent state
/// at the provided block hash.
Expand Down Expand Up @@ -710,7 +722,40 @@ impl Chain {
header: &BlockHeader,
txhashset: &txhashset::TxHashSet,
) -> Result<(), Error> {
debug!("validate_kernel_history: rewinding and validating kernel history (readonly)");
debug!("validate_kernel_history: about to validate kernels via temporary view");

let mut kernel_data =
txhashset::rewindable_kernel_view(&txhashset, |view| view.kernel_data_read())?;

{
let tempdir = tempfile::tempdir()?;
let mut backend: PMMRBackend<TxKernel> = PMMRBackend::new(tempdir.path(), false, None)?;
let mut kernel_view = RebuildableKernelView::new(&mut backend);

// Rebuilding the kernel view will verify the following -
// * all kernel signatures
// * kernel MMR root matches for each historical header.
kernel_view.rebuild(&mut kernel_data, txhashset, header)?;
}

Ok(())
}

// For completeness we also (re)validate the kernel history by rewinding the kernel MMR
// in the txhashset.
// The call to validate_kernel_history above rebuilds the kernel MMR but we do not yet
// use this rebuilt kernel MMR (we simply discard it after validation).
// This is only a temporary solution and can be simplified when we split kernel sync
// and txhashset sync.
// This prevents a malicious peer sending a kernel MMR where the data file and hash file
// are inconsistent. Once the kernel sync is fully implemented we will only receive a data
// file and this will no longer be required.
fn validate_kernel_history_via_rewind(
&self,
header: &BlockHeader,
txhashset: &txhashset::TxHashSet,
) -> Result<(), Error> {
debug!("validate_kernel_history_via_rewind: about to validate kernels via rewind");

let mut count = 0;
let mut current = header.clone();
Expand Down Expand Up @@ -857,6 +902,26 @@ impl Chain {
txhashset::clean_header_folder(&sandbox_dir);
}

pub fn kernel_data_write(&self, reader: &mut Read) -> Result<(), Error> {
let txhashset = self.txhashset.read();
let head_header = self.head_header()?;

let tempdir = tempfile::tempdir()?;
let mut backend: PMMRBackend<TxKernel> = PMMRBackend::new(tempdir.path(), false, None)?;
let mut kernel_view = RebuildableKernelView::new(&mut backend);

// Rebuilding the kernel view will verify the following -
// * all kernel signatures
// * kernel MMR root matches for each historical header
{
let txhashset = self.txhashset.read();
kernel_view.rebuild(reader, &txhashset, &head_header)?;
}

// Backend storage will be deleted once the tempdir goes out of scope.
Ok(())
}

/// Writes a reading view on a txhashset state that's been provided to us.
/// If we're willing to accept that new state, the data stream will be
/// read as a zip file, unzipped and the resulting state files should be
Expand Down Expand Up @@ -897,11 +962,19 @@ impl Chain {
// We must rebuild the header MMR ourselves based on the headers in our db.
self.rebuild_header_mmr(&Tip::from_header(&header), &mut txhashset)?;

// Validate the full kernel history (kernel MMR root for every block header).
self.validate_kernel_history(&header, &txhashset)?;

// all good, prepare a new batch and update all the required records
debug!("txhashset_write: rewinding a 2nd time (writeable)");
// Validate the full kernel history -
// * all kernel signatures
// * kernel MMR root at each block height
{
// Validate kernel MMR roots and all kernel signatures by rebuilding the kernel
// MMR from the kernel data file.
self.validate_kernel_history(&header, &txhashset)?;

// (Re)validate kernel MMR roots here against the kernel MMR in the txhashset.
// Ensures a malicious peer does not send us inconsistent data and hash files.
// This will not be necessary once kernel sync is a separate payload.
self.validate_kernel_history_via_rewind(&header, &txhashset)?;
}

let mut batch = self.store.batch()?;

Expand Down
8 changes: 8 additions & 0 deletions chain/src/error.rs
Original file line number Diff line number Diff line change
Expand Up @@ -256,6 +256,14 @@ impl From<io::Error> for Error {
}
}

impl From<ser::Error> for Error {
fn from(error: ser::Error) -> Error {
Error {
inner: Context::new(ErrorKind::SerErr(error)),
}
}
}

impl From<secp::Error> for Error {
fn from(e: secp::Error) -> Error {
Error {
Expand Down
2 changes: 2 additions & 0 deletions chain/src/txhashset.rs
Original file line number Diff line number Diff line change
Expand Up @@ -15,10 +15,12 @@
//! Utility structs to handle the 3 hashtrees (output, range proof,
//! kernel) more conveniently and transactionally.

mod rebuildable_kernel_view;
mod rewindable_kernel_view;
mod txhashset;
mod utxo_view;

pub use self::rebuildable_kernel_view::*;
pub use self::rewindable_kernel_view::*;
pub use self::txhashset::*;
pub use self::utxo_view::*;
Loading