diff --git a/Cargo.lock b/Cargo.lock index 590d7247f0..4767ab3f7a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -130,6 +130,11 @@ dependencies = [ "which 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "bit-vec" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "bitflags" version = "0.9.1" @@ -719,6 +724,7 @@ dependencies = [ name = "grin_chain" version = "3.0.0-alpha.1" dependencies = [ + "bit-vec 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", "bitflags 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "chrono 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", @@ -894,6 +900,7 @@ dependencies = [ name = "grin_store" version = "3.0.0-alpha.1" dependencies = [ + "bit-vec 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "chrono 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", "croaring 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2744,6 +2751,7 @@ dependencies = [ "checksum backtrace-sys 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)" = "12cb9f1eef1d1fc869ad5a26c9fa48516339a15e54a227a25460fc304815fdb3" "checksum base64 0.9.3 (registry+https://github.com/rust-lang/crates.io-index)" = "489d6c0ed21b11d038c31b6ceccca973e65d73ba3bd8ecb9a2babf5546164643" "checksum bindgen 0.37.4 (registry+https://github.com/rust-lang/crates.io-index)" = "1b25ab82877ea8fe6ce1ce1f8ac54361f0218bad900af9eb11803994bf67c221" +"checksum bit-vec 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a4523a10839ffae575fb08aa3423026c8cb4687eef43952afb956229d4f246f7" "checksum bitflags 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4efd02e230a02e18f92fc2735f44597385ed02ad8f831e7c1c1156ee5e1ab3a5" "checksum bitflags 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3d155346769a6855b86399e9bc3814ab343cd3d62c7e985113d46a0ec3c281fd" "checksum blake2-rfc 0.2.18 (registry+https://github.com/rust-lang/crates.io-index)" = "5d6d530bdd2d52966a6d03b7a964add7ae1a288d25214066fd4b600f0f796400" diff --git a/api/src/types.rs b/api/src/types.rs index a4ec25796d..c40c72f87d 100644 --- a/api/src/types.rs +++ b/api/src/types.rs @@ -119,7 +119,7 @@ impl TxHashSet { pub fn from_head(head: Arc) -> TxHashSet { let roots = head.get_txhashset_roots(); TxHashSet { - output_root_hash: roots.output_root.to_hex(), + output_root_hash: roots.output_root().to_hex(), range_proof_root_hash: roots.rproof_root.to_hex(), kernel_root_hash: roots.kernel_root.to_hex(), } diff --git a/chain/Cargo.toml b/chain/Cargo.toml index ff7ee396d1..814518f5c8 100644 --- a/chain/Cargo.toml +++ b/chain/Cargo.toml @@ -10,6 +10,7 @@ workspace = ".." edition = "2018" [dependencies] +bit-vec = "0.6" bitflags = "1" byteorder = "1" failure = "0.1" diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 676c69d928..c529d05ea0 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -594,7 +594,7 @@ impl Chain { b.header.prev_root = prev_root; // Set the output, rangeproof and kernel MMR roots. 
- b.header.output_root = roots.output_root; + b.header.output_root = roots.output_root(); b.header.range_proof_root = roots.rproof_root; b.header.kernel_root = roots.kernel_root; diff --git a/chain/src/txhashset.rs b/chain/src/txhashset.rs index 6aceaf9e0a..6a23b355ba 100644 --- a/chain/src/txhashset.rs +++ b/chain/src/txhashset.rs @@ -15,10 +15,12 @@ //! Utility structs to handle the 3 hashtrees (output, range proof, //! kernel) more conveniently and transactionally. +mod bitmap_accumulator; mod rewindable_kernel_view; mod txhashset; mod utxo_view; +pub use self::bitmap_accumulator::*; pub use self::rewindable_kernel_view::*; pub use self::txhashset::*; pub use self::utxo_view::*; diff --git a/chain/src/txhashset/bitmap_accumulator.rs b/chain/src/txhashset/bitmap_accumulator.rs new file mode 100644 index 0000000000..4d3d7dcd5b --- /dev/null +++ b/chain/src/txhashset/bitmap_accumulator.rs @@ -0,0 +1,239 @@ +// Copyright 2019 The Grin Developers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::convert::TryFrom; +use std::time::Instant; + +use bit_vec::BitVec; +use croaring::Bitmap; + +use crate::core::core::hash::{DefaultHashable, Hash}; +use crate::core::core::pmmr::{self, ReadonlyPMMR, VecBackend, PMMR}; +use crate::core::ser::{self, FixedLength, PMMRable, Readable, Reader, Writeable, Writer}; +use crate::error::{Error, ErrorKind}; + +/// The "bitmap accumulator" allows us to commit to a specific bitmap by splitting it into +/// fragments and inserting these fragments into an MMR to produce an overall root hash. +/// Leaves in the MMR are fragments of the bitmap consisting of 1024 contiguous bits +/// from the overall bitmap. The first (leftmost) leaf in the MMR represents the first 1024 bits +/// of the bitmap, the next leaf is the next 1024 bits of the bitmap etc. +/// +/// Flipping a single bit does not require the full bitmap to be rehashed, only the path from the +/// relevant leaf up to its associated peak. +/// +/// Flipping multiple bits *within* a single chunk is no more expensive than flipping a single bit +/// as a leaf node in the MMR represents a sequence of 1024 bits. Flipping multiple bits located +/// close together is a relatively cheap operation with minimal rehashing required to update the +/// relevant peaks and the overall MMR root. +/// +/// It is also possible to generate Merkle proofs for these 1024 bit fragments, proving +/// both inclusion and location in the overall "accumulator" MMR. We plan to take advantage of +/// this during fast sync, allowing for validation of partial data. +/// +#[derive(Clone)] +pub struct BitmapAccumulator { + backend: VecBackend<BitmapChunk>, +} + +impl BitmapAccumulator { + /// Create a new empty bitmap accumulator. + pub fn new() -> BitmapAccumulator { + BitmapAccumulator { + backend: VecBackend::new_hash_only(), + } + } + + /// Initialize a bitmap accumulator given the provided idx iterator.
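+ /// For example (illustrative sketch, mirroring TxHashSet::bitmap_accumulator in txhashset.rs below): accumulator.init(&mut pmmr.leaf_idx_iter(0), pmmr::n_leaves(last_pos)) rebuilds the accumulator from the leaf indices of all unspent outputs.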
+ pub fn init<T: IntoIterator<Item = u64>>(&mut self, idx: T, size: u64) -> Result<(), Error> { + self.apply_from(idx, 0, size) + } + + /// Find the start of the first "chunk" of 1024 bits from the provided idx. + /// Zero the last 10 bits to round down to multiple of 1024. + pub fn chunk_start_idx(idx: u64) -> u64 { + idx & !0x3ff + } + + /// The first 1024 bits belong to chunk 0, the next 1024 to chunk 1 etc. + fn chunk_idx(idx: u64) -> u64 { + idx / 1024 + } + + /// Apply the provided idx iterator to our bitmap accumulator. + /// We start at the chunk containing from_idx and rebuild chunks as necessary + /// for the bitmap, limiting it to size (in bits). + /// If from_idx is 1023 and size is 1024 then we rebuild a single chunk. + fn apply_from<T>(&mut self, idx: T, from_idx: u64, size: u64) -> Result<(), Error> + where + T: IntoIterator<Item = u64>, + { + let now = Instant::now(); + + // Find the (1024 bit chunk) chunk_idx for the (individual bit) from_idx. + let from_chunk_idx = BitmapAccumulator::chunk_idx(from_idx); + let mut chunk_idx = from_chunk_idx; + + let mut chunk = BitmapChunk::new(); + + let mut idx_iter = idx.into_iter().filter(|&x| x < size).peekable(); + while let Some(x) = idx_iter.peek() { + if *x < chunk_idx * 1024 { + // skip until we reach our first chunk + idx_iter.next(); + } else if *x < (chunk_idx + 1) * 1024 { + let idx = idx_iter.next().expect("next after peek"); + chunk.set(idx % 1024, true); + } else { + self.append_chunk(chunk)?; + chunk_idx += 1; + chunk = BitmapChunk::new(); + } + } + if chunk.any() { + self.append_chunk(chunk)?; + } + debug!( + "applied {} chunks from idx {} to idx {} ({}ms)", + 1 + chunk_idx - from_chunk_idx, + from_chunk_idx, + chunk_idx, + now.elapsed().as_millis(), + ); + Ok(()) + } + + /// Apply updates to the bitmap accumulator given an iterator of invalidated idx and + /// an iterator of idx to be set to true. + /// We determine the existing chunks to be rebuilt given the invalidated idx. + /// We then rebuild given idx, extending the accumulator with new chunk(s) as necessary. + /// Resulting bitmap accumulator will contain sufficient bitmap chunks to cover size. + /// If size is 1 then we will have a single chunk. + /// If size is 1023 then we will have a single chunk (bits 0 to 1023 inclusive). + /// If size is 1025 (with a bit set in the second chunk) then we will have two chunks. + pub fn apply<T, U>(&mut self, invalidated_idx: T, idx: U, size: u64) -> Result<(), Error> + where + T: IntoIterator<Item = u64>, + U: IntoIterator<Item = u64>, + { + // Determine the earliest chunk by looking at the min invalidated idx (assume sorted). + // Rewind prior to this and reapply new_idx. + // Note: We rebuild everything after rewind point but much of the bitmap may be + // unchanged. This can be further optimized by only rebuilding necessary chunks and + // rehashing. + if let Some(from_idx) = invalidated_idx.into_iter().next() { + self.rewind_prior(from_idx)?; + self.pad_left(from_idx)?; + self.apply_from(idx, from_idx, size)?; + } + + Ok(()) + } + + /// Given the provided (bit) idx rewind the bitmap accumulator to the end of the + /// previous chunk ready for the updated chunk to be appended.
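+ /// E.g. for a from_idx somewhere in chunk 2 we get insertion_to_pmmr_index(3) = 4 (1-based leaf numbering), so we rewind to MMR position 3, the peak covering the first two chunks (an illustrative walk-through, not additional behaviour).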
+ fn rewind_prior(&mut self, from_idx: u64) -> Result<(), Error> { + let chunk_idx = BitmapAccumulator::chunk_idx(from_idx); + let last_pos = self.backend.size(); + let mut pmmr = PMMR::at(&mut self.backend, last_pos); + let chunk_pos = pmmr::insertion_to_pmmr_index(chunk_idx + 1); + let rewind_pos = chunk_pos.saturating_sub(1); + pmmr.rewind(rewind_pos, &Bitmap::create()) + .map_err(|e| ErrorKind::Other(e))?; + Ok(()) + } + + /// Make sure we append empty chunks to fill in any gap before we append the chunk + /// we actually care about. This effectively pads the bitmap with 1024-bit chunks of 0s + /// as necessary to put the new chunk at the correct place. + fn pad_left(&mut self, from_idx: u64) -> Result<(), Error> { + let chunk_idx = BitmapAccumulator::chunk_idx(from_idx); + let current_chunk_idx = pmmr::n_leaves(self.backend.size()); + for _ in current_chunk_idx..chunk_idx { + self.append_chunk(BitmapChunk::new())?; + } + Ok(()) + } + + /// Append a new chunk to the BitmapAccumulator. + /// Append parent hashes (if any) as necessary to build associated peak. + pub fn append_chunk(&mut self, chunk: BitmapChunk) -> Result<u64, Error> { + let last_pos = self.backend.size(); + PMMR::at(&mut self.backend, last_pos) + .push(&chunk) + .map_err(|e| ErrorKind::Other(e).into()) + } + + /// The root hash of the bitmap accumulator MMR. + pub fn root(&self) -> Hash { + ReadonlyPMMR::at(&self.backend, self.backend.size()).root() + } +} + +/// A bitmap "chunk" representing 1024 contiguous bits of the overall bitmap. +/// The first 1024 bits belong in one chunk. The next 1024 bits in the next chunk, etc. +#[derive(Clone, Debug)] +pub struct BitmapChunk(BitVec); + +impl BitmapChunk { + const LEN_BITS: usize = 1024; + const LEN_BYTES: usize = Self::LEN_BITS / 8; + + /// Create a new bitmap chunk, defaulting all bits in the chunk to false. + pub fn new() -> BitmapChunk { + BitmapChunk(BitVec::from_elem(Self::LEN_BITS, false)) + } + + /// Set a single bit in this chunk. + /// 0-indexed from start of chunk. + /// Panics if idx is outside the valid range of bits in a chunk. + pub fn set(&mut self, idx: u64, value: bool) { + let idx = usize::try_from(idx).expect("usize from u64"); + assert!(idx < Self::LEN_BITS); + self.0.set(idx, value) + } + + /// Does this bitmap chunk have any bits set to 1? + pub fn any(&self) -> bool { + self.0.any() + } +} + +impl PMMRable for BitmapChunk { + type E = Self; + + fn as_elmt(&self) -> Self::E { + self.clone() + } +} + +impl FixedLength for BitmapChunk { + const LEN: usize = Self::LEN_BYTES; +} + +impl DefaultHashable for BitmapChunk {} + +impl Writeable for BitmapChunk { + fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> { + self.0.to_bytes().write(writer) + } +} + +impl Readable for BitmapChunk { + /// Reading is not currently supported, just return an empty one for now. + /// We store the underlying roaring bitmap externally for the bitmap accumulator + /// and the "hash only" backend means we never actually read these chunks.
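+ /// (A full implementation would presumably read Self::LEN_BYTES bytes and rebuild the chunk via BitVec::from_bytes; that is deliberately not done here.)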
+ fn read(_reader: &mut dyn Reader) -> Result<BitmapChunk, ser::Error> { + Ok(BitmapChunk::new()) + } +} diff --git a/chain/src/txhashset/txhashset.rs b/chain/src/txhashset/txhashset.rs index 11adcff23e..ba2d3ef0f6 100644 --- a/chain/src/txhashset/txhashset.rs +++ b/chain/src/txhashset/txhashset.rs @@ -23,8 +23,9 @@ use crate::core::core::{Block, BlockHeader, Input, Output, OutputIdentifier, TxK use crate::core::ser::{PMMRIndexHashable, PMMRable, ProtocolVersion}; use crate::error::{Error, ErrorKind}; use crate::store::{Batch, ChainStore}; +use crate::txhashset::bitmap_accumulator::BitmapAccumulator; use crate::txhashset::{RewindableKernelView, UTXOView}; -use crate::types::{OutputMMRPosition, Tip, TxHashSetRoots, TxHashsetWriteStatus}; +use crate::types::{OutputMMRPosition, OutputRoots, Tip, TxHashSetRoots, TxHashsetWriteStatus}; use crate::util::secp::pedersen::{Commitment, RangeProof}; use crate::util::{file, secp_static, zip}; use croaring::Bitmap; @@ -117,6 +118,8 @@ pub struct TxHashSet { rproof_pmmr_h: PMMRHandle<RangeProof>, kernel_pmmr_h: PMMRHandle<TxKernel>, + bitmap_accumulator: BitmapAccumulator, + // chain store used as index of commitments to MMR positions commit_index: Arc<ChainStore>, } @@ -148,6 +151,9 @@ impl TxHashSet { header, )?; + // Initialize the bitmap accumulator from the current output PMMR. + let bitmap_accumulator = TxHashSet::bitmap_accumulator(&output_pmmr_h)?; + let mut maybe_kernel_handle: Option<PMMRHandle<TxKernel>> = None; let versions = vec![ProtocolVersion(2), ProtocolVersion(1)]; for version in versions { @@ -195,6 +201,7 @@ impl TxHashSet { output_pmmr_h, rproof_pmmr_h, kernel_pmmr_h, + bitmap_accumulator, commit_index, }) } else { @@ -202,6 +209,15 @@ impl TxHashSet { } } + // Build a new bitmap accumulator for the provided output PMMR. + fn bitmap_accumulator(pmmr_h: &PMMRHandle<Output>) -> Result<BitmapAccumulator, Error> { + let pmmr = ReadonlyPMMR::at(&pmmr_h.backend, pmmr_h.last_pos); + let size = pmmr::n_leaves(pmmr_h.last_pos); + let mut bitmap_accumulator = BitmapAccumulator::new(); + bitmap_accumulator.init(&mut pmmr.leaf_idx_iter(0), size)?; + Ok(bitmap_accumulator) + } + /// Close all backend file handles pub fn release_backend_files(&mut self) { self.output_pmmr_h.backend.release_files(); @@ -329,7 +345,10 @@ impl TxHashSet { ReadonlyPMMR::at(&self.kernel_pmmr_h.backend, self.kernel_pmmr_h.last_pos); TxHashSetRoots { - output_root: output_pmmr.root(), + output_roots: OutputRoots { + pmmr_root: output_pmmr.root(), + bitmap_root: self.bitmap_accumulator.root(), + }, rproof_root: rproof_pmmr.root(), kernel_root: kernel_pmmr.root(), } @@ -554,6 +573,7 @@ where let sizes: (u64, u64, u64); let res: Result<T, Error>; let rollback: bool; + let bitmap_accumulator: BitmapAccumulator; + let head = batch.head()?; @@ -581,6 +601,7 @@ where rollback = extension_pair.extension.rollback; sizes = extension_pair.extension.sizes(); + bitmap_accumulator = extension_pair.extension.bitmap_accumulator.clone(); } // During an extension we do not want to modify the header_extension (and only read from it). @@ -610,6 +631,9 @@ where trees.output_pmmr_h.last_pos = sizes.0; trees.rproof_pmmr_h.last_pos = sizes.1; trees.kernel_pmmr_h.last_pos = sizes.2; + + // Update our bitmap_accumulator based on our extension + trees.bitmap_accumulator = bitmap_accumulator; } trace!("TxHashSet extension done."); @@ -826,6 +850,8 @@ pub struct Extension<'a> { rproof_pmmr: PMMR<'a, RangeProof, PMMRBackend<RangeProof>>, kernel_pmmr: PMMR<'a, TxKernel, PMMRBackend<TxKernel>>, + bitmap_accumulator: BitmapAccumulator, + /// Rollback flag.
rollback: bool, @@ -879,6 +905,7 @@ impl<'a> Extension<'a> { &mut trees.kernel_pmmr_h.backend, trees.kernel_pmmr_h.last_pos, ), + bitmap_accumulator: trees.bitmap_accumulator.clone(), rollback: false, batch, } @@ -901,28 +928,53 @@ impl<'a> Extension<'a> { /// Apply a new block to the current txhashset extension (output, rangeproof, kernel MMRs). pub fn apply_block(&mut self, b: &Block) -> Result<(), Error> { + let mut affected_pos = vec![]; + for out in b.outputs() { let pos = self.apply_output(out)?; - // Update the (output_pos,height) index for the new output. + affected_pos.push(pos); self.batch .save_output_pos_height(&out.commitment(), pos, b.header.height)?; } for input in b.inputs() { - self.apply_input(input)?; + let pos = self.apply_input(input)?; + affected_pos.push(pos); } for kernel in b.kernels() { self.apply_kernel(kernel)?; } + // Update our BitmapAccumulator based on affected outputs (both spent and created). + self.apply_to_bitmap_accumulator(&affected_pos)?; + // Update the head of the extension to reflect the block we just applied. self.head = Tip::from_header(&b.header); Ok(()) } - fn apply_input(&mut self, input: &Input) -> Result<(), Error> { + fn apply_to_bitmap_accumulator(&mut self, output_pos: &[u64]) -> Result<(), Error> { + // if self.output_pmmr.is_empty() || output_pos.is_empty() { + // return Ok(()); + // } + // Map affected output MMR positions to 0-based leaf insertion indices (e.g. pos [1, 2, 4, 5, 8] -> idx [0, 1, 2, 3, 4]). + let mut output_idx: Vec<_> = output_pos + .iter() + .map(|x| pmmr::n_leaves(*x).saturating_sub(1)) + .collect(); + output_idx.sort_unstable(); + let min_idx = output_idx.first().cloned().unwrap_or(0); + let size = pmmr::n_leaves(self.output_pmmr.last_pos); + self.bitmap_accumulator.apply( + output_idx, + self.output_pmmr + .leaf_idx_iter(BitmapAccumulator::chunk_start_idx(min_idx)), + size, + ) + } + + fn apply_input(&mut self, input: &Input) -> Result<u64, Error> { let commit = input.commitment(); let pos_res = self.batch.get_output_pos(&commit); if let Ok(pos) = pos_res { @@ -943,14 +995,14 @@ impl<'a> Extension<'a> { self.rproof_pmmr .prune(pos) .map_err(|e| ErrorKind::TxHashSetErr(e))?; + Ok(pos) } - Ok(false) => return Err(ErrorKind::AlreadySpent(commit).into()), - Err(e) => return Err(ErrorKind::TxHashSetErr(e).into()), + Ok(false) => Err(ErrorKind::AlreadySpent(commit).into()), + Err(e) => Err(ErrorKind::TxHashSetErr(e).into()), } } else { - return Err(ErrorKind::AlreadySpent(commit).into()); + Err(ErrorKind::AlreadySpent(commit).into()) } - Ok(()) } fn apply_output(&mut self, out: &Output) -> Result<(u64), Error> { @@ -1083,6 +1135,13 @@ impl<'a> Extension<'a> { self.kernel_pmmr .rewind(kernel_pos, &Bitmap::create()) .map_err(&ErrorKind::TxHashSetErr)?; + + // Update our BitmapAccumulator based on affected outputs. + // We want to "unspend" every rewound spent output. + // Treat output_pos as an affected output to ensure we rebuild far enough back. + let mut affected_pos: Vec<_> = rewind_rm_pos.iter().map(|x| x as u64).collect(); + affected_pos.push(output_pos); + self.apply_to_bitmap_accumulator(&affected_pos)?; Ok(()) } /// Calculate the new root of the output, range proof @@ -1090,10 +1149,13 @@ impl<'a> Extension<'a> { /// and kernel sum trees.
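+ /// The output component is an OutputRoots pair (output PMMR root plus bitmap accumulator root); see OutputRoots::merged_root in chain/src/types.rs for how the two can be combined as H(pmmr_size | pmmr_root | bitmap_root).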
pub fn roots(&self) -> Result { Ok(TxHashSetRoots { - output_root: self - .output_pmmr - .root() - .map_err(|_| ErrorKind::InvalidRoot)?, + output_roots: OutputRoots { + pmmr_root: self + .output_pmmr + .root() + .map_err(|_| ErrorKind::InvalidRoot)?, + bitmap_root: self.bitmap_accumulator.root(), + }, rproof_root: self .rproof_pmmr .root() @@ -1111,16 +1173,7 @@ impl<'a> Extension<'a> { return Ok(()); } let head_header = self.batch.get_block_header(&self.head.hash())?; - let header_roots = TxHashSetRoots { - output_root: head_header.output_root, - rproof_root: head_header.range_proof_root, - kernel_root: head_header.kernel_root, - }; - if header_roots != self.roots()? { - Err(ErrorKind::InvalidRoot.into()) - } else { - Ok(()) - } + self.roots()?.validate(&head_header) } /// Validate the header, output and kernel MMR sizes against the block header. diff --git a/chain/src/types.rs b/chain/src/types.rs index b672ee02cf..cbf61879a4 100644 --- a/chain/src/types.rs +++ b/chain/src/types.rs @@ -20,8 +20,8 @@ use std::sync::Arc; use crate::core::core::hash::{Hash, Hashed, ZERO_HASH}; use crate::core::core::{Block, BlockHeader}; use crate::core::pow::Difficulty; -use crate::core::ser; -use crate::error::Error; +use crate::core::ser::{self, PMMRIndexHashable}; +use crate::error::{Error, ErrorKind}; use crate::util::RwLock; bitflags! { @@ -181,18 +181,70 @@ impl TxHashsetWriteStatus for SyncState { } } -/// A helper to hold the roots of the txhashset in order to keep them -/// readable. -#[derive(Debug, PartialEq)] +/// A helper for the various txhashset MMR roots. +#[derive(Debug)] pub struct TxHashSetRoots { - /// Output root - pub output_root: Hash, + /// Output roots + pub output_roots: OutputRoots, /// Range Proof root pub rproof_root: Hash, /// Kernel root pub kernel_root: Hash, } +impl TxHashSetRoots { + /// Accessor for the underlying output PMMR root + pub fn output_root(&self) -> Hash { + self.output_roots.output_root() + } + + /// Validate roots against the provided block header. + pub fn validate(&self, header: &BlockHeader) -> Result<(), Error> { + debug!( + "validate roots: {} at {}, output_root: {}, output pmmr: {} (bitmap: {}, merged: {})", + header.hash(), + header.height, + header.output_root, + self.output_roots.output_root(), + self.output_roots.bitmap_root, + self.output_roots.merged_root(header), + ); + + if header.output_root != self.output_roots.pmmr_root { + Err(ErrorKind::InvalidRoot.into()) + } else if header.range_proof_root != self.rproof_root { + Err(ErrorKind::InvalidRoot.into()) + } else if header.kernel_root != self.kernel_root { + Err(ErrorKind::InvalidRoot.into()) + } else { + Ok(()) + } + } +} + +/// A helper for the various output roots. +#[derive(Debug)] +pub struct OutputRoots { + /// The output PMMR root + pub pmmr_root: Hash, + /// The bitmap accumulator root + pub bitmap_root: Hash, +} + +impl OutputRoots { + /// The root of the underlying output PMMR. + pub fn output_root(&self) -> Hash { + self.pmmr_root + } + + /// Hash the root of the output PMMR and the root of the bitmap accumulator + /// together with the size of the output PMMR (for consistency with existing PMMR impl). + /// H(pmmr_size | pmmr_root | bitmap_root) + pub fn merged_root(&self, header: &BlockHeader) -> Hash { + (self.pmmr_root, self.bitmap_root).hash_with_index(header.output_mmr_size) + } +} + /// A helper to hold the output pmmr position of the txhashset in order to keep them /// readable. 
#[derive(Debug)] diff --git a/chain/tests/bitmap_accumulator.rs b/chain/tests/bitmap_accumulator.rs new file mode 100644 index 0000000000..38d03cc40f --- /dev/null +++ b/chain/tests/bitmap_accumulator.rs @@ -0,0 +1,188 @@ +// Copyright 2019 The Grin Developers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use self::chain::txhashset::BitmapAccumulator; +use self::core::core::hash::Hash; +use self::core::ser::PMMRIndexHashable; +use bit_vec::BitVec; +use grin_chain as chain; +use grin_core as core; +use grin_util as util; + +#[test] +fn test_bitmap_accumulator() { + util::init_test_logger(); + + let mut accumulator = BitmapAccumulator::new(); + assert_eq!(accumulator.root(), Hash::default()); + + // 1000... (rebuild from 0, setting [0] true) + accumulator.apply(vec![0], vec![0], 1).unwrap(); + let expected_hash = { + let mut bit_vec = BitVec::from_elem(1024, false); + bit_vec.set(0, true); + bit_vec.to_bytes().hash_with_index(0) + }; + assert_eq!(accumulator.root(), expected_hash); + + // 1100... (rebuild from 0, setting [0, 1] true) + accumulator.apply(vec![0], vec![0, 1], 2).unwrap(); + let expected_hash = { + let mut bit_vec = BitVec::from_elem(1024, false); + bit_vec.set(0, true); + bit_vec.set(1, true); + bit_vec.to_bytes().hash_with_index(0) + }; + assert_eq!(accumulator.root(), expected_hash); + + // 0100... (rebuild from 0, setting [1] true, which will reset [0] false) + accumulator.apply(vec![0], vec![1], 2).unwrap(); + let expected_hash = { + let mut bit_vec = BitVec::from_elem(1024, false); + bit_vec.set(1, true); + let expected_bytes = bit_vec.to_bytes(); + expected_bytes.hash_with_index(0) + }; + assert_eq!(accumulator.root(), expected_hash); + + // 0100... (rebuild from 1, setting [1] true) + accumulator.apply(vec![1], vec![1], 2).unwrap(); + let expected_hash = { + let mut bit_vec = BitVec::from_elem(1024, false); + bit_vec.set(1, true); + let expected_bytes = bit_vec.to_bytes(); + expected_bytes.hash_with_index(0) + }; + assert_eq!(accumulator.root(), expected_hash); + + // 0100...0001 (rebuild from 0, setting [1, 1023] true) + accumulator.apply(vec![0], vec![1, 1023], 1024).unwrap(); + let expected_hash = { + let mut bit_vec = BitVec::from_elem(1024, false); + bit_vec.set(1, true); + bit_vec.set(1023, true); + let expected_bytes = bit_vec.to_bytes(); + expected_bytes.hash_with_index(0) + }; + assert_eq!(accumulator.root(), expected_hash); + + // Now set bits such that we extend the bitmap accumulator across multiple 1024 bit chunks. + // We need a second bit_vec here to reflect the additional chunk. 
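+ // (The expected root below mirrors the MMR structure: the two 1024-bit chunk leaves hash with their 0-based MMR positions 0 and 1, and their parent, the single peak and root, hashes at position 2.)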
+ // 0100...0001, 1000...0000 (rebuild from 0, setting [1, 1023, 1024] true) + accumulator + .apply(vec![0], vec![1, 1023, 1024], 1025) + .unwrap(); + let expected_hash = { + let mut bit_vec = BitVec::from_elem(1024, false); + bit_vec.set(1, true); + bit_vec.set(1023, true); + let mut bit_vec2 = BitVec::from_elem(1024, false); + bit_vec2.set(0, true); + let expected_bytes_0 = bit_vec.to_bytes(); + let expected_bytes_1 = bit_vec2.to_bytes(); + let expected_hash_0 = expected_bytes_0.hash_with_index(0); + let expected_hash_1 = expected_bytes_1.hash_with_index(1); + (expected_hash_0, expected_hash_1).hash_with_index(2) + }; + assert_eq!(accumulator.root(), expected_hash); + + // Just rebuild the second bitmap chunk. + // 0100...0001, 0100...0000 (rebuild from 1025, setting [1025] true) + accumulator.apply(vec![1025], vec![1025], 1026).unwrap(); + let expected_hash = { + let mut bit_vec = BitVec::from_elem(1024, false); + bit_vec.set(1, true); + bit_vec.set(1023, true); + let mut bit_vec2 = BitVec::from_elem(1024, false); + bit_vec2.set(1, true); + let expected_bytes_0 = bit_vec.to_bytes(); + let expected_bytes_1 = bit_vec2.to_bytes(); + let expected_hash_0 = expected_bytes_0.hash_with_index(0); + let expected_hash_1 = expected_bytes_1.hash_with_index(1); + (expected_hash_0, expected_hash_1).hash_with_index(2) + }; + assert_eq!(accumulator.root(), expected_hash); + + // Rebuild the first bitmap chunk and all chunks after it. + // 0100...0000, 0100...0000 (rebuild from 1, setting [1, 1025] true) + accumulator.apply(vec![1], vec![1, 1025], 1026).unwrap(); + let expected_hash = { + let mut bit_vec = BitVec::from_elem(1024, false); + bit_vec.set(1, true); + let mut bit_vec2 = BitVec::from_elem(1024, false); + bit_vec2.set(1, true); + let expected_bytes_0 = bit_vec.to_bytes(); + let expected_bytes_1 = bit_vec2.to_bytes(); + let expected_hash_0 = expected_bytes_0.hash_with_index(0); + let expected_hash_1 = expected_bytes_1.hash_with_index(1); + (expected_hash_0, expected_hash_1).hash_with_index(2) + }; + assert_eq!(accumulator.root(), expected_hash); + + // Make sure we handle the case where the first chunk is all 0s + // 0000...0000, 0100...0000 (rebuild from 1, setting [1025] true) + accumulator.apply(vec![1], vec![1025], 1026).unwrap(); + let expected_hash = { + let bit_vec = BitVec::from_elem(1024, false); + let mut bit_vec2 = BitVec::from_elem(1024, false); + bit_vec2.set(1, true); + let expected_bytes_0 = bit_vec.to_bytes(); + let expected_bytes_1 = bit_vec2.to_bytes(); + let expected_hash_0 = expected_bytes_0.hash_with_index(0); + let expected_hash_1 = expected_bytes_1.hash_with_index(1); + (expected_hash_0, expected_hash_1).hash_with_index(2) + }; + assert_eq!(accumulator.root(), expected_hash); + + // Check that removing the last bit in a chunk removes the now empty chunk + // if it is the rightmost chunk. + // 0000...0001 (rebuild from 1023, setting [1023] true) + accumulator.apply(vec![1023], vec![1023], 1024).unwrap(); + let expected_hash = { + let mut bit_vec = BitVec::from_elem(1024, false); + bit_vec.set(1023, true); + let expected_bytes = bit_vec.to_bytes(); + expected_bytes.hash_with_index(0) + }; + assert_eq!(accumulator.root(), expected_hash); + + // Make sure we pad appropriately with 0s if we set a distant bit to 1. + // Start with an empty accumulator. 
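+ // (With four 1024-bit chunks the expected hashes below track the MMR node positions: leaves at 0-based positions 0, 1, 3 and 4, their parents at 2 and 5, and the root combining the two parents at position 6.)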
+ // 0000...0000, 0000...0000, 0000...0000, 0000...0001 (rebuild from 4095, setting [4095] true) + let mut accumulator = BitmapAccumulator::new(); + accumulator.apply(vec![4095], vec![4095], 4096).unwrap(); + let expected_hash = { + let bit_vec0 = BitVec::from_elem(1024, false); + let bit_vec1 = BitVec::from_elem(1024, false); + let bit_vec2 = BitVec::from_elem(1024, false); + let mut bit_vec3 = BitVec::from_elem(1024, false); + bit_vec3.set(1023, true); + + let expected_bytes_0 = bit_vec0.to_bytes(); + let expected_bytes_1 = bit_vec1.to_bytes(); + let expected_bytes_2 = bit_vec2.to_bytes(); + let expected_bytes_3 = bit_vec3.to_bytes(); + + let expected_hash_0 = expected_bytes_0.hash_with_index(0); + let expected_hash_1 = expected_bytes_1.hash_with_index(1); + let expected_hash_2 = (expected_hash_0, expected_hash_1).hash_with_index(2); + + let expected_hash_3 = expected_bytes_2.hash_with_index(3); + let expected_hash_4 = expected_bytes_3.hash_with_index(4); + let expected_hash_5 = (expected_hash_3, expected_hash_4).hash_with_index(5); + + (expected_hash_2, expected_hash_5).hash_with_index(6) + }; + assert_eq!(accumulator.root(), expected_hash); +} diff --git a/core/src/core/pmmr.rs b/core/src/core/pmmr.rs index 0116f533a3..6b96b72b3c 100644 --- a/core/src/core/pmmr.rs +++ b/core/src/core/pmmr.rs @@ -40,8 +40,10 @@ mod backend; mod pmmr; mod readonly_pmmr; mod rewindable_pmmr; +mod vec_backend; pub use self::backend::*; pub use self::pmmr::*; pub use self::readonly_pmmr::*; pub use self::rewindable_pmmr::*; +pub use self::vec_backend::*; diff --git a/core/src/core/pmmr/backend.rs b/core/src/core/pmmr/backend.rs index 6c0aae6eb0..5a9ef90c67 100644 --- a/core/src/core/pmmr/backend.rs +++ b/core/src/core/pmmr/backend.rs @@ -58,6 +58,10 @@ pub trait Backend { /// Number of leaves fn n_unpruned_leaves(&self) -> u64; + /// Iterator over current (unpruned, unremoved) leaf insertion index. + /// Note: This differs from underlying MMR pos - [0, 1, 2, 3, 4] vs. [1, 2, 4, 5, 8]. + fn leaf_idx_iter(&self, from_idx: u64) -> Box + '_>; + /// Remove Hash by insertion position. An index is also provided so the /// underlying backend can implement some rollback of positions up to a /// given index (practically the index is the height of a block that diff --git a/core/src/core/pmmr/pmmr.rs b/core/src/core/pmmr/pmmr.rs index 106a91d610..4d447d1d64 100644 --- a/core/src/core/pmmr/pmmr.rs +++ b/core/src/core/pmmr/pmmr.rs @@ -84,6 +84,11 @@ where self.backend.n_unpruned_leaves() } + /// Iterator over current (unpruned, unremoved) leaf insertion indices. + pub fn leaf_idx_iter(&self, from_idx: u64) -> impl Iterator + '_ { + self.backend.leaf_idx_iter(from_idx) + } + /// Returns a vec of the peaks of this MMR. pub fn peaks(&self) -> Vec { let peaks_pos = peaks(self.last_pos); diff --git a/core/src/core/pmmr/readonly_pmmr.rs b/core/src/core/pmmr/readonly_pmmr.rs index e4eaee864f..0b013c6f36 100644 --- a/core/src/core/pmmr/readonly_pmmr.rs +++ b/core/src/core/pmmr/readonly_pmmr.rs @@ -91,6 +91,11 @@ where self.backend.leaf_pos_iter() } + /// Iterator over current (unpruned, unremoved) leaf insertion indices. + pub fn leaf_idx_iter(&self, from_idx: u64) -> impl Iterator + '_ { + self.backend.leaf_idx_iter(from_idx) + } + /// Is the MMR empty? 
pub fn is_empty(&self) -> bool { self.last_pos == 0 diff --git a/core/src/core/pmmr/vec_backend.rs b/core/src/core/pmmr/vec_backend.rs new file mode 100644 index 0000000000..47f788a63c --- /dev/null +++ b/core/src/core/pmmr/vec_backend.rs @@ -0,0 +1,153 @@ +// Copyright 2019 The Grin Developers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::collections::HashSet; +use std::convert::TryFrom; +use std::fs::File; + +use croaring::Bitmap; + +use crate::core::hash::Hash; +use crate::core::pmmr::{self, Backend}; +use crate::core::BlockHeader; +use crate::ser::PMMRable; + +/// Simple/minimal/naive MMR backend implementation backed by Vec and Vec. +/// Removed pos are maintained in a HashSet. +#[derive(Clone, Debug)] +pub struct VecBackend { + /// Backend elements (optional, possible to just store hashes). + pub data: Option>, + /// Vec of hashes for the PMMR (both leaves and parents). + pub hashes: Vec, + /// Positions of removed elements (is this applicable if we do not store data?) + pub removed: HashSet, +} + +impl Backend for VecBackend { + fn append(&mut self, elmt: &T, hashes: Vec) -> Result<(), String> { + if let Some(data) = &mut self.data { + data.push(elmt.clone()); + } + self.hashes.append(&mut hashes.clone()); + Ok(()) + } + + fn get_hash(&self, position: u64) -> Option { + if self.removed.contains(&position) { + None + } else { + self.get_from_file(position) + } + } + + fn get_data(&self, position: u64) -> Option { + if self.removed.contains(&position) { + None + } else { + self.get_data_from_file(position) + } + } + + fn get_from_file(&self, position: u64) -> Option { + let idx = usize::try_from(position.saturating_sub(1)).expect("usize from u64"); + self.hashes.get(idx).cloned() + } + + fn get_data_from_file(&self, position: u64) -> Option { + if let Some(data) = &self.data { + let idx = usize::try_from(pmmr::n_leaves(position).saturating_sub(1)) + .expect("usize from u64"); + data.get(idx).map(|x| x.as_elmt()) + } else { + None + } + } + + fn data_as_temp_file(&self) -> Result { + unimplemented!() + } + + /// Number of leaves in the MMR + fn n_unpruned_leaves(&self) -> u64 { + unimplemented!() + } + + fn leaf_pos_iter(&self) -> Box + '_> { + Box::new( + self.hashes + .iter() + .enumerate() + .map(|(x, _)| (x + 1) as u64) + .filter(move |x| pmmr::is_leaf(*x) && !self.removed.contains(x)), + ) + } + + fn leaf_idx_iter(&self, from_idx: u64) -> Box + '_> { + let from_pos = pmmr::insertion_to_pmmr_index(from_idx + 1); + Box::new( + self.leaf_pos_iter() + .skip_while(move |x| *x < from_pos) + .map(|x| pmmr::n_leaves(x).saturating_sub(1)), + ) + } + + fn remove(&mut self, position: u64) -> Result<(), String> { + self.removed.insert(position); + Ok(()) + } + + fn rewind(&mut self, position: u64, _rewind_rm_pos: &Bitmap) -> Result<(), String> { + if let Some(data) = &mut self.data { + let idx = pmmr::n_leaves(position); + data.truncate(usize::try_from(idx).expect("usize from u64")); + } + self.hashes + .truncate(usize::try_from(position).expect("usize from u64")); + 
Ok(()) + } + + fn snapshot(&self, _header: &BlockHeader) -> Result<(), String> { + Ok(()) + } + + fn release_files(&mut self) {} + + fn dump_stats(&self) {} +} + +impl VecBackend { + /// Instantiates a new empty vec backend. + pub fn new() -> VecBackend { + VecBackend { + data: Some(vec![]), + hashes: vec![], + removed: HashSet::new(), + } + } + + /// Instantiate a new empty "hash only" vec backend. + pub fn new_hash_only() -> VecBackend { + VecBackend { + data: None, + hashes: vec![], + removed: HashSet::new(), + } + } + + /// Size of this vec backend in hashes. + pub fn size(&self) -> u64 { + self.hashes.len() as u64 + } +} diff --git a/core/tests/block.rs b/core/tests/block.rs index 2a9bb2213b..a1d3b67a9e 100644 --- a/core/tests/block.rs +++ b/core/tests/block.rs @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -pub mod common; +mod common; use crate::common::{new_block, tx1i2o, tx2i1o, txspend1i1o}; use crate::core::consensus::BLOCK_OUTPUT_WEIGHT; use crate::core::core::block::Error; diff --git a/core/tests/common.rs b/core/tests/common.rs index b4816a60b9..4e1b3040fd 100644 --- a/core/tests/common.rs +++ b/core/tests/common.rs @@ -15,15 +15,18 @@ //! Common test functions use grin_core::core::{Block, BlockHeader, KernelFeatures, Transaction}; +use grin_core::core::hash::DefaultHashable; use grin_core::libtx::{ build::{self, input, output}, proof::{ProofBuild, ProofBuilder}, reward, }; use grin_core::pow::Difficulty; +use grin_core::ser::{self, FixedLength, PMMRable, Readable, Reader, Writeable, Writer}; use keychain::{Identifier, Keychain}; // utility producing a transaction with 2 inputs and a single outputs +#[allow(dead_code)] pub fn tx2i1o() -> Transaction { let keychain = keychain::ExtKeychain::from_random_seed(false).unwrap(); let builder = ProofBuilder::new(&keychain); @@ -41,6 +44,7 @@ pub fn tx2i1o() -> Transaction { } // utility producing a transaction with a single input and output +#[allow(dead_code)] pub fn tx1i1o() -> Transaction { let keychain = keychain::ExtKeychain::from_random_seed(false).unwrap(); let builder = ProofBuilder::new(&keychain); @@ -59,6 +63,7 @@ pub fn tx1i1o() -> Transaction { // utility producing a transaction with a single input // and two outputs (one change output) // Note: this tx has an "offset" kernel +#[allow(dead_code)] pub fn tx1i2o() -> Transaction { let keychain = keychain::ExtKeychain::from_random_seed(false).unwrap(); let builder = ProofBuilder::new(&keychain); @@ -77,6 +82,7 @@ pub fn tx1i2o() -> Transaction { // utility to create a block without worrying about the key or previous // header +#[allow(dead_code)] pub fn new_block( txs: Vec<&Transaction>, keychain: &K, @@ -101,6 +107,7 @@ where // utility producing a transaction that spends an output with the provided // value and blinding key +#[allow(dead_code)] pub fn txspend1i1o( v: u64, keychain: &K, @@ -120,3 +127,40 @@ where ) .unwrap() } + +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub struct TestElem(pub [u32; 4]); + +impl DefaultHashable for TestElem {} + +impl FixedLength for TestElem { + const LEN: usize = 16; +} + +impl PMMRable for TestElem { + type E = Self; + + fn as_elmt(&self) -> Self::E { + self.clone() + } +} + +impl Writeable for TestElem { + fn write(&self, writer: &mut W) -> Result<(), ser::Error> { + r#try!(writer.write_u32(self.0[0])); + r#try!(writer.write_u32(self.0[1])); + r#try!(writer.write_u32(self.0[2])); + writer.write_u32(self.0[3]) + } +} + +impl Readable for TestElem { + fn 
read(reader: &mut dyn Reader) -> Result { + Ok(TestElem([ + reader.read_u32()?, + reader.read_u32()?, + reader.read_u32()?, + reader.read_u32()?, + ])) + } +} diff --git a/core/tests/merkle_proof.rs b/core/tests/merkle_proof.rs index 14f0099eb6..09bea77fbd 100644 --- a/core/tests/merkle_proof.rs +++ b/core/tests/merkle_proof.rs @@ -12,12 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. -mod vec_backend; +mod common; use self::core::core::merkle_proof::MerkleProof; -use self::core::core::pmmr::PMMR; +use self::core::core::pmmr::{VecBackend, PMMR}; use self::core::ser::{self, PMMRIndexHashable}; -use crate::vec_backend::{TestElem, VecBackend}; +use crate::common::TestElem; use grin_core as core; #[test] diff --git a/core/tests/pmmr.rs b/core/tests/pmmr.rs index 74d17559f6..7da9e13575 100644 --- a/core/tests/pmmr.rs +++ b/core/tests/pmmr.rs @@ -12,12 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. -mod vec_backend; +mod common; use self::core::core::hash::Hash; -use self::core::core::pmmr::{self, PMMR}; +use self::core::core::pmmr::{self, VecBackend, PMMR}; use self::core::ser::PMMRIndexHashable; -use crate::vec_backend::{TestElem, VecBackend}; +use crate::common::TestElem; use chrono::prelude::Utc; use grin_core as core; use std::u64; @@ -433,7 +433,7 @@ fn pmmr_prune() { // First check the initial numbers of elements. assert_eq!(ba.hashes.len(), 16); - assert_eq!(ba.remove_list.len(), 0); + assert_eq!(ba.removed.len(), 0); // pruning a leaf with no parent should do nothing { @@ -442,7 +442,7 @@ fn pmmr_prune() { assert_eq!(orig_root, pmmr.root().unwrap()); } assert_eq!(ba.hashes.len(), 16); - assert_eq!(ba.remove_list.len(), 1); + assert_eq!(ba.removed.len(), 1); // pruning leaves with no shared parent just removes 1 element { @@ -451,7 +451,7 @@ fn pmmr_prune() { assert_eq!(orig_root, pmmr.root().unwrap()); } assert_eq!(ba.hashes.len(), 16); - assert_eq!(ba.remove_list.len(), 2); + assert_eq!(ba.removed.len(), 2); { let mut pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut ba, sz); @@ -459,7 +459,7 @@ fn pmmr_prune() { assert_eq!(orig_root, pmmr.root().unwrap()); } assert_eq!(ba.hashes.len(), 16); - assert_eq!(ba.remove_list.len(), 3); + assert_eq!(ba.removed.len(), 3); // pruning a non-leaf node has no effect { @@ -468,7 +468,7 @@ fn pmmr_prune() { assert_eq!(orig_root, pmmr.root().unwrap()); } assert_eq!(ba.hashes.len(), 16); - assert_eq!(ba.remove_list.len(), 3); + assert_eq!(ba.removed.len(), 3); // TODO - no longer true (leaves only now) - pruning sibling removes subtree { @@ -477,7 +477,7 @@ fn pmmr_prune() { assert_eq!(orig_root, pmmr.root().unwrap()); } assert_eq!(ba.hashes.len(), 16); - assert_eq!(ba.remove_list.len(), 4); + assert_eq!(ba.removed.len(), 4); // TODO - no longer true (leaves only now) - pruning all leaves under level >1 // removes all subtree @@ -487,7 +487,7 @@ fn pmmr_prune() { assert_eq!(orig_root, pmmr.root().unwrap()); } assert_eq!(ba.hashes.len(), 16); - assert_eq!(ba.remove_list.len(), 5); + assert_eq!(ba.removed.len(), 5); // pruning everything should only leave us with a single peak { @@ -498,7 +498,7 @@ fn pmmr_prune() { assert_eq!(orig_root, pmmr.root().unwrap()); } assert_eq!(ba.hashes.len(), 16); - assert_eq!(ba.remove_list.len(), 9); + assert_eq!(ba.removed.len(), 9); } #[test] diff --git a/core/tests/vec_backend.rs b/core/tests/vec_backend.rs index 84427c689a..73e0db73a7 100644 --- a/core/tests/vec_backend.rs +++ 
b/core/tests/vec_backend.rs @@ -12,139 +12,56 @@ // See the License for the specific language governing permissions and // limitations under the License. -use std::fs::File; +mod common; -use self::core::core::hash::{DefaultHashable, Hash}; -use self::core::core::pmmr::{self, Backend}; -use self::core::core::BlockHeader; -use self::core::ser; -use self::core::ser::{FixedLength, PMMRable, Readable, Reader, Writeable, Writer}; -use croaring; -use croaring::Bitmap; +use self::core::core::pmmr::{VecBackend, PMMR}; +use crate::common::TestElem; use grin_core as core; -#[derive(Copy, Clone, Debug, PartialEq, Eq)] -pub struct TestElem(pub [u32; 4]); - -impl DefaultHashable for TestElem {} - -impl FixedLength for TestElem { - const LEN: usize = 16; -} - -impl PMMRable for TestElem { - type E = Self; - - fn as_elmt(&self) -> Self::E { - self.clone() - } -} - -impl Writeable for TestElem { - fn write(&self, writer: &mut W) -> Result<(), ser::Error> { - writer.write_u32(self.0[0])?; - writer.write_u32(self.0[1])?; - writer.write_u32(self.0[2])?; - writer.write_u32(self.0[3]) - } -} - -impl Readable for TestElem { - fn read(reader: &mut dyn Reader) -> Result { - Ok(TestElem([ - reader.read_u32()?, - reader.read_u32()?, - reader.read_u32()?, - reader.read_u32()?, - ])) - } -} - -/// Simple MMR backend implementation based on a Vector. Pruning does not -/// compact the Vec itself. -#[derive(Clone, Debug)] -pub struct VecBackend { - /// Backend elements - pub data: Vec, - pub hashes: Vec, - /// Positions of removed elements - pub remove_list: Vec, -} - -impl Backend for VecBackend { - fn append(&mut self, data: &T, hashes: Vec) -> Result<(), String> { - self.data.push(data.clone()); - self.hashes.append(&mut hashes.clone()); - Ok(()) - } - - fn get_hash(&self, position: u64) -> Option { - if self.remove_list.contains(&position) { - None - } else { - self.get_from_file(position) - } - } - - fn get_data(&self, position: u64) -> Option { - if self.remove_list.contains(&position) { - None - } else { - self.get_data_from_file(position) - } - } - - fn get_from_file(&self, position: u64) -> Option { - let hash = &self.hashes[(position - 1) as usize]; - Some(hash.clone()) - } - - fn get_data_from_file(&self, position: u64) -> Option { - let idx = pmmr::n_leaves(position); - let data = self.data[(idx - 1) as usize].clone(); - Some(data.as_elmt()) +#[test] +fn leaf_pos_and_idx_iter_test() { + let elems = [ + TestElem([0, 0, 0, 1]), + TestElem([0, 0, 0, 2]), + TestElem([0, 0, 0, 3]), + TestElem([0, 0, 0, 4]), + TestElem([0, 0, 0, 5]), + ]; + let mut backend = VecBackend::new(); + let mut pmmr = PMMR::new(&mut backend); + for x in &elems { + pmmr.push(x).unwrap(); } - - fn data_as_temp_file(&self) -> Result { - unimplemented!() - } - - fn leaf_pos_iter(&self) -> Box + '_> { - unimplemented!() - } - - fn n_unpruned_leaves(&self) -> u64 { - unimplemented!() - } - - fn remove(&mut self, position: u64) -> Result<(), String> { - self.remove_list.push(position); - Ok(()) - } - - fn rewind(&mut self, position: u64, _rewind_rm_pos: &Bitmap) -> Result<(), String> { - let idx = pmmr::n_leaves(position); - self.data = self.data[0..(idx as usize) + 1].to_vec(); - self.hashes = self.hashes[0..(position as usize) + 1].to_vec(); - Ok(()) - } - - fn snapshot(&self, _header: &BlockHeader) -> Result<(), String> { - Ok(()) - } - - fn release_files(&mut self) {} - - fn dump_stats(&self) {} + assert_eq!( + vec![0, 1, 2, 3, 4], + pmmr.leaf_idx_iter(0).collect::>() + ); + assert_eq!( + vec![1, 2, 4, 5, 8], + 
pmmr.leaf_pos_iter().collect::>() + ); } -impl VecBackend { - /// Instantiates a new VecBackend - pub fn new() -> VecBackend { - VecBackend { - data: vec![], - hashes: vec![], - remove_list: vec![], - } +#[test] +fn leaf_pos_and_idx_iter_hash_only_test() { + let elems = [ + TestElem([0, 0, 0, 1]), + TestElem([0, 0, 0, 2]), + TestElem([0, 0, 0, 3]), + TestElem([0, 0, 0, 4]), + TestElem([0, 0, 0, 5]), + ]; + let mut backend = VecBackend::new_hash_only(); + let mut pmmr = PMMR::new(&mut backend); + for x in &elems { + pmmr.push(x).unwrap(); } + assert_eq!( + vec![0, 1, 2, 3, 4], + pmmr.leaf_idx_iter(0).collect::>() + ); + assert_eq!( + vec![1, 2, 4, 5, 8], + pmmr.leaf_pos_iter().collect::>() + ); } diff --git a/store/Cargo.toml b/store/Cargo.toml index e1b9f25526..e007fe128e 100644 --- a/store/Cargo.toml +++ b/store/Cargo.toml @@ -10,6 +10,7 @@ workspace = ".." edition = "2018" [dependencies] +bit-vec = "0.6" byteorder = "1" croaring = "0.3.9" env_logger = "0.5" diff --git a/store/src/pmmr.rs b/store/src/pmmr.rs index 3d5680c57a..54bf6cf5c8 100644 --- a/store/src/pmmr.rs +++ b/store/src/pmmr.rs @@ -148,6 +148,28 @@ impl Backend for PMMRBackend { } } + /// Returns an iterator over all the leaf insertion indices (0-indexed). + /// If our pos are [1,2,4,5,8] (first 5 leaf pos) then our insertion indices are [0,1,2,3,4] + fn leaf_idx_iter(&self, from_idx: u64) -> Box + '_> { + // pass from_idx in as param + // convert this to pos + // iterate, skipping everything prior to this + // pass in from_idx=0 then we want to convert to pos=1 + + let from_pos = pmmr::insertion_to_pmmr_index(from_idx + 1); + + if self.prunable { + Box::new( + self.leaf_set + .iter() + .skip_while(move |x| *x < from_pos) + .map(|x| pmmr::n_leaves(x).saturating_sub(1)), + ) + } else { + panic!("leaf_idx_iter not implemented for non-prunable PMMR") + } + } + fn data_as_temp_file(&self) -> Result { self.data_file .as_temp_file() diff --git a/store/tests/pmmr.rs b/store/tests/pmmr.rs index 779f4a5eb3..71b0089327 100644 --- a/store/tests/pmmr.rs +++ b/store/tests/pmmr.rs @@ -28,6 +28,36 @@ use crate::core::ser::{ Writer, }; +#[test] +fn pmmr_leaf_idx_iter() { + let (data_dir, elems) = setup("leaf_idx_iter"); + { + let mut backend = store::pmmr::PMMRBackend::new( + data_dir.to_string(), + true, + false, + ProtocolVersion(1), + None, + ) + .unwrap(); + + // adding first set of 4 elements and sync + let mmr_size = load(0, &elems[0..5], &mut backend); + backend.sync().unwrap(); + + { + let pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut backend, mmr_size); + let leaf_idx = pmmr.leaf_idx_iter(0).collect::>(); + let leaf_pos = pmmr.leaf_pos_iter().collect::>(); + + // The first 5 leaves [0,1,2,3,4] are at pos [1,2,4,5,8] in the MMR. + assert_eq!(leaf_idx, vec![0, 1, 2, 3, 4]); + assert_eq!(leaf_pos, vec![1, 2, 4, 5, 8]); + } + } + teardown(data_dir); +} + #[test] fn pmmr_append() { let (data_dir, elems) = setup("append");