diff --git a/core/src/core/transaction.rs b/core/src/core/transaction.rs
index c536d11672..3d2a14b1ec 100644
--- a/core/src/core/transaction.rs
+++ b/core/src/core/transaction.rs
@@ -14,7 +14,7 @@
 
 //! Transactions
 
-use crate::core::hash::Hashed;
+use crate::core::hash::{Hashed, ZERO_HASH};
 use crate::core::verifier_cache::VerifierCache;
 use crate::core::{committed, Committed};
 use crate::keychain::{self, BlindingFactor};
@@ -31,7 +31,6 @@ use crate::{consensus, global};
 use enum_primitive::FromPrimitive;
 use std::cmp::Ordering;
 use std::cmp::{max, min};
-use std::collections::HashSet;
 use std::sync::Arc;
 use std::{error, fmt};
 
@@ -652,14 +651,37 @@ impl TransactionBody {
 	}
 
 	// Verify that no input is spending an output from the same block.
+	// Assumes inputs and outputs are sorted
 	fn verify_cut_through(&self) -> Result<(), Error> {
-		let mut out_set = HashSet::new();
-		for out in &self.outputs {
-			out_set.insert(out.commitment());
-		}
-		for inp in &self.inputs {
-			if out_set.contains(&inp.commitment()) {
-				return Err(Error::CutThrough);
+		let mut inputs_idx = 0;
+		let mut outputs_idx = 0;
+		let mut current_input_hash = ZERO_HASH;
+		let mut current_output_hash = ZERO_HASH;
+		// set stale to 1 to trigger a recompute on the first pass
+		let mut current_input_hash_stale = 1;
+		let mut current_output_hash_stale = 1;
+		while inputs_idx < self.inputs.len() && outputs_idx < self.outputs.len() {
+			// recompute the cached hashes if they are stale
+			if current_input_hash_stale != inputs_idx {
+				current_input_hash = self.inputs[inputs_idx].hash();
+				current_input_hash_stale = inputs_idx;
+			}
+			if current_output_hash_stale != outputs_idx {
+				current_output_hash = self.outputs[outputs_idx].hash();
+				current_output_hash_stale = outputs_idx;
+			}
+			// inputs and outputs are different types, so compare them
+			// by hash rather than directly
+			match current_input_hash.cmp(&current_output_hash) {
+				Ordering::Less => {
+					inputs_idx += 1;
+				}
+				Ordering::Greater => {
+					outputs_idx += 1;
+				}
+				Ordering::Equal => {
+					return Err(Error::CutThrough);
+				}
 			}
 		}
 		Ok(())
@@ -971,24 +993,52 @@ impl Transaction {
 /// and outputs.
 pub fn cut_through(inputs: &mut Vec<Input>, outputs: &mut Vec<Output>) -> Result<(), Error> {
 	// assemble output commitments set, checking they're all unique
-	let mut out_set = HashSet::new();
-	let all_uniq = { outputs.iter().all(|o| out_set.insert(o.commitment())) };
-	if !all_uniq {
+	outputs.sort_unstable();
+	if outputs.windows(2).any(|pair| pair[0] == pair[1]) {
 		return Err(Error::AggregationError);
 	}
-
-	let in_set = inputs
-		.iter()
-		.map(|inp| inp.commitment())
-		.collect::<HashSet<_>>();
-
-	let to_cut_through = in_set.intersection(&out_set).collect::<HashSet<_>>();
-
-	// filter and sort
-	inputs.retain(|inp| !to_cut_through.contains(&inp.commitment()));
-	outputs.retain(|out| !to_cut_through.contains(&out.commitment()));
-	inputs.sort();
-	outputs.sort();
+	inputs.sort_unstable();
+	let mut inputs_idx = 0;
+	let mut outputs_idx = 0;
+	let mut inputs_insert_idx = 0;
+	let mut outputs_insert_idx = 0;
+	let mut current_input_hash = ZERO_HASH;
+	let mut current_output_hash = ZERO_HASH;
+	// set stale to 1 to trigger a recompute on the first pass
+	let mut current_input_hash_stale = 1;
+	let mut current_output_hash_stale = 1;
+	let mut ncut = 0;
+	while inputs_idx < inputs.len() && outputs_idx < outputs.len() {
+		// recompute the cached hashes if they are stale
+		if current_input_hash_stale != inputs_idx {
+			current_input_hash = inputs[inputs_idx].hash();
+			current_input_hash_stale = inputs_idx;
+		}
+		if current_output_hash_stale != outputs_idx {
+			current_output_hash = outputs[outputs_idx].hash();
+			current_output_hash_stale = outputs_idx;
+		}
+		match current_input_hash.cmp(&current_output_hash) {
+			Ordering::Less => {
+				inputs[inputs_insert_idx] = inputs[inputs_idx];
+				inputs_idx += 1;
+				inputs_insert_idx += 1;
+			}
+			Ordering::Greater => {
+				outputs[outputs_insert_idx] = outputs[outputs_idx];
+				outputs_idx += 1;
+				outputs_insert_idx += 1;
+			}
+			Ordering::Equal => {
+				ncut += 1;
+				inputs_idx += 1;
+				outputs_idx += 1;
+			}
+		}
+	}
+	// drop the slots left stale by the in-place copying above
+	outputs.drain(outputs_insert_idx..outputs_insert_idx + ncut);
+	inputs.drain(inputs_insert_idx..inputs_insert_idx + ncut);
 	Ok(())
 }
 
@@ -1105,7 +1155,7 @@ pub fn deaggregate(mk_tx: Transaction, txs: Vec<Transaction>) -> Result
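
For illustration only (not part of the patch): the merge-style elimination that the new cut_through performs can be shown on plain integers standing in for already-hashed inputs and outputs. The names cut_through_sketch, i_keep, o_keep and ncut below are hypothetical, the output-uniqueness check is omitted, and the hash caching is dropped because integers are cheap to compare; this is a sketch of the approach, not grin code.

use std::cmp::Ordering;

fn cut_through_sketch(inputs: &mut Vec<u64>, outputs: &mut Vec<u64>) {
	// sort once, then walk both vectors with two cursors
	inputs.sort_unstable();
	outputs.sort_unstable();
	let (mut i, mut o) = (0, 0);
	// next free slot for elements we keep, compacting in place
	let (mut i_keep, mut o_keep) = (0, 0);
	// number of matched input/output pairs eliminated
	let mut ncut = 0;
	while i < inputs.len() && o < outputs.len() {
		match inputs[i].cmp(&outputs[o]) {
			Ordering::Less => {
				inputs[i_keep] = inputs[i];
				i += 1;
				i_keep += 1;
			}
			Ordering::Greater => {
				outputs[o_keep] = outputs[o];
				o += 1;
				o_keep += 1;
			}
			Ordering::Equal => {
				// an input spends an output from the same set: drop both
				ncut += 1;
				i += 1;
				o += 1;
			}
		}
	}
	// between the kept prefix and the unprocessed tail sit exactly `ncut`
	// stale slots; draining them shifts the tail down next to the prefix
	inputs.drain(i_keep..i_keep + ncut);
	outputs.drain(o_keep..o_keep + ncut);
}

fn main() {
	let mut inputs = vec![5, 3, 9];
	let mut outputs = vec![3, 7, 9, 11];
	cut_through_sketch(&mut inputs, &mut outputs);
	assert_eq!(inputs, vec![5]);
	assert_eq!(outputs, vec![7, 11]);
}

Sorting once and walking both vectors replaces the HashSet allocations and commitment lookups of the old code, and the in-place copy plus a single drain per vector removes the cut entries without any extra allocation.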