From 549c1b98afde50ba2e022abcadda57c60de369a7 Mon Sep 17 00:00:00 2001 From: themighty1 Date: Tue, 17 Jan 2023 18:06:14 +0200 Subject: [PATCH 01/23] rebased. e2e test works. --- verifier/Cargo.toml | 25 ++ verifier/src/checks.rs | 211 +++++++++++ verifier/src/commitment.rs | 115 ++++++ verifier/src/error.rs | 29 ++ verifier/src/label_encoder.rs | 96 +++++ verifier/src/lib.rs | 235 ++++++++++++ verifier/src/pubkey.rs | 40 ++ verifier/src/signed.rs | 59 +++ verifier/src/testdata/key_exchange/README | 29 ++ .../key_exchange/ecdsa/cert_ecdsa.der | Bin 0 -> 539 bytes .../key_exchange/ecdsa/cert_ecdsa.key | 5 + .../testdata/key_exchange/ecdsa/client_random | 1 + .../src/testdata/key_exchange/ecdsa/pubkey | 1 + .../testdata/key_exchange/ecdsa/server_random | 1 + .../src/testdata/key_exchange/ecdsa/signature | 1 + .../testdata/key_exchange/rsa/cert_rsa.der | Bin 0 -> 1535 bytes .../testdata/key_exchange/rsa/cert_rsa.key | 52 +++ .../testdata/key_exchange/rsa/client_random | 1 + verifier/src/testdata/key_exchange/rsa/pubkey | 1 + .../testdata/key_exchange/rsa/server_random | 1 + .../src/testdata/key_exchange/rsa/signature | 1 + verifier/src/testdata/tlsnotary.org/ca.der | Bin 0 -> 1391 bytes verifier/src/testdata/tlsnotary.org/ee.der | Bin 0 -> 1334 bytes verifier/src/testdata/tlsnotary.org/inter.der | Bin 0 -> 1306 bytes verifier/src/testdata/unknown/ca.der | Bin 0 -> 1507 bytes verifier/src/testdata/unknown/ee.der | Bin 0 -> 1535 bytes verifier/src/tls_doc.rs | 135 +++++++ verifier/src/utils.rs | 55 +++ verifier/src/verifier_doc.rs | 238 ++++++++++++ verifier/src/webpki_utils.rs | 355 ++++++++++++++++++ 30 files changed, 1687 insertions(+) create mode 100644 verifier/Cargo.toml create mode 100644 verifier/src/checks.rs create mode 100644 verifier/src/commitment.rs create mode 100644 verifier/src/error.rs create mode 100644 verifier/src/label_encoder.rs create mode 100644 verifier/src/lib.rs create mode 100644 verifier/src/pubkey.rs create mode 100644 verifier/src/signed.rs create mode 100644 verifier/src/testdata/key_exchange/README create mode 100644 verifier/src/testdata/key_exchange/ecdsa/cert_ecdsa.der create mode 100644 verifier/src/testdata/key_exchange/ecdsa/cert_ecdsa.key create mode 100644 verifier/src/testdata/key_exchange/ecdsa/client_random create mode 100644 verifier/src/testdata/key_exchange/ecdsa/pubkey create mode 100644 verifier/src/testdata/key_exchange/ecdsa/server_random create mode 100644 verifier/src/testdata/key_exchange/ecdsa/signature create mode 100644 verifier/src/testdata/key_exchange/rsa/cert_rsa.der create mode 100644 verifier/src/testdata/key_exchange/rsa/cert_rsa.key create mode 100644 verifier/src/testdata/key_exchange/rsa/client_random create mode 100644 verifier/src/testdata/key_exchange/rsa/pubkey create mode 100644 verifier/src/testdata/key_exchange/rsa/server_random create mode 100644 verifier/src/testdata/key_exchange/rsa/signature create mode 100644 verifier/src/testdata/tlsnotary.org/ca.der create mode 100644 verifier/src/testdata/tlsnotary.org/ee.der create mode 100644 verifier/src/testdata/tlsnotary.org/inter.der create mode 100644 verifier/src/testdata/unknown/ca.der create mode 100644 verifier/src/testdata/unknown/ee.der create mode 100644 verifier/src/tls_doc.rs create mode 100644 verifier/src/utils.rs create mode 100644 verifier/src/verifier_doc.rs create mode 100644 verifier/src/webpki_utils.rs diff --git a/verifier/Cargo.toml b/verifier/Cargo.toml new file mode 100644 index 0000000000..5c51135f81 --- /dev/null +++ b/verifier/Cargo.toml @@ -0,0 
+1,25 @@ +[package] +name = "verifier" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +#tlsn-mpc-core = { path = "../mpc/mpc-core", features = ["garble"] } +blake3 = "1.3.3" +sha2 = "0.10" +p256 = { version = "0.10", features = ["ecdsa"]} +webpki = { version = "0.22.0", features = ["alloc"]} +webpki-roots = "0.22.5" +rand_chacha = "0.3" +rand = "0.8" +rand_core = "0.6" +thiserror = "1" +x509-parser = "0.14.0" +rs_merkle = "1.2.0" +serde = { version = "1.0", features = ["derive"] } +bincode = "1.3.3" + +[dev-dependencies] +hex = "0.4" diff --git a/verifier/src/checks.rs b/verifier/src/checks.rs new file mode 100644 index 0000000000..02be36d2a1 --- /dev/null +++ b/verifier/src/checks.rs @@ -0,0 +1,211 @@ +/// Methods performing various sanity checks on the [crate::verifier_doc::VerifierDocUnchecked] +use crate::verifier_doc::VerifierDocUnchecked; +use crate::{commitment::Range, Error}; + +/// Condition checked: at least one commitment is present +pub fn check_at_least_one_commitment_present( + unchecked: &VerifierDocUnchecked, +) -> Result<(), Error> { + if unchecked.commitments.is_empty() { + return Err(Error::SanityCheckError); + } + Ok(()) +} + +/// Condition checked: commitments and openings have their ids incremental and ascending +pub fn check_commitment_and_opening_ids(unchecked: &VerifierDocUnchecked) -> Result<(), Error> { + for i in 0..unchecked.commitments.len() { + if !(unchecked.commitments[i].id == i && unchecked.commitment_openings[i].id == i) { + return Err(Error::SanityCheckError); + } + } + Ok(()) +} + +/// Condition checked: commitment count equals opening count +pub fn check_commitment_and_opening_count_equal( + unchecked: &VerifierDocUnchecked, +) -> Result<(), Error> { + if unchecked.commitments.len() != unchecked.commitment_openings.len() { + return Err(Error::SanityCheckError); + } + Ok(()) +} + +/// Condition checked: ranges inside one commitment are non-empty, valid, ascending, non-overlapping, non-overflowing +pub fn check_ranges_inside_each_commitment(unchecked: &VerifierDocUnchecked) -> Result<(), Error> { + for c in &unchecked.commitments { + let len = c.ranges.len(); + // at least one range is expected + if len == 0 { + return Err(Error::SanityCheckError); + } + + for r in &c.ranges { + // ranges must be valid + if r.end <= r.start { + return Err(Error::SanityCheckError); + } + } + + // ranges must not overlap and must be ascending relative to each other + for pair in c.ranges.windows(2) { + if pair[1].start < pair[0].end { + return Err(Error::SanityCheckError); + } + } + + // range bound must not be larger than u32 + if c.ranges[len - 1].end > (u32::MAX as usize) { + return Err(Error::SanityCheckError); + } + } + + Ok(()) +} + +/// Condition checked: the length of each opening equals the amount of committed data in the ranges of the +/// corresponding commitment +/// Condition checked: the total amount of committed data is less than 1GB to prevent DoS +/// (this will cause the verifier to hash up to a max of 1GB * 128 = 128GB of labels) +pub fn check_commitment_sizes(unchecked: &VerifierDocUnchecked) -> Result<(), Error> { + let mut total_committed = 0usize; + + for i in 0..unchecked.commitment_openings.len() { + let expected = unchecked.commitment_openings[i].opening.len(); + let mut total_in_ranges = 0usize; + for r in &unchecked.commitments[i].ranges { + total_in_ranges += r.end - r.start; + } + if expected != total_in_ranges { + return 
Err(Error::SanityCheckError); + } + total_committed += total_in_ranges; + if total_committed > 1000000000 { + return Err(Error::SanityCheckError); + } + } + Ok(()) +} + +/// Condition checked: the number of commitments is less than 1000 +/// (searching for overlapping commitments in the naive way which we implemented has quadratic cost, +/// hence this number shouldn't be too high to prevent DoS) +pub fn check_commitment_count(unchecked: &VerifierDocUnchecked) -> Result<(), Error> { + if unchecked.commitments.len() >= 1000 { + return Err(Error::SanityCheckError); + } + Ok(()) +} + +/// Condition checked: each Merkle tree index is both unique and also ascending between commitments +pub fn check_merkle_tree_indices(unchecked: &VerifierDocUnchecked) -> Result<(), Error> { + let indices: Vec<usize> = unchecked + .commitments + .iter() + .map(|c| c.merkle_tree_index) + .collect(); + for pair in indices.windows(2) { + if pair[0] >= pair[1] { + return Err(Error::SanityCheckError); + } + } + Ok(()) +} + +/// Makes sure that if two or more commitments contain overlapping ranges, the openings +/// corresponding to those ranges match exactly. Otherwise, if the openings don't match, +/// returns an error. +pub fn check_overlapping_openings(unchecked: &VerifierDocUnchecked) -> Result<(), Error> { + // Note: using an existing lib to find multi-range overlaps would incur the need to audit + // that lib for correctness. Instead, since checking whether two ranges overlap is cheap, we use + // a naive approach where we compare each range to all other ranges. + // This naive approach does redundant work but is easy to audit. + + for needle_c in unchecked.commitments.iter() { + // Naming convention: we use the prefix "needle" to indicate the range that we are + // looking for (and to indicate the associated offsets, commitments and openings). + // Likewise the prefix "haystack" indicates _where_ we are searching. + + // byte offset in the opening. always positioned at the beginning of the range + let mut needle_offset = 0usize; + + for needle_range in &needle_c.ranges { + for haystack_c in unchecked.commitments.iter() { + if needle_c.id == haystack_c.id { + // don't search within the same commitment + continue; + } + + // byte offset in the opening. always positioned at the beginning of the range + let mut haystack_offset = 0usize; + // will be set to true when an overlap is found + let mut overlap_was_found = false; + + for haystack_range in &haystack_c.ranges { + match overlapping_range(needle_range, haystack_range) { + Some(ov_range) => { + // the byte size of the overlap + let overlap_size = ov_range.end - ov_range.start; + + // Find the position (in the openings) from which the overlap starts. The + // offsets already point to the beginning of the range, we just + // need to add the offset **within** the range.
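+                            // For example, with needle_range [2, 8) and haystack_range [5, 12)
+                            // the overlap is [5, 8) and overlap_size is 3. If both ranges happen to
+                            // be the first range of their commitment (both offsets are 0), then
+                            // needle_ov_start = 0 + (5 - 2) = 3 and haystack_ov_start = 0 + (5 - 5) = 0,
+                            // i.e. bytes [3..6) of the needle opening must equal bytes [0..3) of the
+                            // haystack opening.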
+ let needle_ov_start = + needle_offset + (ov_range.start - needle_range.start); + let haystack_ov_start = + haystack_offset + (ov_range.start - haystack_range.start); + + // get the openings which overlapped + // TODO: will later add a method get_opening_by_id() + let needle_o = &unchecked.commitment_openings[needle_c.id]; + let haystack_o = &unchecked.commitment_openings[haystack_c.id]; + + if needle_o.opening[needle_ov_start..needle_ov_start + overlap_size] + != haystack_o.opening + [haystack_ov_start..haystack_ov_start + overlap_size] + { + return Err(Error::OverlappingOpeningsDontMatch); + } + + // even if set to true on prev iteration, it is ok to set again + overlap_was_found = true; + } + None => { + if overlap_was_found { + // An overlap was found in the previous range of the haystack + // but not in this range. There will be no overlap in any + // following haystack ranges of this commitment since all ranges + // within a commitment are sorted ascendingly relative to each other. + break; + } + // otherwise keep iterating + } + } + + // advance the offset to the beginning of the next range + haystack_offset += haystack_range.end - haystack_range.start; + } + } + // advance the offset to the beginning of the next range + needle_offset += needle_range.end - needle_range.start; + } + } + + Ok(()) +} + +/// If two [Range]s overlap, returns the range containing the overlap +fn overlapping_range(a: &Range, b: &Range) -> Option { + // find purported overlap's start and end + let ov_start = std::cmp::max(a.start, b.start); + let ov_end = std::cmp::min(a.end, b.end); + if (ov_end - ov_start) < 1 { + return None; + } else { + return Some(Range { + start: ov_start, + end: ov_end, + }); + } +} diff --git a/verifier/src/commitment.rs b/verifier/src/commitment.rs new file mode 100644 index 0000000000..c80f48016c --- /dev/null +++ b/verifier/src/commitment.rs @@ -0,0 +1,115 @@ +use crate::LabelSeed; + +use super::error::Error; +use crate::utils::compute_label_commitment; +use rand::Rng; +use rand_chacha::ChaCha12Rng; +use rand_core::SeedableRng; +use serde; +use sha2::{Digest, Sha256}; + +// A User's commitment to a portion of the TLS transcript +#[derive(serde::Serialize)] +pub struct Commitment { + pub id: usize, + pub typ: CommitmentType, + pub direction: Direction, + // The index of this commitment in the Merkle tree of commitments + pub merkle_tree_index: usize, + // the actual commitment + pub commitment: [u8; 32], + // ranges of absolute offsets in the TLS transcript. The committed data + // is located in those ranges. + pub ranges: Vec, +} + +impl Commitment { + pub fn new( + id: usize, + typ: CommitmentType, + direction: Direction, + commitment: [u8; 32], + ranges: Vec, + merkle_tree_index: usize, + ) -> Self { + Self { + id, + typ, + direction, + commitment, + ranges, + merkle_tree_index, + } + } + + /// Check this commitment against the opening. + /// The opening is a (salted) hash of all garbled circuit active labels in the + /// ranges of the Commitment + pub fn verify(&self, opening: &CommitmentOpening, seed: &LabelSeed) -> Result<(), Error> { + // TODO: will change this method to be in agreement with the Label Encoder PR? 
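+        // Re-derive the labels for the bytes in `self.ranges` from the Notary's label `seed`,
+        // pick the active label for each bit of `opening.opening`, hash the labels together
+        // with the salt, and compare the result against the stored `self.commitment`.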
+ let expected = + compute_label_commitment(&opening.opening, seed, &self.ranges, opening.salt.clone())?; + + if expected != self.commitment { + return Err(Error::CommitmentVerificationFailed); + } + + Ok(()) + } +} + +#[derive(Clone, PartialEq, serde::Serialize)] +pub enum CommitmentType { + // a blake3 hash of the garbled circuit wire labels corresponding to the bits + // of the commitment opening + labels_blake3, +} + +// Commitment opening contains either the committed value or a zk proof +// about some property of that value +#[derive(serde::Serialize, Clone, Default)] +pub struct CommitmentOpening { + /// the id of the [Commitment] corresponding to this opening + pub id: usize, + // the actual opening of the commitment. Optional because a zk proof + // about some property of the opening can be provided instead + pub opening: Vec, + // all our commitments are salted by appending 16 random bytes + salt: Vec, +} + +impl CommitmentOpening { + pub fn new(id: usize, opening: Vec, salt: Vec) -> Self { + Self { id, opening, salt } + } +} + +#[derive(serde::Serialize, Clone, PartialEq)] +// A TLS transcript consists of a stream of bytes which were sent to the server (Request) +// and a stream of bytes which were received from the server (Response). The User creates +// separate commitments to bytes in each direction. +pub enum Direction { + Request, + Response, +} + +#[derive(serde::Serialize, Clone)] +/// half-open range [start, end). Range bounds are ascending i.e. start < end +pub struct Range { + pub start: usize, + pub end: usize, +} + +impl Range { + pub fn new(start: usize, end: usize) -> Self { + Self { start, end } + } +} + +// convert a slice of u8 into a vec of bool in the least-bit-first order +pub fn u8_to_boolvec(bytes: &[u8]) -> Vec { + // TODO: need to implement + vec![true; bytes.len() * 8] +} + +fn test() {} diff --git a/verifier/src/error.rs b/verifier/src/error.rs new file mode 100644 index 0000000000..6f1b927122 --- /dev/null +++ b/verifier/src/error.rs @@ -0,0 +1,29 @@ +#[derive(Debug, thiserror::Error, PartialEq)] +pub enum Error { + #[error("Can't verify the document because either signature or pubkey were not provided")] + NoPubkeyOrSignature, + #[error("x509-parser error: {0}")] + X509ParserError(String), + #[error("webpki error: {0}")] + WebpkiError(String), + #[error("unspecified error")] + VerificationError, + #[error("the certificate chain was empty")] + EmptyCertificateChain, + #[error("the end entity must not be a certificate authority")] + EndEntityIsCA, + #[error("the key exchange was signed using an unknown curve")] + UnknownCurveInKeyExchange, + #[error("the key exchange was signed using an unknown algorithm")] + UnknownSigningAlgorithmInKeyExchange, + #[error("Commitment verification failed")] + CommitmentVerificationFailed, + #[error("error while performing sanity check")] + SanityCheckError, + #[error("Failed to verify a Merkle proof")] + MerkleProofVerificationFailed, + #[error("Overlapping openings don't match")] + OverlappingOpeningsDontMatch, + #[error("internal error occured")] + InternalError, +} diff --git a/verifier/src/label_encoder.rs b/verifier/src/label_encoder.rs new file mode 100644 index 0000000000..3088348273 --- /dev/null +++ b/verifier/src/label_encoder.rs @@ -0,0 +1,96 @@ +use std::collections::HashMap; + +use rand::{Rng, SeedableRng}; +use rand_chacha::ChaCha20Rng; + +/// Encodes wire labels using the ChaCha algorithm and a global offset (delta). +/// +/// An encoder instance is configured using a domain id. 
Domain ids can be used in combination +/// with stream ids to partition label sets. +#[derive(Debug)] +pub struct ChaChaEncoder { + seed: [u8; 32], + domain: u32, + rng: ChaCha20Rng, + stream_state: HashMap, + delta: u128, +} + +impl ChaChaEncoder { + /// Creates a new encoder with the provided seed + /// + /// * `seed` - 32-byte seed for ChaChaRng + /// * `domain` - Domain id + /// + /// Domain id must be less than 2^31 + pub fn new(seed: [u8; 32], domain: u32) -> Self { + assert!(domain <= u32::MAX >> 1); + + let mut rng = ChaCha20Rng::from_seed(seed); + + // Stream id 0 is reserved to generate delta. + // This way there is only ever 1 delta per seed + rng.set_stream(0); + let delta: u128 = rng.gen(); + + Self { + seed, + domain, + rng, + stream_state: HashMap::default(), + delta, + } + } + + /// Returns encoder's rng seed + pub fn get_seed(&self) -> [u8; 32] { + self.seed + } + + /// Returns next 8 label pairs + /// + /// * `stream_id` - Stream id which can be used to partition label sets + /// * `input` - Circuit input to encode + pub fn labels_for_next_byte(&mut self, stream_id: u32) -> Vec<[u128; 2]> { + self.set_stream(stream_id); + (0..8) + .map(|_| { + //test + let zero_label: u128 = self.rng.gen(); + let one_label = zero_label ^ self.delta; + [zero_label, one_label] + }) + .collect() + } + + /// Sets the selected stream id, restoring word position if a stream + /// has been used before. + fn set_stream(&mut self, id: u32) { + // MSB -> LSB + // 31 bits 32 bits 1 bit + // [domain] [id] [reserved] + // The reserved bit ensures that we never pull from stream 0 which + // is reserved to generate delta + let new_id = ((self.domain as u64) << 33) + ((id as u64) << 1) + 1; + + let current_id = self.rng.get_stream(); + + // noop if stream already set + if new_id == current_id { + return; + } + + // Store word position for current stream + self.stream_state + .insert(current_id, self.rng.get_word_pos()); + + // Update stream id + self.rng.set_stream(new_id); + + // Get word position if stored, otherwise default to 0 + let word_pos = self.stream_state.get(&new_id).copied().unwrap_or(0); + + // Update word position + self.rng.set_word_pos(word_pos); + } +} diff --git a/verifier/src/lib.rs b/verifier/src/lib.rs new file mode 100644 index 0000000000..0ae6cdc2db --- /dev/null +++ b/verifier/src/lib.rs @@ -0,0 +1,235 @@ +mod checks; +mod commitment; +mod error; +mod label_encoder; +mod pubkey; +mod signed; +mod tls_doc; +mod utils; +mod verifier_doc; +mod webpki_utils; + +use crate::signed::Signed; +use blake3::Hasher; +use error::Error; +use pubkey::PubKey; +use verifier_doc::{VerifierDoc, VerifierDocUnchecked}; + +type HashCommitment = [u8; 32]; + +struct VerifierCore { + /// notarization doc which needs to be verified + doc: VerifierDoc, + /// trusted notary's pubkey. If this Verifier is also the Notary then no pubkey needs + /// to be provided, the signature on the [crate::main_doc::MainDoc] will not be checked. + trusted_pubkey: Option, +} + +/// Verifies the core aspects of the notarization session: the Notary signature, the TLS +/// authenticity and the correctness of commitments and zk proofs. +/// +/// After the verification completes, the application level (e.g. 
HTTP) parser can start +/// parsing the openings in [VerifierDoc::commitment_openings] +impl VerifierCore { + pub fn new( + doc_unchecked: VerifierDocUnchecked, + trusted_pubkey: Option, + ) -> Result { + let doc = VerifierDoc::from_unchecked(doc_unchecked)?; + Ok(Self { + doc, + trusted_pubkey, + }) + } + + /// verifies that the session in the VerifierDoc came from the server with the dns_name + /// + /// Note that the checks below are not sufficient to establish data provenance. + /// There also must be a check done on the HTTP level against the domain fronting + /// attack. + pub fn verify(&self, dns_name: String) -> Result<(), Error> { + // verify the Notary signature, if any + match (&self.doc.signature, &self.trusted_pubkey) { + (Some(sig), Some(pubkey)) => { + self.verify_doc_signature(pubkey, sig, &self.signed_data())?; + } + // no pubkey and no signature, do nothing + (None, None) => (), + // either pubkey or sig is missing + _ => { + return Err(Error::NoPubkeyOrSignature); + } + } + + // verify all other aspects of notarization + self.doc.verify(dns_name)?; + + Ok(()) + } + + // verify Notary's sig on the notarization doc + fn verify_doc_signature( + &self, + pubkey: &PubKey, + sig: &[u8], + msg: &Signed, + ) -> Result { + let serialized = bincode::serialize(&msg).unwrap(); + Ok(pubkey.verify_signature(&serialized, sig)) + } + + // extracts the necessary data from the VerifierDoc into a Signed + // struct and returns it + fn signed_data(&self) -> Signed { + //let doc = &self.doc.clone(); + (&self.doc).into() + } +} + +/// A PRG seeds from which to generate Notary's circuits' input labels for one +/// direction. We will use 2 separate seeds: one to generate the labels for all +/// plaintext which was sent and another seed to generate the labels for all plaintext +/// which was received +type LabelSeed = [u8; 32]; + +pub fn blake3(data: &[u8]) -> [u8; 32] { + let mut hasher = Hasher::new(); + hasher.update(data); + hasher.finalize().into() +} + +#[test] +fn e2e_test() { + use crate::{ + commitment::{Commitment, CommitmentOpening, CommitmentType, Direction, Range}, + signed::SignedTLS, + tls_doc::{ + CommittedTLS, EphemeralECPubkey, EphemeralECPubkeyType, SigKEParamsAlg, + SignatureKeyExchangeParams, TLSDoc, + }, + utils::bytes_in_ranges, + Signed, + }; + use p256::ecdsa::{signature::Signer, SigningKey, VerifyingKey}; + use pubkey::KeyType; + use rand::Rng; + use rs_merkle::{algorithms::Sha256, MerkleTree}; + + let mut rng = rand::thread_rng(); + + // After the webserver sends the Server Key Exchange message (during the TLS handshake), + // the tls-client module provides the following TLS data: + + /// end entity cert + static EE: &[u8] = include_bytes!("testdata/tlsnotary.org/ee.der"); + // intermediate cert + static INTER: &[u8] = include_bytes!("testdata/tlsnotary.org/inter.der"); + // certificate authority cert + static CA: &[u8] = include_bytes!("testdata/tlsnotary.org/ca.der"); + let cert_chain = vec![CA.to_vec(), INTER.to_vec(), EE.to_vec()]; + // unix time when the cert chain was valid + static TIME: u64 = 1671637529; + + // data taken from an actual network trace captured with `tcpdump host tlsnotary.org -w out.pcap` + // (see testdata/key_exchange/README for details) + + let cr = + hex::decode("ac3808970faf996d38864e205c6b787a1d05f681654a5d2a3c87f7dd2f13332e").unwrap(); + let sr = + hex::decode("8abf9a0c4b3b9694edac3d19e8eb7a637bfa8fe5644bd9f1444f574e47524401").unwrap(); + let eph_pk = 
hex::decode("04521e456448e6156026bb1392e0a689c051a84d67d353ab755fce68a2e9fba68d09393fa6485db84517e16d9855ce5ba3ec2293f2e511d1e315570531722e9788").unwrap(); + let sig = hex::decode("337aa65793562550f6de0a9c792b5f531a96bb78f65a2063f710bfb99e11c791e13d35c798b50eea1351c14efc526009c7836e888206cebde7135130a1fbc049d42e1d1ed05c10f0d108b9540f049ac24fe1076d391b9da3d4e60b5cb8f341bda993f6002873847be744c1955ff575b2d833694fb8a432898c5ac55752e2bddcee4c07371335e1a6581694df43c6eb0ce8da4cdd497c205607b573f9c5d17c951e0a71fbf967c4bff53fc37c597b2f5656478fefb780e8f37bd8409985dd980eda4f254c7dce76dc69e66ed27c0f2c93b53a6dfd7b27359e1589a30d483725e92305766c62d6cad2c0142d3a3c4a2272e6d81eda2886ef12028167f83b3c33ea").unwrap(); + + let params = SignatureKeyExchangeParams::new(SigKEParamsAlg::RSA_PKCS1_2048_8192_SHA256, sig); + + let eph_ec = EphemeralECPubkey::new(EphemeralECPubkeyType::P256, eph_pk); + + // Using the above data, the User computes [CommittedTLS] and sends a commitment to the Notary + + let committed_tls = CommittedTLS::new(cert_chain, params, cr, sr); + let commitment_to_tls = blake3(&bincode::serialize(&committed_tls).unwrap()); + + // ---------- After the notar. session is over: + + // The User computes all her commitments + // Here we'll have 1 (salted) commitment which has 1 byterange + + let plaintext = b"This data will be notarized"; + let ranges = vec![Range::new(2, 8)]; + let salt: [u8; 32] = rng.gen(); //TODO change to random salt + + // Note that the User will NOT be actually calling compute_label_commitment(). He doesn't + // have label_seed at this point of the protocol. Instead, the User will + // flatten all his active labels, select those which are located within ranges and will + // hash them. + // + let label_seed = rng.gen(); + let hash_commitment = + utils::compute_label_commitment(plaintext, &label_seed, &ranges, salt.to_vec()).unwrap(); + + let comm = Commitment::new( + 0, + CommitmentType::labels_blake3, + Direction::Request, + hash_commitment, + ranges.clone(), + 0, + ); + + // The User creates a merkle tree of commitments and then a merkle proof of inclusion. + // Sends the merkle_root to the Notary + let leaves = [hash_commitment]; + let merkle_tree = MerkleTree::::from_leaves(&leaves); + let merkle_root = merkle_tree.root().unwrap(); + + // the Notary uses his pubkey to compute a signature + let signing_key = SigningKey::random(&mut rng); + let verifying_key = VerifyingKey::from(&signing_key); + let encoded = verifying_key.to_encoded_point(true); + let pubkey_bytes = encoded.as_bytes(); + + // (note that ephemeralECPubkey is known both to the User and the Notary) + let signed_tls = SignedTLS::new(TIME, eph_ec, commitment_to_tls); + let signed = Signed::new(signed_tls.clone(), label_seed, merkle_root); + + let signature = signing_key.sign(&bincode::serialize(&signed).unwrap()); + let sig_der = signature.to_der(); + let signature = sig_der.as_bytes(); + + // the Notary reveals `label_seed` and also sends the `signature` and `time`. 
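+    // (The Verifier will later use `label_seed` to re-derive the garbled circuit labels and
+    // to check each label commitment against its opening.)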
+ // After that the User creates a doc for the Verifier: + // (The User creates `signed_tls` just like the Notary did above) + let tls_doc = TLSDoc::new(signed_tls, committed_tls); + + // prepares openings and merkle proofs for those openings + let opening_bytes = bytes_in_ranges(plaintext, &ranges); + let open = CommitmentOpening::new(0, opening_bytes, salt.to_vec()); + + let indices_to_prove = vec![0]; + let proof = merkle_tree.proof(&indices_to_prove); + + let doc = VerifierDoc::new( + 1, + tls_doc, + Some(signature.to_vec()), + label_seed, + merkle_root, + 1, + proof, + vec![comm], + vec![open], + ); + + // The User converts the doc into an unchecked Type and passes it to the Verifier + let doc_unchecked: VerifierDocUnchecked = doc.into(); + + // The Verifier verifies the doc: + + // Initially the Verifier may store the Notary's pubkey as bytes. Converts it into + // PubKey type + let trusted_pubkey = PubKey::from_bytes(KeyType::P256, pubkey_bytes); + + let verifier = VerifierCore::new(doc_unchecked, Some(trusted_pubkey)).unwrap(); + + verifier.verify("tlsnotary.org".to_string()).unwrap(); +} diff --git a/verifier/src/pubkey.rs b/verifier/src/pubkey.rs new file mode 100644 index 0000000000..ffda9b58c4 --- /dev/null +++ b/verifier/src/pubkey.rs @@ -0,0 +1,40 @@ +use p256::{ + self, + ecdsa::{signature::Verifier, Signature}, + EncodedPoint, +}; + +pub enum KeyType { + P256, +} + +pub enum PubKey { + P256(p256::ecdsa::VerifyingKey), +} + +impl PubKey { + pub fn from_bytes(typ: KeyType, bytes: &[u8]) -> Self { + match typ { + KeyType::P256 => { + let point = EncodedPoint::from_bytes(bytes).unwrap(); + PubKey::P256(p256::ecdsa::VerifyingKey::from_encoded_point(&point).unwrap()) + } + _ => panic!(), + } + } + + pub fn verify_signature(&self, msg: &[u8], sig: &[u8]) -> bool { + match *self { + PubKey::P256(key) => { + let signature = Signature::from_der(sig).unwrap(); + key.verify(msg, &signature).unwrap(); + true + } + } + } +} + +#[test] +fn test() { + let key = PubKey::from_bytes(KeyType::P256, &[4; 32]); +} diff --git a/verifier/src/signed.rs b/verifier/src/signed.rs new file mode 100644 index 0000000000..f6e6d96d82 --- /dev/null +++ b/verifier/src/signed.rs @@ -0,0 +1,59 @@ +use crate::{tls_doc::EphemeralECPubkey, HashCommitment, LabelSeed, VerifierDoc}; +use serde::Serialize; + +#[derive(Clone, Serialize)] +// TLS-related struct which is signed by Notary +pub struct SignedTLS { + // notarization time against which the TLS Certificate validity is checked + pub time: u64, + pub ephemeralECPubkey: EphemeralECPubkey, + /// User's commitment to [crate::tls_doc::CommittedTLS] + pub commitment_to_TLS: HashCommitment, +} + +impl SignedTLS { + pub fn new( + time: u64, + ephemeralECPubkey: EphemeralECPubkey, + commitment_to_TLS: HashCommitment, + ) -> Self { + Self { + time, + ephemeralECPubkey, + commitment_to_TLS, + } + } +} + +/// All the data which the Notary signed +#[derive(Clone, Serialize)] +pub struct Signed { + tls: SignedTLS, + /// see comments in [crate::VerifierDoc] about the fields below + pub label_seed: LabelSeed, + /// Merkle root of all the commitments + merkle_root: [u8; 32], +} + +impl Signed { + /// Creates a new struct to be signed by the Notary + pub fn new(tls: SignedTLS, label_seed: LabelSeed, merkle_root: [u8; 32]) -> Self { + Self { + tls, + label_seed, + merkle_root, + } + } +} + +/// Extracts relevant fields from the VerifierDoc. 
Those are the fields +/// which the Notary signs +impl std::convert::From<&VerifierDoc> for Signed { + fn from(doc: &VerifierDoc) -> Self { + Signed::new( + doc.tls_doc.signed_tls.clone(), + doc.label_seed.clone(), + doc.merkle_root.clone(), + ) + } +} diff --git a/verifier/src/testdata/key_exchange/README b/verifier/src/testdata/key_exchange/README new file mode 100644 index 0000000000..a1c6743321 --- /dev/null +++ b/verifier/src/testdata/key_exchange/README @@ -0,0 +1,29 @@ +// Test data for key exchange was generated by recording the TCP dump: +// +tcpdump 'tcp port 4433' -i lo -w out.pcap +// +// running a TLS server with certificates containing an RSA or an ECDSA pubkey: +// +openssl s_server -port 4433 -tls1_2 -cert testdata/key_exchange/rsa/cert_rsa.der -certform der -key testdata/key_exchange/rsa/cert_rsa.key +openssl s_server -port 4433 -tls1_2 -cert testdata/key_exchange/ecdsa/cert_ecdsa.der -certform der -key testdata/key_exchange/ecdsa/cert_ecdsa.key +// +// connecting with a TLS client (depending on the server pubkey type): +// +openssl s_client -tls1_2 -curves prime256v1 -sigalgs "RSA+SHA256" -connect localhost:4433 +openssl s_client -tls1_2 -curves prime256v1 -sigalgs "ECDSA+SHA256" -connect localhost:4433 + + +// TLS fields were parsed out as hex strings from the resulting TCP dump: +// +// client_random +tshark -r out.pcap -Y "tcp.dstport == 4433" -T fields -e tls.handshake.random +// server_random +tshark -r out.pcap -Y "tcp.srcport == 4433" -T fields -e tls.handshake.random +// ephemeral public key +tshark -r out.pcap -Y "tcp.srcport == 4433" -T fields -e tls.handshake.server_point +// signature over key exchange parameters +tshark -r out.pcap -Y "tcp.srcport == 4433" -T fields -e tls.handshake.sig + + + + diff --git a/verifier/src/testdata/key_exchange/ecdsa/cert_ecdsa.der b/verifier/src/testdata/key_exchange/ecdsa/cert_ecdsa.der new file mode 100644 index 0000000000000000000000000000000000000000..06feb7594eba04e4462a68c22a0c97299b4d431b GIT binary patch literal 539 zcmXqLViGrKV%)ocnTe5!NrdsW9sl*vLvHe7}$l=BxVvBDEEK%G~-^?l#a=#ydSLRHIN5ME3-%#h&5nW019DQVMfOP zEUX61KngjAm^~N_T$vOS_q;r4HrwKbXJ@vdlqmbxU*~SG+4ziKf4Ri^XSpZ4wlgU* noYVB=4*izDCdu=S$otBP>+iLmoL4w;>ywNF&pjSoahM1I0H~#8 literal 0 HcmV?d00001 diff --git a/verifier/src/testdata/key_exchange/ecdsa/cert_ecdsa.key b/verifier/src/testdata/key_exchange/ecdsa/cert_ecdsa.key new file mode 100644 index 0000000000..3e5f346cf2 --- /dev/null +++ b/verifier/src/testdata/key_exchange/ecdsa/cert_ecdsa.key @@ -0,0 +1,5 @@ +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEID5R5nGJq5QxP7sb8Ia6QsiSueYmUdr3Q/AC+7yv0QyToAoGCCqGSM49 +AwEHoUQDQgAEMc2lqKMel26JsS+8NTHN0jFm6EVcvxgQ5IM0RnL1OxHOccYFlkel +19DBt0NV6mBnk6cXR6Mpngi3yFsUw4H4Fg== +-----END EC PRIVATE KEY----- diff --git a/verifier/src/testdata/key_exchange/ecdsa/client_random b/verifier/src/testdata/key_exchange/ecdsa/client_random new file mode 100644 index 0000000000..98e15799cb --- /dev/null +++ b/verifier/src/testdata/key_exchange/ecdsa/client_random @@ -0,0 +1 @@ +9be40304de272fc4f6160a82b7c86d3973c86f53b8e81833ad24e6677490d9ad \ No newline at end of file diff --git a/verifier/src/testdata/key_exchange/ecdsa/pubkey b/verifier/src/testdata/key_exchange/ecdsa/pubkey new file mode 100644 index 0000000000..56c3e9348c --- /dev/null +++ b/verifier/src/testdata/key_exchange/ecdsa/pubkey @@ -0,0 +1 @@ +04e7971388f9bc92d008fec1d85b9b8728eefd4a0f3e80bc98a8e42fdd32adc1cee22f202e890eba9aba6c99fcdd90797c83d63c886e7c08dab23bc33b68857d84 \ No newline at end of file diff --git 
a/verifier/src/testdata/key_exchange/ecdsa/server_random b/verifier/src/testdata/key_exchange/ecdsa/server_random new file mode 100644 index 0000000000..1251c8e39b --- /dev/null +++ b/verifier/src/testdata/key_exchange/ecdsa/server_random @@ -0,0 +1 @@ +fd80a852f87c4339abf70940919e1582f504755f60d78aef12b1444bffc52d2a \ No newline at end of file diff --git a/verifier/src/testdata/key_exchange/ecdsa/signature b/verifier/src/testdata/key_exchange/ecdsa/signature new file mode 100644 index 0000000000..1fa500ef4d --- /dev/null +++ b/verifier/src/testdata/key_exchange/ecdsa/signature @@ -0,0 +1 @@ +30460221009d157747de604128c819070db4a30fef468d5894718d106028404ee23285fe3202210088c602e0c09b6a03da6468f258615037b8899a3bcdf28588533d2eecfc3a262b \ No newline at end of file diff --git a/verifier/src/testdata/key_exchange/rsa/cert_rsa.der b/verifier/src/testdata/key_exchange/rsa/cert_rsa.der new file mode 100644 index 0000000000000000000000000000000000000000..214c06880d49313586177abcdc5aa6b15166ee43 GIT binary patch literal 1535 zcmXqLV*PE<#Qb;xGZP~dlSrPXiQAp>xY>N{69jfFJb%1r-}RLSylk9WZ60mkc^MhG zSs4sm4Y>_C*_cCF*o2uJLk)!u1VJ1Q9W)WpQdaHPTT8u#H-SLWYye7AaU{-QZa_ncI&6mR7FG4)$(gy6gV)vXt< ze08lEN0NMkemoOYtg+H#*%yUx=8 zU%u=+p=WzeH9Yq2JXZ((V81KBS?~W(bTZ#wem1=A`=f8g@4l~;)nS#2?3_O>)+Kw5 z>LEF!*15;m_6gn7vRq*HU|;PF%ZV`=rHlV+R-9zkEL(L$x#sLGYv0wky5`LKrJ(v* z+m_FFyFa^4?e#qiJtq4rJ1<~e(if-pd+V8=6~4wHY%>=9bZ9EvFlnjQ_r@rxEe>%9 z*30ak()YJ!8RPNVU4?H84@_XzVB73vmbGDxXyc2+F??>Txn=Tpzs+_P+*vL2Q zgn4QD)Xh&zG=J+we?ByS)#5gfExVN^dbV=&tE$HDugKUb(vXw8P1o(oL~-3yGVfcZ z6xW<*oj)niOlm`Ri0HGQH@_}xUYK&FTWQaI<0UhXZu~gED=WOf-17uGSH0z@YrbFd z@Aq1!>;68V!l7++%hE9HkXzxlN7MH*pZE3OzH~|E^>f?2=1spc`S*gP&9{mof=x^J zuVC`{u~UaTkWJ>Cp|QTje~ve7-xjcJf9mk~V37aB4}na~j0}v68y^`o-Z$U_rde5j zM#ldvEX+)-3k+mId{q`P0}(b3Z8k<$R(57aIE%@E2P7>FQqE++08z}xBE}-}Z~6K? 
zk(vcU>vkm@`{+;j8GQMYhCyQkNVPIcr9pXv$bD{x60uEp8+dkajoIxmKlM46wBg&IT#oHIirpirP*|%09)^~OYay9_wV_-I5WDvL!6v*glI_<{FDSh^0 z!2!>|nFw1|&VR4!{9l2|IdIe6i0(p5xjCNl|K`N_Op}aE^=(eNBe!+AbLhrA2l<^b z!ad6~C!agA`5lkYo!bAiZwfyBm?*D*R+~lI$i-r@SnE6OaQkxw5$orj4=;^X%DE>@arAl;!M=IJE{_`p&TGYY@3pNgQZ<`(tboJAF}pED{MOO4Z;*+0>oNs;Y_`)Avnf3sgW z*P=@y?X+{R`p-X~YAn7lThP6eZRuk{^;iBT)hln$NJ_nVZ9?z#GgmHL{`qO=9s47f zPt32~*|V_s@}D~1zn$9-FFfAxpF6WXgZ)V3?;|ZHu@PK(Uu?6LJl^wtpYC()L=oGn zmRo;1S$JpnU%C{l-@WI|HRfooGP_+x_pZC_I8&BOh z==MGRj*-iP88yPzw=Q-G@8xmKUc@YS$6(FA{b#a45#{ta(#$Ei!!dD>!GlW&|;Oio?pWMd9xVH0Kw4K~y?PzQ0igcUsVN>YpRQcDzqQMi96N{2F6x@sQ zOA8D|4TM2TnT2^ggM-`^g7WiA6e0`_)UeSUJ_S;M+V-u>HW)=goaf7yL{%}fvF;1?F_{JHX*^)7mb z_cWAjyQP1@qPLp4KvBB%lYz~z{&jb6C9i%h=6|S9(7WzD_ly5q%k{o&s`h%|Bc#ex z(95j3;9;=J8{wPpB=-w!_Uf_kT$~tqZ%sS8l;RAn=gy-c5l%vESRjulRoaDHHpQelw1#&mWmj<25Ut_nWV1qwMTG%s)L@ zZ#3Rz-J*5P@#PxEvZ-ABH|}5EDDklY(M=kbokat@+bL(=ez`Qo=d9_8$g;*;h-`WLMh;lRc_g>Iv-DFqo zCF5PpD)i^rs|NwXHO`YuHlHea-Y3t;=GdnK4#`;nE(6$dNYTB&bR(NQ2+$oz?wqHJLsjX!HYm3h*_fBZ@a%uek ze*2NA(-ox)>ah}I#svAgPldH?sMd^L9VXJTe#U|j5E;9$T9Os}&1 zjEw(TSb({M&43@o7Y6ZJ4VZzHfhFz=l^iUlGsD^9O_ z?o;@C)1`#9mMgeli7SS+ehlD?e0}ag-X~KPhVT7{&D4o6YKug*3J5*#Pa(8&H7gpwUsuC^Ywq~GKr43@rUtb$j%*V zXSzC!JAHIpY?|)Bn-;WsJ~s2)HigcR z-KW{sqcnToqipMNtEK|qJDkTmPjj*R=DdjQJNf?H>f^h&YWulf^SYpR=4sI>j;y6q zAB!&hzU1vmo%p4{|F6+t(%W~vdiUeP>Iq_(+2h=TYs}f5dM+QCHs|Whty&MJN;P<_ z^RZ+cWl3o${YKsGG(t*4WP8`@Gk9!gJ;MzXRq} z=D1zmBD#56Ufpb-X;wRebnUN2Km5&csO6u^ip8C`)?_`D(Av1dIWhXO{2lAwvQN4% zdQ0z%8|T;t|E@mm82|syq6>)@52x)|6WeWmz4WT_ftiBq<~klMDs9=vuHV8`f;+G2mt6)N1o+`_9YA$j!=N zU}VT`z{$oO%EBhh6dG(OW*`FMa0&DJq?V``E4bz*7gZLNz!WhHGX)tN$cghB85tTH z85$TFm>3&G0lCINgDgQ@Bg-fQ379d=!n`Fp#d-N9iA9xq`9&GYscH=pMa_g{3n#^gwv(6jfSXNXsxOnsN;BR^|Wt?@hd zUwWcR-?x00zv{2IF@1%n)BeY2W^FfcJCyNNV`WNazK?f`%E>!#-R3WmKlDD|?82T* z^&O9{NfbEfZ29`G|2uC_9`mH^>Q-A+R9ooB7v=^F)=4^7=9vIhXV-IeGmy zE8_$Y?+FS$7FApcC6y9t27L+K;yYCqG~BlrVYtls+{E29xhCY&g1HaFFP;{@Y-6A@ zz3-I?!~8QjJXzmE_QWOJaWg!`dGF@o$H{X2GDjy&cyX(3U+u@k@=|S?v$-E^;&JCV zF)30zP5;+Ii-r0Eso$pm6>B$UVrFDuT-?MIWzfVFX21uG3R!+e#{VoV%uK8c3}iuk zRTfYjacHwKva+%>Gs0O+20S2XVUThr0|tmSv(Eg4HRK~1I9Md zjFOT9D}DVUW4(O6oK#>M0mrTqTnST~7(z)VLW#bCHppH<78L`fCe*aiBv@Wvu7{dj zd_am7SX>R94LI1?(i?dgnHUYs*|>q8XJKr*5A-Xefrfz^%vvT!F)66E$pytgTT6;T zZUd%YXyRjL;{t2FR>{P|#M1bMrST)fdxkQGDx;o|8{2iaDZH|H5G41qMXbO`xYgJC zlJ-HX@Wnx|gBTbXTV|;IxX-}A!pz`q;L4=P&~!(xOE=|Mg3aH3ku}`c+=@%CDCng% z+gQ5n3HEv0ew9hV-*~~I9mkUz&nWNGTEjW-{HN{Jx2LxBEnFAB=t!8D&KHJKh8K1h zZrOchGc|f1uCb>x`y}(_(-#G^-@MLzSYDgUwl@!I?;)_gZU!z)3Nr2|43~%>PP!ev z;klxgHJ7V&*MEIm7GK|#4^1j68<#ODn6ci+v87gp1D`pW^c+s$5Il@}~Y;eW$> zsOPc!uQ|y14_E*I^B*IFw21MP+X44NtClSMvCnv8`;JSZzdaUq-B#TGsQpLAQB~)% z4Q7dzFZDz-dRLi0emwcS{@sU>nrkXVrZH56GEC%A*krazVp|En`Mkbyf%o34m9v^#_`8>%cDR2$!tDA!*PhMdPk!#>TNU#5o!RHD>hDWbe2+6qefMen z^y9wXkM=J!qz&JYE{~5g;AP{~YV&CO&dbQi&B|cl zZ^&)H$;KSY!Y0fV8f>U(pbp}22`hN!m82HsrIsiJrzV#cWtLPb1f>?ICKe@UD7Y8p zmlha`8VG}wG7Ix~1_!w-1m)+KC`1?<$cghB85md^m>7UT6p(9bU}i>{#klr@pXRpDI%e!;XU(K zEV7jR+GOLj(l76;^&6z(8 z#AA0A<^OIxy7p3Agsu4T=bXDgYJYFLbMRJS>=n1iXV$77?AVi#UYfS~qr~?`G0%ek zTXk%6U;1BI;?)e!a{IZ#KhHBh{kp6`Tx5OnlK(R|Po8@xcsbiYk5<`*nd?+bcMG2h zV*dIzaAEgtQ6^?a2FArrj2yraVKLwX2B<7QBjbM-7GNT1Gmr)GRarnG&7sZ4$jZvj z%mimK8VG@;g+aGrR#$eo~%5Jf`OcY%mS$e637aZkrgU|Y*%2BHjp&nU}H;f0^%a4a^@zWr&>?>x!W!N-s;l2=W2SzrWwg=OMT_0*&3%7h3Gae zcy;*g4~6~lXSNqGY|pd)7B}VI6NUN9-gj?ee!gg{n9amW8&X z%-(q}=2Oav1N$lu`1j?y@Wf5pt@piK;Nc5d7tPy|3U8BlD*g~sn=(0kfov+vK`y0r z0=&-C7fQ(2J$TS&zBOv&UW5JZdD>e475$`H4}X=I{vmG7;iWsWKIrYSHs1Np``(#9 zPu90^x7i;EbvFB!@z6{>t8eDT|4SW~n`}RDA=%Q@vNZ40uCf!8nO?5+&JjPy!*p|R 
z$I5B||t$-I`!kiAsP`9|pUq9dAo-;c!loml7AVsQOaYrMq5%H7Z7 z3cA@JwoN{~v;R(Fp{myU`)^ePf-<@%-FbR#>*HIs7us`L6b;ukef_<2^@&b#+lM|+ zE%?6e)!sX;QRMa2+qMeJ>mn~d`VsLndWXl^e=+`In*ZcNmDisT+|c`~X7U7a{l9A# zak{(Ne|WiJ`+p7J45Mr5adMf9C-3+=w_Bh4Qjqhqe53GGU!%tR7QwBtb+KuhuXfyh uGIi_Otzkk=XOH+DQ?+mj$bEB;AyneuOV5-mey66-*%E!Ac*W`+?uP)U>;Po| literal 0 HcmV?d00001 diff --git a/verifier/src/testdata/unknown/ca.der b/verifier/src/testdata/unknown/ca.der new file mode 100644 index 0000000000000000000000000000000000000000..9cca20e7f1df680c97455b49966c0508fe19dbbe GIT binary patch literal 1507 zcmXqLV!dzB#C&`KGZP~dlSrBR`mbT}_64%3a+6l}tyPHiogHGp%f_kI=F#?@mywa1 zmBGN(klTQhjX9KsO_<3s)KJ(!5X9l&;R?>rP1Ox9Ni0bLGq-T~CD+H8OD)^M77|4n98W|Y^k)f%Hv4KexkZTI$8bG7e~_->*0fcN3y$G>o>8TMbFC_m}(*J;nE zTc|yn9ed}_3;S7;{#nx2Uhwfq3Xj$&wH?b|c4l3+ znR#Qfv57rvwpL~2mRbHLT8=Lr|E%Rty(}EuR^syR(a&=>+wuZt%FoYP`ZhNB4oBJa zwK;XSPP-Rn?^>=Iwdzakgb3Dv-8UA$*|a-Ocvjsb2iMTBKlV94zlXev`ZIZwezMVe zOMgYvhxP3C$6{YF^@(jTWXP|%w{@HU+~1o&*T~FJoFfvFJH?}MC+EY{Qs>1c%2rBy z@@xP9HOq0sOyY&< z-(6GQHg>LDGvRGYOX88&$F{G?4PAHa$dvYyo5S%2U6POIMK67J3EF{(G8?|Lo!%`NR_j`6$%?-k5_ ztOtzO)aEYOy8PD3&)fYMrWkwQx?8+3>NOr|gsf2K*^Qt=%NTm|;`dGGnPP0leQh4q1+kk2;8jeK_u^Z9~{ z_IrM4Ob*u5bj$zOdCTI)zb6wv`mQZ|DZI;?KW^VaCT2zk#>I`t3>ps`@B!1TEI%XT ze-;*Cc3?B$2l0hjI1Jc;V!*UwAPW-UV-aH!xzEi|BDU#n1JCZQF}oe+r#|PBwlrvL z0Ld$}R2q~wU{$fubBQ~Z5_S_Yc@>z{85uI(@kb|qd92K;o?@aUqrc3$hPR{h$U4^6 zIkAsAA6(bz(SGpORd`ym5<6GxkClQMR$o7#>*Fqdy7kv`rHVf}PdPnS^Quc#hvdi9 zJh;ddAoEms_6Md{-AgRr&Sm?oo5{PkHL&;P#```3$$^F&cAXcR&V1e0+|l^QEv-v0 z&p-S&tNrJ@swyv?X^I-jHOm#}CLg)1aktOK#g{=x`qS?0UF-%r$-I9)%=I(8-dq{y zGHZh0f%sLsO2tZ41e$}FpYO|JVcL2?qPU@St@yP{X?d=qiTmdqnB(b__4I&GH$Tf1 zHA(Gns;Bpvy*F6P=lVX5PrH17S+MKv^%D(K_H0$zGxxxVVhZj~+c@GLf~0`CQM0X$$|aPfX%XQkFU16CHk6 zPilg-i|4UK)t7dhu525opPI8jV3zb#zG01xLbBC@#co-STV7r= z%(<+q7BS^A3M!WoM5#-eWdRa))+e#)!7p?xbFgy&^o+Tr)PiwD?-Nny8?)Dz|Wj^h|hZU0Rwz&QU09|Nx A2><{9 literal 0 HcmV?d00001 diff --git a/verifier/src/testdata/unknown/ee.der b/verifier/src/testdata/unknown/ee.der new file mode 100644 index 0000000000000000000000000000000000000000..214c06880d49313586177abcdc5aa6b15166ee43 GIT binary patch literal 1535 zcmXqLV*PE<#Qb;xGZP~dlSrPXiQAp>xY>N{69jfFJb%1r-}RLSylk9WZ60mkc^MhG zSs4sm4Y>_C*_cCF*o2uJLk)!u1VJ1Q9W)WpQdaHPTT8u#H-SLWYye7AaU{-QZa_ncI&6mR7FG4)$(gy6gV)vXt< ze08lEN0NMkemoOYtg+H#*%yUx=8 zU%u=+p=WzeH9Yq2JXZ((V81KBS?~W(bTZ#wem1=A`=f8g@4l~;)nS#2?3_O>)+Kw5 z>LEF!*15;m_6gn7vRq*HU|;PF%ZV`=rHlV+R-9zkEL(L$x#sLGYv0wky5`LKrJ(v* z+m_FFyFa^4?e#qiJtq4rJ1<~e(if-pd+V8=6~4wHY%>=9bZ9EvFlnjQ_r@rxEe>%9 z*30ak()YJ!8RPNVU4?H84@_XzVB73vmbGDxXyc2+F??>Txn=Tpzs+_P+*vL2Q zgn4QD)Xh&zG=J+we?ByS)#5gfExVN^dbV=&tE$HDugKUb(vXw8P1o(oL~-3yGVfcZ z6xW<*oj)niOlm`Ri0HGQH@_}xUYK&FTWQaI<0UhXZu~gED=WOf-17uGSH0z@YrbFd z@Aq1!>;68V!l7++%hE9HkXzxlN7MH*pZE3OzH~|E^>f?2=1spc`S*gP&9{mof=x^J zuVC`{u~UaTkWJ>Cp|QTje~ve7-xjcJf9mk~V37aB4}na~j0}v68y^`o-Z$U_rde5j zM#ldvEX+)-3k+mId{q`P0}(b3Z8k<$R(57aIE%@E2P7>FQqE++08z}xBE}-}Z~6K? 
zk(vcU>vkm@`{+;j8GQMYhCyQkNVPIcr9pXv$bD{x60uEp8+dkajoIxmKlM46wBg&IT#oHIirpirP*|%09)^~OYay9_wV_-I5WDvL!6v*glI_<{FDSh^0 z!2!>|nFw1|&VR4!{9l2|IdIe6i0(p5xjCNl|K`N_Op}aE^=(eNBe!+AbLhrA2l<^b z!ad6~C!agA`5lkYo!bAiZwfyBm?*D*R+~lI$i-r@SnE6OaQkxw5$orj4=;^X%DE>@arAl;!M=IJE{_`p&TGYY@3pNgQZ<`(tboJAF}pED{MOO4Z;*+0>oNs;Y_`)Avnf3sgW z*P=@y?X+{R`p-X~YAn7lThP6eZRuk{^;iBT)hln$NJ_nVZ9?z#GgmHL{`qO=9s47f zPt32~*|V_s@}D~1zn$9-FFfAxpF6WXgZ)V3?;|ZHu@PK(Uu?6LJl^wtpYC()L=oGn zmRo;1S$JpnU%C{l-@WI|HRfooGP_+x_pZC_I8&BOh z==MGRj*-iP88yPzw=Q-G@8xmKUc@YS$6(FA{b#a45#{ta(#$Ei!!dD>!GlW&|;Oio Self { + Self { + signed_tls, + committedTLS, + } + } + + /// Verifies the TLSDoc. Checks that `hostname` is present in the leaf certificate. + pub fn verify(&self, hostname: String) -> Result<(), Error> { + // Verify TLS certificate chain against local root certs. Some certs in the chain may + // have expired at the time of this verification. We verify their validity at the time + // of notarization. + webpki_utils::verify_cert_chain(&self.committedTLS.tls_cert_chain, self.signed_tls.time)?; + + let leaf_cert = webpki_utils::extract_leaf_cert(&self.committedTLS.tls_cert_chain)?; + + self.check_tls_commitment(&self.committedTLS, &self.signed_tls.commitment_to_TLS)?; + + //check that TLS key exchange parameters were signed by the leaf cert + webpki_utils::verify_sig_ke_params( + &leaf_cert, + &self.committedTLS.sig_ke_params, + &self.signed_tls.ephemeralECPubkey, + &self.committedTLS.client_random, + &self.committedTLS.server_random, + )?; + + webpki_utils::check_hostname_present_in_cert(&leaf_cert, hostname)?; + + Ok(()) + } + + // check the commitment to misc TLS data + fn check_tls_commitment( + &self, + committedTLS: &CommittedTLS, + commitment: &[u8; 32], + ) -> Result<(), Error> { + let s = serialize(committedTLS).unwrap(); + // hash `serialize` and compare to `commitment` + Ok(()) + } +} + +// an x509 cert in DER format +pub type CertDER = Vec; + +// Misc TLS data which the User committed to before the User and the Notary engaged in 2PC +// to compute the TLS session keys +// +// The User should not reveal `tls_cert_chain` because the Notary would learn the webserver name +// from it. The User also should not reveal `signature_over_ephemeral_key` to the Notary, because +// for ECDSA sigs it is possible to derive the pubkey from the sig and then use that pubkey to find out +// the identity of the webserver. 
+// +// Note that there is no need to commit to the ephemeral key because it will be signed explicitely +// by the Notary +#[derive(Clone, Serialize)] +pub struct CommittedTLS { + pub tls_cert_chain: Vec, + sig_ke_params: SignatureKeyExchangeParams, + client_random: Vec, + server_random: Vec, +} + +impl CommittedTLS { + pub fn new( + tls_cert_chain: Vec, + sig_ke_params: SignatureKeyExchangeParams, + client_random: Vec, + server_random: Vec, + ) -> Self { + Self { + tls_cert_chain, + sig_ke_params, + client_random, + server_random, + } + } +} + +/// Types of the ephemeral EC pubkey supported by TLSNotary +#[derive(Clone, Serialize)] +pub enum EphemeralECPubkeyType { + P256, + ED25519, +} + +/// The ephemeral EC public key (part of the TLS key exchange parameters) +#[derive(Clone, Serialize)] +pub struct EphemeralECPubkey { + pub typ: EphemeralECPubkeyType, + pub pubkey: Vec, +} + +impl EphemeralECPubkey { + pub fn new(typ: EphemeralECPubkeyType, pubkey: Vec) -> Self { + Self { typ, pubkey } + } +} + +/// Algorithms that can be used for signing the TLS key exchange parameters +#[derive(Clone, Serialize)] +pub enum SigKEParamsAlg { + RSA_PKCS1_2048_8192_SHA256, + ECDSA_P256_SHA256, +} + +/// signature over the TLS key exchange params +#[derive(Clone, Serialize)] +pub struct SignatureKeyExchangeParams { + pub alg: SigKEParamsAlg, + pub sig: Vec, +} + +impl SignatureKeyExchangeParams { + pub fn new(alg: SigKEParamsAlg, sig: Vec) -> Self { + Self { alg, sig } + } +} diff --git a/verifier/src/utils.rs b/verifier/src/utils.rs new file mode 100644 index 0000000000..c3541b99dd --- /dev/null +++ b/verifier/src/utils.rs @@ -0,0 +1,55 @@ +use crate::{ + commitment::{u8_to_boolvec, Range}, + Error, LabelSeed, +}; +use rand::{Rng, SeedableRng}; +use rand_chacha::ChaCha20Rng; +use sha2::{Digest, Sha256}; + +// Given the plaintext (the opening) and the seed, compute a (salted) commitment to the garbled circuit labels +// in the byte ranges. +pub fn compute_label_commitment( + plaintext: &[u8], + seed: &LabelSeed, + ranges: &Vec, + salt: Vec, +) -> Result<[u8; 32], Error> { + // TODO: will need to bring this in harmony with label encoder in mpc-core + + let mut rng = ChaCha20Rng::from_seed(*seed); + let delta: u128 = rng.gen(); + let mut bits_iter = u8_to_boolvec(plaintext).into_iter(); + + // for each bit of opening, expand the zero label at the rng stream offset + // and, if needed, flip it to the one label, then hash the label + let mut hasher = Sha256::new(); + for r in ranges { + // set rng stream offset to the first label in range. 
+1 accounts for + // the delta + rng.set_word_pos(4 * ((r.start as u128) + 1)); + + // expand as many labels as there are bits in the range + (0..(r.end - r.start) * 8).map(|_| { + let zero_label: u128 = rng.gen(); + let active_label = if bits_iter.next().unwrap() == true { + zero_label ^ delta + } else { + zero_label + }; + hasher.update(active_label.to_be_bytes()); + }); + } + // add salt + hasher.update(salt); + Ok(hasher.finalize().into()) +} + +/// Returns a substring of the original bytestring containing only the bytes in `ranges` +// TODO check len overflow +pub fn bytes_in_ranges(bytestring: &[u8], ranges: &[Range]) -> Vec { + let mut substring: Vec = Vec::new(); + for r in ranges { + substring.append(&mut bytestring[r.start..r.end].to_vec()) + } + substring +} diff --git a/verifier/src/verifier_doc.rs b/verifier/src/verifier_doc.rs new file mode 100644 index 0000000000..ca7c28f7a4 --- /dev/null +++ b/verifier/src/verifier_doc.rs @@ -0,0 +1,238 @@ +use super::LabelSeed; +use crate::{ + checks, + commitment::{Commitment, CommitmentOpening, CommitmentType}, + error::Error, + tls_doc::TLSDoc, +}; +use rs_merkle::{algorithms, proof_serializers, MerkleProof}; +use serde::ser::Serializer; +use std::collections::HashMap; + +#[derive(serde::Serialize)] +/// The notarization document received from the User after all sanity checks passed +pub struct VerifierDoc { + version: u8, + pub tls_doc: TLSDoc, + /// Notary's signature over the [Signed] portion of this doc + pub signature: Option>, + + // GC wire labels seed for the request data and the response data + // This is the seeds from which IWLs are generated in + // https://docs.tlsnotary.org/protocol/notarization/public_data_commitment.html + pub label_seed: LabelSeed, + + // The root of the Merkle tree of commitments. The User must prove that each [Commitment] is in the + // Merkle tree. + // This approach allows the User to hide from the Notary the exact amount of commitments thus + // increasing User privacy against the Notary. + // The root was made known to the Notary before the Notary opened his garbled circuits + // to the User + pub merkle_root: [u8; 32], + + // The total leaf count in the Merkle tree of commitments. Provided by the User to the Verifier + // to enable merkle proof verification. + pub merkle_tree_leaf_count: usize, + + // A proof that all [commitments] are the leaves of the Merkle tree + #[serde(serialize_with = "merkle_proof_serialize")] + pub merkle_multi_proof: MerkleProof, + + // User's commitments to various portions of the TLS transcripts, sorted ascendingly by id + commitments: Vec, + + // Openings for the commitments, sorted ascendingly by id + commitment_openings: Vec, +} + +/// Serialize the [MerkleProof] type using its native `serialize` method +fn merkle_proof_serialize( + proof: &MerkleProof, + serializer: S, +) -> Result +where + S: Serializer, +{ + let bytes = proof.serialize::(); + serializer.serialize_bytes(&bytes) +} + +impl VerifierDoc { + /// Creates a new doc. This method is called by the User. 
When passing the created doc + /// to the Verifier, the User must convert this doc into VerifierDocUnchecked + pub fn new( + version: u8, + tls_doc: TLSDoc, + signature: Option<Vec<u8>>, + label_seed: LabelSeed, + merkle_root: [u8; 32], + merkle_tree_leaf_count: usize, + merkle_multi_proof: MerkleProof<algorithms::Sha256>, + commitments: Vec<Commitment>, + commitment_openings: Vec<CommitmentOpening>, + ) -> Self { + Self { + version, + tls_doc, + signature, + label_seed, + merkle_root, + merkle_tree_leaf_count, + merkle_multi_proof, + commitments, + commitment_openings, + } + } + + /// Returns a new VerifierDoc after performing all sanity checks. This is the only way + /// for the Verifier to derive a VerifierDoc + pub fn from_unchecked(unchecked: VerifierDocUnchecked) -> Result<Self, Error> { + // Performs the following sanity checks: + // + // - at least one commitment is present + checks::check_at_least_one_commitment_present(&unchecked)?; + + // - commitments and openings have their ids incremental and ascending + checks::check_commitment_and_opening_ids(&unchecked)?; + + // - commitment count equals opening count + checks::check_commitment_and_opening_count_equal(&unchecked)?; + + // - ranges inside one commitment are non-empty, valid, ascending, non-overlapping, non-overflowing + checks::check_ranges_inside_each_commitment(&unchecked)?; + + // - the length of each opening equals the amount of committed data in the ranges of the + // corresponding commitment + // - the total amount of committed data is less than 1GB to prevent DoS + checks::check_commitment_sizes(&unchecked)?; + + // - the number of commitments is less than 1000 + checks::check_commitment_count(&unchecked)?; + + // - overlapping openings must match exactly + checks::check_overlapping_openings(&unchecked)?; + + // - each [merkle_tree_index] is both unique and also ascending between commitments + checks::check_merkle_tree_indices(&unchecked)?; + + Ok(Self { + version: unchecked.version, + tls_doc: unchecked.tls_doc, + signature: unchecked.signature, + label_seed: unchecked.label_seed, + merkle_root: unchecked.merkle_root, + merkle_tree_leaf_count: unchecked.merkle_tree_leaf_count, + merkle_multi_proof: unchecked.merkle_multi_proof, + commitments: unchecked.commitments, + commitment_openings: unchecked.commitment_openings, + }) + } + + /// Verifies the doc + pub fn verify(&self, dns_name: String) -> Result<(), Error> { + // verify the TLS portion of the doc. The cert must contain dns_name + self.tls_doc.verify(dns_name)?; + + self.verify_merkle_proofs()?; + + self.verify_commitments()?; + + Ok(()) + } + + /// Verifies that each commitment is present in the Merkle tree.
Note that we already checked + /// in [checks::check_merkle_tree_indices] that indices are unique and ascending + fn verify_merkle_proofs(&self) -> Result<(), Error> { + // collect all merkle tree leaf indices and corresponding hashes + let (leaf_indices, leaf_hashes): (Vec, Vec<[u8; 32]>) = self + .commitments + .iter() + .map(|c| (c.merkle_tree_index, c.commitment)) + .unzip(); + + if !self.merkle_multi_proof.verify( + self.merkle_root, + &leaf_indices, + &leaf_hashes, + self.merkle_tree_leaf_count, + ) { + return Err(Error::MerkleProofVerificationFailed); + } + + Ok(()) + } + + fn verify_commitments(&self) -> Result<(), Error> { + self.verify_label_commitments()?; + + // verify any other types of commitments here + + Ok(()) + } + + // Verify each label commitment against its opening + fn verify_label_commitments(&self) -> Result<(), Error> { + // collect only label commitments + let label_commitments: Vec<&Commitment> = self + .commitments + .iter() + .filter(|c| c.typ == CommitmentType::labels_blake3) + .collect(); + + // map each opening to its id + let mut openings_ids: HashMap = HashMap::new(); + for o in &self.commitment_openings { + openings_ids.insert(o.id, o); + } + + // collect only openings corresponding to label commitments + let mut openings: Vec<&CommitmentOpening> = Vec::with_capacity(label_commitments.len()); + for c in &label_commitments { + match openings_ids.get(&c.id) { + Some(opening) => openings.push(opening), + // should never happen since we already checked that each opening has a + // corresponding commitment in [VerifierDoc::from_unchecked()] + _ => return Err(Error::InternalError), + } + } + + // verify each (commitment, opening) pair + for (o, c) in openings.iter().zip(label_commitments) { + c.verify(o, &self.label_seed)?; + } + + Ok(()) + } +} + +/// This is the [VerifierDoc] in its unchecked form. This is the form in which the doc is received +/// by the Verifier from the User. 
+pub struct VerifierDocUnchecked { + /// All fields are exactly as in [VerifierDoc] + version: u8, + pub tls_doc: TLSDoc, + pub signature: Option>, + pub label_seed: LabelSeed, + pub merkle_root: [u8; 32], + pub merkle_tree_leaf_count: usize, + pub merkle_multi_proof: MerkleProof, + pub commitments: Vec, + pub commitment_openings: Vec, +} + +/// Converts VerifierDoc into an unchecked type with will be passed to the Verifier +impl std::convert::From for VerifierDocUnchecked { + fn from(doc: VerifierDoc) -> Self { + Self { + version: doc.version, + tls_doc: doc.tls_doc, + signature: doc.signature, + label_seed: doc.label_seed, + merkle_root: doc.merkle_root, + merkle_tree_leaf_count: doc.merkle_tree_leaf_count, + merkle_multi_proof: doc.merkle_multi_proof, + commitments: doc.commitments, + commitment_openings: doc.commitment_openings, + } + } +} diff --git a/verifier/src/webpki_utils.rs b/verifier/src/webpki_utils.rs new file mode 100644 index 0000000000..dfc5ab27bd --- /dev/null +++ b/verifier/src/webpki_utils.rs @@ -0,0 +1,355 @@ +use super::tls_doc::{ + CertDER, EphemeralECPubkey, EphemeralECPubkeyType, SigKEParamsAlg, SignatureKeyExchangeParams, +}; +use crate::Error; +use x509_parser::{certificate, prelude::FromDer}; + +type SignatureAlgorithms = &'static [&'static webpki::SignatureAlgorithm]; + +/// When validating the certificate chain, we expect that certificates were signed +/// using any of the following algorithms: +static SUPPORTED_SIG_ALGS: SignatureAlgorithms = &[ + &webpki::ECDSA_P256_SHA256, + &webpki::ECDSA_P256_SHA384, + &webpki::ECDSA_P384_SHA256, + &webpki::ECDSA_P384_SHA384, + &webpki::ED25519, + &webpki::RSA_PSS_2048_8192_SHA256_LEGACY_KEY, + &webpki::RSA_PSS_2048_8192_SHA384_LEGACY_KEY, + &webpki::RSA_PSS_2048_8192_SHA512_LEGACY_KEY, + &webpki::RSA_PKCS1_2048_8192_SHA256, + &webpki::RSA_PKCS1_2048_8192_SHA384, + &webpki::RSA_PKCS1_2048_8192_SHA512, + &webpki::RSA_PKCS1_3072_8192_SHA384, +]; + +/// Verifier that the x509 certificate `chain` was valid at the given `time`. +/// The end entity certificate must be the last in the `chain`. +pub fn verify_cert_chain(chain: &[CertDER], time: u64) -> Result<(), Error> { + let time = webpki::Time::from_seconds_since_unix_epoch(time); + let anchor = &webpki_roots::TLS_SERVER_ROOTS; + + let last_cert_der = extract_leaf_cert(chain)?; + + // Parse the DER into x509. Since webpki doesn't expose the parser, + // we use x509-parser instead + let (_, x509) = certificate::X509Certificate::from_der(&last_cert_der) + .map_err(|e| Error::X509ParserError(e.to_string()))?; + + // the end entity must not be a certificate authority + if x509.is_ca() { + return Err(Error::EndEntityIsCA); + } + + // parse the der again with webpki + let cert = webpki::EndEntityCert::try_from(last_cert_der.as_slice()) + .map_err(|e| Error::WebpkiError(e.to_string()))?; + + // Separate intermediate certificates (all certs except for the last one which is the end + // entity cert). It is ok to keep the root cert among the interm certs because webpki will + // handle such cases properly. 
+ let interm = (0..chain.len() - 1) + .map(|i| chain[i].as_slice()) + .collect::>(); + + cert.verify_is_valid_tls_server_cert(SUPPORTED_SIG_ALGS, anchor, interm.as_slice(), time) + .map_err(|e| Error::WebpkiError(e.to_string()))?; + + Ok(()) +} + +/// Verifies the signature over the TLS key exchange parameters +pub fn verify_sig_ke_params( + // certificate which signed the key exchange parameters + cert: &CertDER, + sig_ke_params: &SignatureKeyExchangeParams, + // the following three are the parameters that were signed + ephem_pubkey: &EphemeralECPubkey, + client_random: &[u8], + server_random: &[u8], +) -> Result<(), Error> { + let cert = webpki::EndEntityCert::try_from(cert.as_slice()) + .map_err(|e| Error::WebpkiError(e.to_string()))?; + + // curve constant from the TLS spec + let curve_const = match &ephem_pubkey.typ { + EphemeralECPubkeyType::P256 => [0x00, 0x17], + _ => return Err(Error::UnknownCurveInKeyExchange), + }; + + // message that was signed + let msg = [ + client_random, + server_random, + &[0x03], // type of the public key 0x03 = named_curve + &curve_const, + &[ephem_pubkey.pubkey.len() as u8], // pubkey length + &ephem_pubkey.pubkey, // pubkey + ] + .concat(); + + // we can't use [webpki::SignatureAlgorithm] in [SignatureKeyExchangeParams::alg] + // because it is not Clone. Instead we match: + let sigalg = match &sig_ke_params.alg { + SigKEParamsAlg::RSA_PKCS1_2048_8192_SHA256 => &webpki::RSA_PKCS1_2048_8192_SHA256, + SigKEParamsAlg::ECDSA_P256_SHA256 => &webpki::ECDSA_P256_SHA256, + _ => return Err(Error::UnknownSigningAlgorithmInKeyExchange), + }; + + cert.verify_signature(sigalg, &msg, &sig_ke_params.sig) + .map_err(|e| Error::WebpkiError(e.to_string()))?; + + Ok(()) +} + +// check that the hostname is present in the cert +pub fn check_hostname_present_in_cert(cert: &CertDER, hostname: String) -> Result<(), Error> { + let cert = webpki::EndEntityCert::try_from(cert.as_slice()) + .map_err(|e| Error::WebpkiError(e.to_string()))?; + + let dns_name = webpki::DnsNameRef::try_from_ascii_str(hostname.as_str()) + .map_err(|e| Error::WebpkiError(e.to_string()))?; + + cert.verify_is_valid_for_dns_name(dns_name) + .map_err(|e| Error::WebpkiError(e.to_string()))?; + + Ok(()) +} + +/// Returns the leaf certificate from the chain (the last one) +pub fn extract_leaf_cert(chain: &[CertDER]) -> Result { + match chain.last() { + None => Err(Error::EmptyCertificateChain), + Some(last) => Ok(last.clone()), + } +} + +#[cfg(test)] +mod test { + use crate::tls_doc::*; + + use super::*; + + /// end entity cert + static EE: &[u8] = include_bytes!("testdata/tlsnotary.org/ee.der"); + // intermediate cert + static INTER: &[u8] = include_bytes!("testdata/tlsnotary.org/inter.der"); + // certificate authority cert + static CA: &[u8] = include_bytes!("testdata/tlsnotary.org/ca.der"); + // unix time when the cert chain was valid + static TIME: u64 = 1671637529; + // unix time when the cert chain was NOT valid + static BADTIME: u64 = 1571465711; + + // Key exchange-related data for an RSA certificate. 
Generated with openssl + // (see testdata/key_exchange/README for details) + static RSA_CERT: &[u8] = include_bytes!("testdata/key_exchange/rsa/cert_rsa.der"); + static RSA_CR: &[u8] = include_bytes!("testdata/key_exchange/rsa/client_random"); + static RSA_SR: &[u8] = include_bytes!("testdata/key_exchange/rsa/server_random"); + static RSA_EPHEM_PUBKEY: &[u8] = include_bytes!("testdata/key_exchange/rsa/pubkey"); + static RSA_SIG: &[u8] = include_bytes!("testdata/key_exchange/rsa/signature"); + + // Key exchange-related data for an ECDSA certificate. Generated with openssl + // (see testdata/key_exchange/README for details) + static ECDSA_CERT: &[u8] = include_bytes!("testdata/key_exchange/ecdsa/cert_ecdsa.der"); + static ECDSA_CR: &[u8] = include_bytes!("testdata/key_exchange/ecdsa/client_random"); + static ECDSA_SR: &[u8] = include_bytes!("testdata/key_exchange/ecdsa/server_random"); + static ECDSA_EPHEM_PUBKEY: &[u8] = include_bytes!("testdata/key_exchange/ecdsa/pubkey"); + static ECDSA_SIG: &[u8] = include_bytes!("testdata/key_exchange/ecdsa/signature"); + + #[test] + /// Expect to succeed when CA is explicitely provided + fn test_verify_cert_chain_ca_explicit() { + assert!(verify_cert_chain(&[CA.to_vec(), INTER.to_vec(), EE.to_vec()], TIME).is_ok()); + } + + #[test] + /// Expect to succeed when CA is NOT explicitely provided. webpki will look + /// it up among the trusted root certs. + fn test_verify_cert_chain_ca_implicit() { + assert!(verify_cert_chain(&[INTER.to_vec(), EE.to_vec()], TIME).is_ok()); + } + + #[test] + // Expect to fail since the end entity cert was not valid at the time + fn test_verify_cert_chain_bad_time() { + let err = verify_cert_chain(&[CA.to_vec(), INTER.to_vec(), EE.to_vec()], BADTIME); + assert_eq!( + err.unwrap_err(), + Error::WebpkiError("CertNotValidYet".to_string()) + ); + } + + #[test] + // Expect to fail when no end entity cert provided + fn test_verify_cert_chain_no_leaf_cert() { + let err = verify_cert_chain(&[CA.to_vec(), INTER.to_vec()], TIME); + assert_eq!(err.unwrap_err(), Error::EndEntityIsCA); + } + + #[test] + // Expect to fail when no intermediate cert provided + fn test_verify_cert_chain_no_interm_cert() { + let err = verify_cert_chain(&[CA.to_vec(), EE.to_vec()], TIME); + assert_eq!( + err.unwrap_err(), + Error::WebpkiError("UnknownIssuer".to_string()) + ); + } + + #[test] + // Expect to fail when unknown root cert was provided, even though the cert chain + // is valid + fn test_verify_cert_chain_unknown_root() { + // locally generated valid chain with an unknown CA: + let ee: &[u8] = include_bytes!("testdata/unknown/ee.der"); + let ca: &[u8] = include_bytes!("testdata/unknown/ca.der"); + + let err = verify_cert_chain(&[ca.to_vec(), ee.to_vec()], TIME); + + assert_eq!( + err.unwrap_err(), + Error::WebpkiError("UnknownIssuer".to_string()) + ); + } + + // convert a hex string to bytes + fn to_hex(string: &[u8]) -> Vec { + hex::decode(string.to_ascii_lowercase()).unwrap() + } + + // Expect to succeed when key exchange params signed correctly with an RSA cert + #[test] + fn test_verify_sig_ke_params_rsa() { + let cr: &[u8] = &to_hex(RSA_CR); + let sr: &[u8] = &to_hex(RSA_SR); + let pubkey: &[u8] = &to_hex(RSA_EPHEM_PUBKEY); + let sig: &[u8] = &to_hex(RSA_SIG); + + let sig = SignatureKeyExchangeParams { + alg: SigKEParamsAlg::RSA_PKCS1_2048_8192_SHA256, + sig: sig.to_vec(), + }; + + let pubkey = EphemeralECPubkey { + pubkey: pubkey.to_vec(), + typ: EphemeralECPubkeyType::P256, + }; + + assert!(verify_sig_ke_params(&RSA_CERT.to_vec(), &sig, &pubkey, 
cr, sr).is_ok()); + } + + // Expect to succeed when key exchange params signed correctly with an ECDSA cert + #[test] + fn test_verify_sig_ke_params_ecdsa() { + let cr: &[u8] = &to_hex(ECDSA_CR); + let sr: &[u8] = &to_hex(ECDSA_SR); + let pubkey: &[u8] = &to_hex(ECDSA_EPHEM_PUBKEY); + let sig: &[u8] = &to_hex(ECDSA_SIG); + + let sig = SignatureKeyExchangeParams { + alg: SigKEParamsAlg::ECDSA_P256_SHA256, + sig: sig.to_vec(), + }; + + let pubkey = EphemeralECPubkey { + pubkey: pubkey.to_vec(), + typ: EphemeralECPubkeyType::P256, + }; + + assert!(verify_sig_ke_params(&ECDSA_CERT.to_vec(), &sig, &pubkey, cr, sr).is_ok()); + } + + // Expect RSA sig verification to fail because client_random is wrong + #[test] + fn test_verify_sig_ke_params_rsa_bad_client_random() { + let cr: &[u8] = &to_hex(RSA_CR); + let sr: &[u8] = &to_hex(RSA_SR); + let pubkey: &[u8] = &to_hex(RSA_EPHEM_PUBKEY); + let sig: &[u8] = &to_hex(RSA_SIG); + + let sig = SignatureKeyExchangeParams { + alg: SigKEParamsAlg::RSA_PKCS1_2048_8192_SHA256, + sig: sig.to_vec(), + }; + + let pubkey = EphemeralECPubkey { + pubkey: pubkey.to_vec(), + typ: EphemeralECPubkeyType::P256, + }; + + let mut cr = cr.to_vec(); + // corrupt the last byte of client random + let last = cr.pop().unwrap(); + let (corrupted, _) = last.overflowing_add(1); + cr.push(corrupted); + + let err = verify_sig_ke_params(&RSA_CERT.to_vec(), &sig, &pubkey, &cr, sr); + + assert_eq!( + err.unwrap_err(), + Error::WebpkiError("InvalidSignatureForPublicKey".to_string()) + ); + } + + // Expect ECDSA sig verification to fail because the sig is wrong + #[test] + fn test_verify_sig_ke_params_ecdsa_bad_sig() { + let cr: &[u8] = &to_hex(ECDSA_CR); + let sr: &[u8] = &to_hex(ECDSA_SR); + let pubkey: &[u8] = &to_hex(ECDSA_EPHEM_PUBKEY); + let sig: &[u8] = &to_hex(ECDSA_SIG); + + let mut sig = sig.to_vec(); + // corrupt the last byte of the signature + let last = sig.pop().unwrap(); + let (corrupted, _) = last.overflowing_add(1); + sig.push(corrupted); + + let sig = SignatureKeyExchangeParams { + alg: SigKEParamsAlg::ECDSA_P256_SHA256, + sig: sig.to_vec(), + }; + + let pubkey = EphemeralECPubkey { + pubkey: pubkey.to_vec(), + typ: EphemeralECPubkeyType::P256, + }; + + let err = verify_sig_ke_params(&ECDSA_CERT.to_vec(), &sig, &pubkey, cr, sr); + assert_eq!( + err.unwrap_err(), + Error::WebpkiError("InvalidSignatureForPublicKey".to_string()) + ); + } + + // Expect to succeed + #[test] + fn test_check_hostname_present_in_cert() { + let host = String::from("tlsnotary.org"); + assert!(check_hostname_present_in_cert(&EE.to_vec(), host).is_ok()); + } + + // Expect to fail because the host name is not in the cert + #[test] + fn test_check_hostname_present_in_cert_bad_host() { + let host = String::from("tlsnotary"); + let err = check_hostname_present_in_cert(&EE.to_vec(), host); + let _str = String::from("CertNotValidForName"); + assert_eq!( + err.unwrap_err(), + Error::WebpkiError("CertNotValidForName".to_string()) + ); + } + + // Expect to fail because the host name is not a valid DNS name + #[test] + fn test_check_hostname_present_in_cert_invalid_dns_name() { + let host = String::from("tlsnotary.org%"); + let err = check_hostname_present_in_cert(&EE.to_vec(), host); + assert_eq!( + err.unwrap_err(), + Error::WebpkiError("InvalidDnsNameError".to_string()) + ); + } +} From cb301cb14e61b3be47281d16344667cec5d2502e Mon Sep 17 00:00:00 2001 From: themighty1 Date: Mon, 23 Jan 2023 21:05:48 +0200 Subject: [PATCH 02/23] cleanups --- verifier/src/checks.rs | 177 ++++++++++++++++---------- 
verifier/src/commitment.rs | 136 ++++++++++++-------- verifier/src/error.rs | 26 ++-- verifier/src/lib.rs | 86 ++++++------- verifier/src/pubkey.rs | 39 ++++-- verifier/src/signed.rs | 38 ++++-- verifier/src/tls_doc.rs | 129 +++++++++++-------- verifier/src/utils.rs | 49 +++++--- verifier/src/verifier_doc.rs | 233 +++++++++++++++++++++-------------- verifier/src/webpki_utils.rs | 103 ++++++++-------- 10 files changed, 609 insertions(+), 407 deletions(-) diff --git a/verifier/src/checks.rs b/verifier/src/checks.rs index 02be36d2a1..1323723580 100644 --- a/verifier/src/checks.rs +++ b/verifier/src/checks.rs @@ -1,63 +1,105 @@ -/// Methods performing various sanity checks on the [crate::verifier_doc::VerifierDocUnchecked] -use crate::verifier_doc::VerifierDocUnchecked; -use crate::{commitment::Range, Error}; +/// Methods performing various validation checks on the [crate::verifier_doc::VerifierDocUnchecked] +use super::verifier_doc::VerifierDocUnchecked; +use super::{commitment::Range, Error}; + +pub fn perform_checks(unchecked: &VerifierDocUnchecked) -> Result<(), Error> { + // Performs the following validation checks: + // + // - at least one commitment is present + check_at_least_one_commitment_present(unchecked)?; + + // - commitments and openings have their ids incremental and ascending + check_commitment_and_opening_ids(unchecked)?; + + // - commitment count equals opening count + check_commitment_and_opening_count_equal(unchecked)?; + + // - ranges inside one commitment are non-empty, valid, ascending, non-overlapping, non-overflowing + check_ranges_inside_each_commitment(unchecked)?; + + // - the length of each opening equals the amount of committed data in the ranges of the + // corresponding commitment + // - the total amount of committed data is less than 1GB to prevent DoS + check_commitment_sizes(unchecked)?; + + // - the amount of commitments is less that 1000 + check_commitment_count(unchecked)?; + + // - overlapping openings must match exactly + check_overlapping_openings(unchecked)?; + + // - each [merkle_tree_index] is both unique and also ascending between commitments + check_merkle_tree_indices(unchecked)?; + + Ok(()) +} /// Condition checked: at least one commitment is present -pub fn check_at_least_one_commitment_present( - unchecked: &VerifierDocUnchecked, -) -> Result<(), Error> { - if unchecked.commitments.is_empty() { - return Err(Error::SanityCheckError); +fn check_at_least_one_commitment_present(unchecked: &VerifierDocUnchecked) -> Result<(), Error> { + if unchecked.commitments().is_empty() { + return Err(Error::SanityCheckError( + "check_at_least_one_commitment_present".to_string(), + )); } Ok(()) } /// Condition checked: commitments and openings have their ids incremental and ascending -pub fn check_commitment_and_opening_ids(unchecked: &VerifierDocUnchecked) -> Result<(), Error> { - for i in 0..unchecked.commitments.len() { - if !(unchecked.commitments[i].id == i && unchecked.commitment_openings[i].id == i) { - return Err(Error::SanityCheckError); +fn check_commitment_and_opening_ids(unchecked: &VerifierDocUnchecked) -> Result<(), Error> { + for i in 0..unchecked.commitments().len() { + if !(unchecked.commitments()[i].id() == i && unchecked.commitment_openings()[i].id() == i) { + return Err(Error::SanityCheckError( + "check_commitment_and_opening_ids".to_string(), + )); } } Ok(()) } /// Condition checked: commitment count equals opening count -pub fn check_commitment_and_opening_count_equal( - unchecked: &VerifierDocUnchecked, -) -> Result<(), Error> { - if 
unchecked.commitments.len() != unchecked.commitment_openings.len() { - return Err(Error::SanityCheckError); +fn check_commitment_and_opening_count_equal(unchecked: &VerifierDocUnchecked) -> Result<(), Error> { + if unchecked.commitments().len() != unchecked.commitment_openings().len() { + return Err(Error::SanityCheckError( + "check_commitment_and_opening_count_equal".to_string(), + )); } Ok(()) } /// Condition checked: ranges inside one commitment are non-empty, valid, ascending, non-overlapping, non-overflowing -pub fn check_ranges_inside_each_commitment(unchecked: &VerifierDocUnchecked) -> Result<(), Error> { - for c in &unchecked.commitments { - let len = c.ranges.len(); +fn check_ranges_inside_each_commitment(unchecked: &VerifierDocUnchecked) -> Result<(), Error> { + for c in unchecked.commitments() { + let len = c.ranges().len(); // at least one range is expected if len == 0 { - return Err(Error::SanityCheckError); + return Err(Error::SanityCheckError( + "check_ranges_inside_each_commitment".to_string(), + )); } - for r in &c.ranges { + for r in c.ranges() { // ranges must be valid - if r.end <= r.start { - return Err(Error::SanityCheckError); + if r.end() <= r.start() { + return Err(Error::SanityCheckError( + "check_ranges_inside_each_commitment".to_string(), + )); } } // ranges must not overlap and must be ascending relative to each other - for pair in c.ranges.windows(2) { - if pair[1].start < pair[0].end { - return Err(Error::SanityCheckError); + for pair in c.ranges().windows(2) { + if pair[1].start() < pair[0].end() { + return Err(Error::SanityCheckError( + "check_ranges_inside_each_commitment".to_string(), + )); } } // range bound must not be larger than u32 - if c.ranges[len - 1].end > (u32::MAX as usize) { - return Err(Error::SanityCheckError); + if c.ranges()[len - 1].end() > (u32::MAX as usize) { + return Err(Error::SanityCheckError( + "check_ranges_inside_each_commitment".to_string(), + )); } } @@ -68,21 +110,25 @@ pub fn check_ranges_inside_each_commitment(unchecked: &VerifierDocUnchecked) -> /// corresponding commitment /// Condition checked: the total amount of committed data is less than 1GB to prevent DoS /// (this will cause the verifier to hash up to a max of 1GB * 128 = 128GB of labels) -pub fn check_commitment_sizes(unchecked: &VerifierDocUnchecked) -> Result<(), Error> { +fn check_commitment_sizes(unchecked: &VerifierDocUnchecked) -> Result<(), Error> { let mut total_committed = 0usize; - for i in 0..unchecked.commitment_openings.len() { - let expected = unchecked.commitment_openings[i].opening.len(); + for i in 0..unchecked.commitment_openings().len() { + let expected = unchecked.commitment_openings()[i].opening().len(); let mut total_in_ranges = 0usize; - for r in &unchecked.commitments[i].ranges { - total_in_ranges += r.end - r.start; + for r in unchecked.commitments()[i].ranges() { + total_in_ranges += r.end() - r.start(); } if expected != total_in_ranges { - return Err(Error::SanityCheckError); + return Err(Error::SanityCheckError( + "check_commitment_sizes".to_string(), + )); } total_committed += total_in_ranges; if total_committed > 1000000000 { - return Err(Error::SanityCheckError); + return Err(Error::SanityCheckError( + "check_commitment_sizes".to_string(), + )); } } Ok(()) @@ -91,23 +137,27 @@ pub fn check_commitment_sizes(unchecked: &VerifierDocUnchecked) -> Result<(), Er /// Condition checked: the amount of commitments is less that 1000 /// (searching for overlapping commitments in the naive way which we implemeted has quadratic cost, /// hence this 
number shouldn't be too high to prevent DoS) -pub fn check_commitment_count(unchecked: &VerifierDocUnchecked) -> Result<(), Error> { - if unchecked.commitments.len() >= 1000 { - return Err(Error::SanityCheckError); +fn check_commitment_count(unchecked: &VerifierDocUnchecked) -> Result<(), Error> { + if unchecked.commitments().len() >= 1000 { + return Err(Error::SanityCheckError( + "check_commitment_count".to_string(), + )); } Ok(()) } /// Condition checked: each Merkle tree index is both unique and also ascending between commitments -pub fn check_merkle_tree_indices(unchecked: &VerifierDocUnchecked) -> Result<(), Error> { +fn check_merkle_tree_indices(unchecked: &VerifierDocUnchecked) -> Result<(), Error> { let indices: Vec = unchecked - .commitments + .commitments() .iter() - .map(|c| c.merkle_tree_index) + .map(|c| c.merkle_tree_index()) .collect(); for pair in indices.windows(2) { if pair[0] >= pair[1] { - return Err(Error::SanityCheckError); + return Err(Error::SanityCheckError( + "check_merkle_tree_indices".to_string(), + )); } } Ok(()) @@ -116,13 +166,13 @@ pub fn check_merkle_tree_indices(unchecked: &VerifierDocUnchecked) -> Result<(), /// Makes sure that if two or more commitments contain overlapping ranges, the openings /// corresponding to those ranges match exactly. Otherwise, if the openings don't match, /// returns an error. -pub fn check_overlapping_openings(unchecked: &VerifierDocUnchecked) -> Result<(), Error> { +fn check_overlapping_openings(unchecked: &VerifierDocUnchecked) -> Result<(), Error> { // Note: using an existing lib to find multi-range overlap would incur the need to audit // that lib for correctness. Instead, since checking two range overlap is cheap, we are using // a naive way where we compare each range to all other ranges. // This naive way will have redundancy in computation but it will be easy to audit. - for needle_c in unchecked.commitments.iter() { + for needle_c in unchecked.commitments().iter() { // Naming convention: we use the prefix "needle" to indicate the range that we are // looking for (and to indicate the associates offsets, commitments and openings). // Likewise the prefix "haystack" indicates _where_ we are searching. @@ -130,9 +180,9 @@ pub fn check_overlapping_openings(unchecked: &VerifierDocUnchecked) -> Result<() // byte offset in the opening. always positioned at the beginning of the range let mut needle_offset = 0usize; - for needle_range in &needle_c.ranges { - for haystack_c in unchecked.commitments.iter() { - if needle_c.id == haystack_c.id { + for needle_range in needle_c.ranges() { + for haystack_c in unchecked.commitments().iter() { + if needle_c.id() == haystack_c.id() { // don't search within the same commitment continue; } @@ -142,27 +192,27 @@ pub fn check_overlapping_openings(unchecked: &VerifierDocUnchecked) -> Result<() // will be set to true when overlap is found let mut overlap_was_found = false; - for haystack_range in &haystack_c.ranges { + for haystack_range in haystack_c.ranges() { match overlapping_range(needle_range, haystack_range) { Some(ov_range) => { // the bytesize of the overlap - let overlap_size = ov_range.end - ov_range.start; + let overlap_size = ov_range.end() - ov_range.start(); // Find position (in the openings) from which the overlap starts. The // offsets are already pointing to the beginning of the range, we just // need to add the offset **within** the range. 
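A worked example (an editorial sketch, not taken from the patch itself) of the offset bookkeeping used above, assuming the half-open `Range` semantics from `commitment.rs`:

#[test]
fn overlap_offset_example() {
    // needle range [10, 20) and haystack range [15, 40), each the first range of its commitment
    let (needle_offset, haystack_offset) = (0usize, 0usize);
    // their overlap is [15, 20)
    let (ov_start, ov_end) = (15usize, 20usize);
    let overlap_size = ov_end - ov_start; // 5 bytes
    let needle_ov_start = needle_offset + (ov_start - 10); // byte 5 within the needle opening
    let haystack_ov_start = haystack_offset + (ov_start - 15); // byte 0 within the haystack opening
    // the check above then compares needle_opening[5..10] against haystack_opening[0..5]
    assert_eq!((overlap_size, needle_ov_start, haystack_ov_start), (5, 5, 0));
}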
let needle_ov_start = - needle_offset + (ov_range.start - needle_range.start); + needle_offset + (ov_range.start() - needle_range.start()); let haystack_ov_start = - haystack_offset + (ov_range.start - haystack_range.start); + haystack_offset + (ov_range.start() - haystack_range.start()); // get the openings which overlapped // TODO: will later add a method get_opening_by_id() - let needle_o = &unchecked.commitment_openings[needle_c.id]; - let haystack_o = &unchecked.commitment_openings[haystack_c.id]; + let needle_o = &unchecked.commitment_openings()[needle_c.id()]; + let haystack_o = &unchecked.commitment_openings()[haystack_c.id()]; - if needle_o.opening[needle_ov_start..needle_ov_start + overlap_size] - != haystack_o.opening + if needle_o.opening()[needle_ov_start..needle_ov_start + overlap_size] + != haystack_o.opening() [haystack_ov_start..haystack_ov_start + overlap_size] { return Err(Error::OverlappingOpeningsDontMatch); @@ -184,11 +234,11 @@ pub fn check_overlapping_openings(unchecked: &VerifierDocUnchecked) -> Result<() } // advance the offset to the beginning of the next range - haystack_offset += haystack_range.end - haystack_range.start; + haystack_offset += haystack_range.end() - haystack_range.start(); } } // advance the offset to the beginning of the next range - needle_offset += needle_range.end - needle_range.start; + needle_offset += needle_range.end() - needle_range.start(); } } @@ -198,14 +248,11 @@ pub fn check_overlapping_openings(unchecked: &VerifierDocUnchecked) -> Result<() /// If two [Range]s overlap, returns the range containing the overlap fn overlapping_range(a: &Range, b: &Range) -> Option { // find purported overlap's start and end - let ov_start = std::cmp::max(a.start, b.start); - let ov_end = std::cmp::min(a.end, b.end); + let ov_start = std::cmp::max(a.start(), b.start()); + let ov_end = std::cmp::min(a.end(), b.end()); if (ov_end - ov_start) < 1 { - return None; + None } else { - return Some(Range { - start: ov_start, - end: ov_end, - }); + Some(Range::new(ov_start, ov_end)) } } diff --git a/verifier/src/commitment.rs b/verifier/src/commitment.rs index c80f48016c..1fe82c724d 100644 --- a/verifier/src/commitment.rs +++ b/verifier/src/commitment.rs @@ -1,26 +1,20 @@ -use crate::LabelSeed; - -use super::error::Error; -use crate::utils::compute_label_commitment; -use rand::Rng; -use rand_chacha::ChaCha12Rng; -use rand_core::SeedableRng; -use serde; -use sha2::{Digest, Sha256}; - -// A User's commitment to a portion of the TLS transcript -#[derive(serde::Serialize)] +use super::{error::Error, utils::compute_label_commitment, LabelSeed}; +use serde::Serialize; +use std::any::Any; + +/// A User's commitment to a portion of the notarized data +#[derive(Serialize)] pub struct Commitment { - pub id: usize, - pub typ: CommitmentType, - pub direction: Direction, + id: usize, + typ: CommitmentType, + direction: Direction, // The index of this commitment in the Merkle tree of commitments - pub merkle_tree_index: usize, - // the actual commitment - pub commitment: [u8; 32], - // ranges of absolute offsets in the TLS transcript. The committed data + merkle_tree_index: usize, + // The actual commitment + commitment: [u8; 32], + // The absolute byte ranges within the notarized data. The committed data // is located in those ranges. - pub ranges: Vec, + ranges: Vec, } impl Commitment { @@ -42,13 +36,24 @@ impl Commitment { } } - /// Check this commitment against the opening. 
- /// The opening is a (salted) hash of all garbled circuit active labels in the - /// ranges of the Commitment - pub fn verify(&self, opening: &CommitmentOpening, seed: &LabelSeed) -> Result<(), Error> { - // TODO: will change this method to be in agreement with the Label Encoder PR? - let expected = - compute_label_commitment(&opening.opening, seed, &self.ranges, opening.salt.clone())?; + /// Verifies this commitment against the opening. `extra_data` holds extra data specific + /// to the commitment type. + pub fn verify( + &self, + opening: &CommitmentOpening, + extra_data: Box, + ) -> Result<(), Error> { + let expected = match self.typ { + CommitmentType::labels_blake3 => { + let seed = match extra_data.downcast::() { + Ok(seed) => *seed, + Err(_) => return Err(Error::InternalError), + }; + + compute_label_commitment(&opening.opening, &self.ranges, &seed, opening.salt())? + } + _ => return Err(Error::InternalError), + }; if expected != self.commitment { return Err(Error::CommitmentVerificationFailed); @@ -56,25 +61,44 @@ impl Commitment { Ok(()) } + + pub fn id(&self) -> usize { + self.id + } + + pub fn typ(&self) -> &CommitmentType { + &self.typ + } + + pub fn merkle_tree_index(&self) -> usize { + self.merkle_tree_index + } + + pub fn commitment(&self) -> [u8; 32] { + self.commitment + } + + pub fn ranges(&self) -> &Vec { + &self.ranges + } } -#[derive(Clone, PartialEq, serde::Serialize)] +#[derive(Clone, PartialEq, Serialize)] pub enum CommitmentType { - // a blake3 hash of the garbled circuit wire labels corresponding to the bits - // of the commitment opening + // A blake3 digest of the garbled circuit's active labels. The labels are generated from a PRG seed. + // For more details on the protocol used to generate this commitment, see + // https://github.com/tlsnotary/docs-mdbook/blob/main/src/protocol/notarization/public_data_commitment.md labels_blake3, } -// Commitment opening contains either the committed value or a zk proof -// about some property of that value -#[derive(serde::Serialize, Clone, Default)] +/// Commitment opening contains the committed value +#[derive(Serialize)] pub struct CommitmentOpening { /// the id of the [Commitment] corresponding to this opening - pub id: usize, - // the actual opening of the commitment. Optional because a zk proof - // about some property of the opening can be provided instead - pub opening: Vec, - // all our commitments are salted by appending 16 random bytes + id: usize, + // the actual opening of the commitment + opening: Vec, + // all our commitments are `salt`ed by appending 16 random bytes salt: Vec, } @@ -82,9 +106,21 @@ impl CommitmentOpening { pub fn new(id: usize, opening: Vec, salt: Vec) -> Self { Self { id, opening, salt } } + + pub fn id(&self) -> usize { + self.id + } + + pub fn opening(&self) -> &Vec { + &self.opening + } + + pub fn salt(&self) -> &Vec { + &self.salt + } } -#[derive(serde::Serialize, Clone, PartialEq)] +#[derive(Serialize, Clone, PartialEq)] // A TLS transcript consists of a stream of bytes which were sent to the server (Request) // and a stream of bytes which were received from the server (Response). The User creates // separate commitments to bytes in each direction. @@ -93,23 +129,23 @@ pub enum Direction { Response, } -#[derive(serde::Serialize, Clone)] -/// half-open range [start, end). Range bounds are ascending i.e. start < end +#[derive(Serialize, Clone)] +/// A half-open range [start, end). Range bounds are ascending i.e. 
start < end pub struct Range { - pub start: usize, - pub end: usize, + start: usize, + end: usize, } impl Range { pub fn new(start: usize, end: usize) -> Self { Self { start, end } } -} -// convert a slice of u8 into a vec of bool in the least-bit-first order -pub fn u8_to_boolvec(bytes: &[u8]) -> Vec { - // TODO: need to implement - vec![true; bytes.len() * 8] -} + pub fn start(&self) -> usize { + self.start + } -fn test() {} + pub fn end(&self) -> usize { + self.end + } +} diff --git a/verifier/src/error.rs b/verifier/src/error.rs index 6f1b927122..1c8911dcf3 100644 --- a/verifier/src/error.rs +++ b/verifier/src/error.rs @@ -2,28 +2,36 @@ pub enum Error { #[error("Can't verify the document because either signature or pubkey were not provided")] NoPubkeyOrSignature, + #[error("The document is expected to contain a signature")] + SignatureExpected, + #[error("The document is NOT expected to contain a signature")] + SignatureNotExpected, #[error("x509-parser error: {0}")] X509ParserError(String), #[error("webpki error: {0}")] WebpkiError(String), - #[error("unspecified error")] - VerificationError, - #[error("the certificate chain was empty")] + #[error("Certificate chain was empty")] EmptyCertificateChain, - #[error("the end entity must not be a certificate authority")] + #[error("End entity must not be a certificate authority")] EndEntityIsCA, - #[error("the key exchange was signed using an unknown curve")] + #[error("Key exchange data was signed using an unknown curve")] UnknownCurveInKeyExchange, - #[error("the key exchange was signed using an unknown algorithm")] + #[error("Key exchange data was signed using an unknown algorithm")] UnknownSigningAlgorithmInKeyExchange, #[error("Commitment verification failed")] CommitmentVerificationFailed, - #[error("error while performing sanity check")] - SanityCheckError, + #[error("Error while performing validation check in: {0}")] + SanityCheckError(String), #[error("Failed to verify a Merkle proof")] MerkleProofVerificationFailed, #[error("Overlapping openings don't match")] OverlappingOpeningsDontMatch, - #[error("internal error occured")] + #[error("Failed while checking committed TLS")] + CommittedTLSCheckFailed, + #[error("An internal error occured")] InternalError, + #[error("An internal error during serialization or deserialization")] + SerializationError, + #[error("Error during signature verification")] + SignatureVerificationError, } diff --git a/verifier/src/lib.rs b/verifier/src/lib.rs index 0ae6cdc2db..dee9db03b3 100644 --- a/verifier/src/lib.rs +++ b/verifier/src/lib.rs @@ -10,27 +10,27 @@ mod verifier_doc; mod webpki_utils; use crate::signed::Signed; -use blake3::Hasher; use error::Error; use pubkey::PubKey; +use utils::blake3; use verifier_doc::{VerifierDoc, VerifierDocUnchecked}; type HashCommitment = [u8; 32]; -struct VerifierCore { - /// notarization doc which needs to be verified +/// Verifier of the notarization document +/// +/// Once the verification succeeds, an application level (e.g. HTTP, JSON) parser can +/// parse `commitment_openings` in [VerifierDoc] +struct Verifier { + /// A validated notarization document which needs to be verified doc: VerifierDoc, - /// trusted notary's pubkey. If this Verifier is also the Notary then no pubkey needs - /// to be provided, the signature on the [crate::main_doc::MainDoc] will not be checked. 
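A minimal usage sketch of the `Verifier` API that this hunk introduces, mirroring the e2e test later in the patch. It assumes crate-internal access, since `Verifier` and `PubKey` are not public, and a `notary_pubkey_bytes` value obtained out of band:

fn check_received_document(
    doc_unchecked: VerifierDocUnchecked,
    notary_pubkey_bytes: &[u8],
) -> Result<(), Error> {
    // the Notary's pubkey is distributed out of band and trusted by this Verifier
    let trusted_pubkey = PubKey::from_bytes(KeyType::P256, notary_pubkey_bytes)?;
    let verifier = Verifier::new(doc_unchecked, Some(trusted_pubkey))?;
    // the DNS name must match the server's TLS certificate exactly
    verifier.verify("tlsnotary.org".to_string())
}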
+ /// A trusted Notary's pubkey (if this Verifier acted as the Notary then no pubkey needs + /// to be provided) trusted_pubkey: Option, } -/// Verifies the core aspects of the notarization session: the Notary signature, the TLS -/// authenticity and the correctness of commitments and zk proofs. -/// -/// After the verification completes, the application level (e.g. HTTP) parser can start -/// parsing the openings in [VerifierDoc::commitment_openings] -impl VerifierCore { +impl Verifier { + /// Validates the notarization document and creates a new Verifier pub fn new( doc_unchecked: VerifierDocUnchecked, trusted_pubkey: Option, @@ -42,63 +42,53 @@ impl VerifierCore { }) } - /// verifies that the session in the VerifierDoc came from the server with the dns_name + /// Verifies that the notarization document resulted from notarizing data from a TLS server with the + /// DNS name `dns_name`. `dns_name` must be exactly as it appears in the server's TLS certificate. + /// Also verifies the Notary's signature (if any). + /// + /// IMPORTANT: + /// if the notarized application data is HTTP, the checks below will not be sufficient. You must also + /// check on the HTTP parser's level against domain fronting. /// - /// Note that the checks below are not sufficient to establish data provenance. - /// There also must be a check done on the HTTP level against the domain fronting - /// attack. pub fn verify(&self, dns_name: String) -> Result<(), Error> { - // verify the Notary signature, if any - match (&self.doc.signature, &self.trusted_pubkey) { + // verify Notary's signature, if any + match (self.doc.signature(), &self.trusted_pubkey) { (Some(sig), Some(pubkey)) => { - self.verify_doc_signature(pubkey, sig, &self.signed_data())?; + self.verify_doc_signature(pubkey, sig)?; } // no pubkey and no signature, do nothing (None, None) => (), - // either pubkey or sig is missing + // either pubkey or signature is missing _ => { return Err(Error::NoPubkeyOrSignature); } } - // verify all other aspects of notarization + // verify the document self.doc.verify(dns_name)?; Ok(()) } - // verify Notary's sig on the notarization doc - fn verify_doc_signature( - &self, - pubkey: &PubKey, - sig: &[u8], - msg: &Signed, - ) -> Result { - let serialized = bincode::serialize(&msg).unwrap(); - Ok(pubkey.verify_signature(&serialized, sig)) + /// Verifies Notary's signature on that part of the document which was signed + fn verify_doc_signature(&self, pubkey: &PubKey, sig: &[u8]) -> Result<(), Error> { + let msg = self.signed_data().serialize()?; + pubkey.verify_signature(&msg, sig) } - // extracts the necessary data from the VerifierDoc into a Signed - // struct and returns it + /// Extracts the necessary fields from the [VerifierDoc] into a [Signed] + /// struct and returns it fn signed_data(&self) -> Signed { - //let doc = &self.doc.clone(); (&self.doc).into() } } -/// A PRG seeds from which to generate Notary's circuits' input labels for one -/// direction. 
We will use 2 separate seeds: one to generate the labels for all -/// plaintext which was sent and another seed to generate the labels for all plaintext -/// which was received +/// A PRG seeds from which to generate garbled circuit active labels, see +/// [crate::commitment::CommitmentType::labels_blake3] type LabelSeed = [u8; 32]; -pub fn blake3(data: &[u8]) -> [u8; 32] { - let mut hasher = Hasher::new(); - hasher.update(data); - hasher.finalize().into() -} - #[test] +// Create a document and verify it fn e2e_test() { use crate::{ commitment::{Commitment, CommitmentOpening, CommitmentType, Direction, Range}, @@ -147,7 +137,7 @@ fn e2e_test() { // Using the above data, the User computes [CommittedTLS] and sends a commitment to the Notary let committed_tls = CommittedTLS::new(cert_chain, params, cr, sr); - let commitment_to_tls = blake3(&bincode::serialize(&committed_tls).unwrap()); + let commitment_to_tls = blake3(&committed_tls.serialize().unwrap()); // ---------- After the notar. session is over: @@ -156,7 +146,7 @@ fn e2e_test() { let plaintext = b"This data will be notarized"; let ranges = vec![Range::new(2, 8)]; - let salt: [u8; 32] = rng.gen(); //TODO change to random salt + let salt: [u8; 32] = rng.gen(); // Note that the User will NOT be actually calling compute_label_commitment(). He doesn't // have label_seed at this point of the protocol. Instead, the User will @@ -165,7 +155,7 @@ fn e2e_test() { // let label_seed = rng.gen(); let hash_commitment = - utils::compute_label_commitment(plaintext, &label_seed, &ranges, salt.to_vec()).unwrap(); + utils::compute_label_commitment(plaintext, &ranges, &label_seed, &salt.to_vec()).unwrap(); let comm = Commitment::new( 0, @@ -220,16 +210,16 @@ fn e2e_test() { vec![open], ); - // The User converts the doc into an unchecked Type and passes it to the Verifier + // The User converts the doc into an unchecked type and passes it to the Verifier let doc_unchecked: VerifierDocUnchecked = doc.into(); // The Verifier verifies the doc: // Initially the Verifier may store the Notary's pubkey as bytes. 
Converts it into // PubKey type - let trusted_pubkey = PubKey::from_bytes(KeyType::P256, pubkey_bytes); + let trusted_pubkey = PubKey::from_bytes(KeyType::P256, pubkey_bytes).unwrap(); - let verifier = VerifierCore::new(doc_unchecked, Some(trusted_pubkey)).unwrap(); + let verifier = Verifier::new(doc_unchecked, Some(trusted_pubkey)).unwrap(); verifier.verify("tlsnotary.org".to_string()).unwrap(); } diff --git a/verifier/src/pubkey.rs b/verifier/src/pubkey.rs index ffda9b58c4..c6a32f78e5 100644 --- a/verifier/src/pubkey.rs +++ b/verifier/src/pubkey.rs @@ -4,37 +4,50 @@ use p256::{ EncodedPoint, }; +use super::Error; + pub enum KeyType { P256, } +// A public key used by the Notary to sign the notarization session pub enum PubKey { P256(p256::ecdsa::VerifyingKey), } impl PubKey { - pub fn from_bytes(typ: KeyType, bytes: &[u8]) -> Self { + /// Constructs pubkey from bytes + pub fn from_bytes(typ: KeyType, bytes: &[u8]) -> Result { match typ { KeyType::P256 => { - let point = EncodedPoint::from_bytes(bytes).unwrap(); - PubKey::P256(p256::ecdsa::VerifyingKey::from_encoded_point(&point).unwrap()) + let point = match EncodedPoint::from_bytes(bytes) { + Ok(point) => point, + Err(_) => return Err(Error::InternalError), + }; + let vk = match p256::ecdsa::VerifyingKey::from_encoded_point(&point) { + Ok(vk) => vk, + Err(_) => return Err(Error::InternalError), + }; + Ok(PubKey::P256(vk)) } - _ => panic!(), + _ => Err(Error::InternalError), } } - pub fn verify_signature(&self, msg: &[u8], sig: &[u8]) -> bool { + /// Verifies a signature `sig` for the message `msg` + pub fn verify_signature(&self, msg: &[u8], sig: &[u8]) -> Result<(), Error> { match *self { PubKey::P256(key) => { - let signature = Signature::from_der(sig).unwrap(); - key.verify(msg, &signature).unwrap(); - true + let signature = match Signature::from_der(sig) { + Ok(sig) => sig, + Err(_) => return Err(Error::SignatureVerificationError), + }; + match key.verify(msg, &signature) { + Ok(_) => Ok(()), + Err(_) => return Err(Error::SignatureVerificationError), + } } + _ => Err(Error::InternalError), } } } - -#[test] -fn test() { - let key = PubKey::from_bytes(KeyType::P256, &[4; 32]); -} diff --git a/verifier/src/signed.rs b/verifier/src/signed.rs index f6e6d96d82..cb2ebc1c52 100644 --- a/verifier/src/signed.rs +++ b/verifier/src/signed.rs @@ -1,13 +1,14 @@ -use crate::{tls_doc::EphemeralECPubkey, HashCommitment, LabelSeed, VerifierDoc}; +use super::{tls_doc::EphemeralECPubkey, Error, HashCommitment, LabelSeed, VerifierDoc}; use serde::Serialize; #[derive(Clone, Serialize)] -// TLS-related struct which is signed by Notary +// TLS-related data which is signed by Notary pub struct SignedTLS { // notarization time against which the TLS Certificate validity is checked - pub time: u64, - pub ephemeralECPubkey: EphemeralECPubkey, - /// User's commitment to [crate::tls_doc::CommittedTLS] + time: u64, + // ephemeral pubkey for ECDH key exchange + ephemeralECPubkey: EphemeralECPubkey, + /// User's commitment to [super::tls_doc::CommittedTLS] pub commitment_to_TLS: HashCommitment, } @@ -23,16 +24,25 @@ impl SignedTLS { commitment_to_TLS, } } + + pub fn time(&self) -> u64 { + self.time + } + + pub fn ephemeralECPubkey(&self) -> &EphemeralECPubkey { + &self.ephemeralECPubkey + } } -/// All the data which the Notary signed +/// All the data which the Notary signs #[derive(Clone, Serialize)] pub struct Signed { - tls: SignedTLS, - /// see comments in [crate::VerifierDoc] about the fields below + pub tls: SignedTLS, + // see comments in [crate::VerifierDoc] 
for details about the fields below + /// PRG seed from which garbled circuit labels are generated pub label_seed: LabelSeed, /// Merkle root of all the commitments - merkle_root: [u8; 32], + pub merkle_root: [u8; 32], } impl Signed { @@ -44,6 +54,10 @@ impl Signed { merkle_root, } } + + pub fn serialize(self) -> Result, Error> { + bincode::serialize(&self).map_err(|_| Error::SerializationError) + } } /// Extracts relevant fields from the VerifierDoc. Those are the fields @@ -51,9 +65,9 @@ impl Signed { impl std::convert::From<&VerifierDoc> for Signed { fn from(doc: &VerifierDoc) -> Self { Signed::new( - doc.tls_doc.signed_tls.clone(), - doc.label_seed.clone(), - doc.merkle_root.clone(), + doc.tls_doc().signed_tls().clone(), + *doc.label_seed(), + *doc.merkle_root(), ) } } diff --git a/verifier/src/tls_doc.rs b/verifier/src/tls_doc.rs index a77abdfec2..a87ee23cc0 100644 --- a/verifier/src/tls_doc.rs +++ b/verifier/src/tls_doc.rs @@ -1,78 +1,92 @@ +use super::{signed::SignedTLS, utils::blake3, webpki_utils, Error}; use serde::Serialize; -use crate::pubkey; - -use super::{signed::SignedTLS, webpki_utils, Error}; -use bincode::serialize; - -// The doc containing all the info needed to verify the authenticity of the TLS session. -#[derive(Clone, Serialize)] +/// The document containing all the info needed to verify the authenticity of the TLS session. +#[derive(Serialize)] pub struct TLSDoc { - pub signed_tls: SignedTLS, - committedTLS: CommittedTLS, + signed_tls: SignedTLS, + committed_tls: CommittedTLS, } impl TLSDoc { - pub fn new(signed_tls: SignedTLS, committedTLS: CommittedTLS) -> Self { + pub fn new(signed_tls: SignedTLS, committed_tls: CommittedTLS) -> Self { Self { signed_tls, - committedTLS, + committed_tls, } } - /// Verifies the TLSDoc. Checks that `hostname` is present in the leaf certificate. - pub fn verify(&self, hostname: String) -> Result<(), Error> { + /// Verifies the TLS document against the DNS name `dns_name`: + /// - end entity certificate was issued to `dns_name` and was valid at the time of the + /// notarization + /// - certificate chain was signed by a trusted certificate authority + /// - key exchange parameters were signed by the end entity certificate + /// - commitment to misc TLS data is correct + /// + pub fn verify(&self, dns_name: String) -> Result<(), Error> { // Verify TLS certificate chain against local root certs. Some certs in the chain may // have expired at the time of this verification. We verify their validity at the time // of notarization. 
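A sketch of the signing contract implied by `Signed`: the Notary signs exactly the bincode-serialized struct, and `verify_doc_signature` later re-derives the same bytes from the received document. The `sign_with_notary_key` closure is a stand-in for the Notary's actual P-256 signer, which is not part of this crate; the only assumptions taken from the patch are that `Signed::serialize` uses bincode and that `PubKey::verify_signature` parses a DER-encoded signature:

fn notary_signs(
    signed: Signed,
    sign_with_notary_key: impl Fn(&[u8]) -> Vec<u8>,
) -> Result<Vec<u8>, Error> {
    // must produce a DER-encoded ECDSA signature over these exact bytes
    let msg = signed.serialize()?;
    Ok(sign_with_notary_key(&msg))
}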
- webpki_utils::verify_cert_chain(&self.committedTLS.tls_cert_chain, self.signed_tls.time)?; + webpki_utils::verify_cert_chain( + &self.committed_tls.tls_cert_chain, + self.signed_tls.time(), + )?; - let leaf_cert = webpki_utils::extract_leaf_cert(&self.committedTLS.tls_cert_chain)?; + let ee_cert = webpki_utils::extract_end_entity_cert(&self.committed_tls.tls_cert_chain)?; - self.check_tls_commitment(&self.committedTLS, &self.signed_tls.commitment_to_TLS)?; + self.verify_tls_commitment(&self.committed_tls, &self.signed_tls.commitment_to_TLS)?; - //check that TLS key exchange parameters were signed by the leaf cert + //check that TLS key exchange parameters were signed by the end-entity cert webpki_utils::verify_sig_ke_params( - &leaf_cert, - &self.committedTLS.sig_ke_params, - &self.signed_tls.ephemeralECPubkey, - &self.committedTLS.client_random, - &self.committedTLS.server_random, + &ee_cert, + &self.committed_tls.sig_ke_params, + self.signed_tls.ephemeralECPubkey(), + &self.committed_tls.client_random, + &self.committed_tls.server_random, )?; - webpki_utils::check_hostname_present_in_cert(&leaf_cert, hostname)?; + webpki_utils::check_dns_name_present_in_cert(&ee_cert, dns_name)?; Ok(()) } - // check the commitment to misc TLS data - fn check_tls_commitment( + /// Verifies the commitment to misc TLS data + fn verify_tls_commitment( &self, - committedTLS: &CommittedTLS, + committed_tls: &CommittedTLS, commitment: &[u8; 32], ) -> Result<(), Error> { - let s = serialize(committedTLS).unwrap(); - // hash `serialize` and compare to `commitment` + if blake3(&committed_tls.serialize()?) != *commitment { + return Err(Error::CommittedTLSCheckFailed); + } Ok(()) } + + pub fn signed_tls(&self) -> &SignedTLS { + &self.signed_tls + } + + pub fn committed_tls(&self) -> &CommittedTLS { + &self.committed_tls + } } -// an x509 cert in DER format +/// an x509 certificate in DER format pub type CertDER = Vec; -// Misc TLS data which the User committed to before the User and the Notary engaged in 2PC -// to compute the TLS session keys +/// Misc TLS data which the User committed to before the User and the Notary engaged in 2PC +/// to compute the TLS session keys +/// +/// The User should not reveal `tls_cert_chain` because the Notary would learn the webserver name +/// from it. The User also should not reveal `signature_over_ephemeral_key` to the Notary, because +/// for ECDSA sigs it is possible to derive the pubkey from the sig and then use that pubkey to find out +/// the identity of the webserver. // -// The User should not reveal `tls_cert_chain` because the Notary would learn the webserver name -// from it. The User also should not reveal `signature_over_ephemeral_key` to the Notary, because -// for ECDSA sigs it is possible to derive the pubkey from the sig and then use that pubkey to find out -// the identity of the webserver. 
-//
-// Note that there is no need to commit to the ephemeral key because it will be signed explicitely
-// by the Notary
-#[derive(Clone, Serialize)]
+/// Note that there is no need to commit to the ephemeral key because it will be signed explicitly
+/// by the Notary
+#[derive(Serialize, Clone)]
 pub struct CommittedTLS {
-    pub tls_cert_chain: Vec<CertDER>,
+    tls_cert_chain: Vec<CertDER>,
     sig_ke_params: SignatureKeyExchangeParams,
     client_random: Vec<u8>,
     server_random: Vec<u8>,
@@ -92,26 +106,37 @@ impl CommittedTLS {
             server_random,
         }
    }
+
+    pub fn serialize(&self) -> Result<Vec<u8>, Error> {
+        bincode::serialize(&self).map_err(|_| Error::SerializationError)
+    }
 }
 
-/// Types of the ephemeral EC pubkey supported by TLSNotary
+/// Types of the ephemeral EC pubkey currently supported by TLSNotary
 #[derive(Clone, Serialize)]
 pub enum EphemeralECPubkeyType {
     P256,
-    ED25519,
 }
 
 /// The ephemeral EC public key (part of the TLS key exchange parameters)
 #[derive(Clone, Serialize)]
 pub struct EphemeralECPubkey {
-    pub typ: EphemeralECPubkeyType,
-    pub pubkey: Vec<u8>,
+    typ: EphemeralECPubkeyType,
+    pubkey: Vec<u8>,
 }
 
 impl EphemeralECPubkey {
     pub fn new(typ: EphemeralECPubkeyType, pubkey: Vec<u8>) -> Self {
         Self { typ, pubkey }
     }
+
+    pub fn typ(&self) -> &EphemeralECPubkeyType {
+        &self.typ
+    }
+
+    pub fn pubkey(&self) -> &Vec<u8> {
+        &self.pubkey
+    }
 }
 
 /// Algorithms that can be used for signing the TLS key exchange parameters
@@ -121,15 +146,23 @@ pub enum SigKEParamsAlg {
     ECDSA_P256_SHA256,
 }
 
-/// signature over the TLS key exchange params
-#[derive(Clone, Serialize)]
+/// A signature over the TLS key exchange params
+#[derive(Serialize, Clone)]
 pub struct SignatureKeyExchangeParams {
-    pub alg: SigKEParamsAlg,
-    pub sig: Vec<u8>,
+    alg: SigKEParamsAlg,
+    sig: Vec<u8>,
 }
 
 impl SignatureKeyExchangeParams {
     pub fn new(alg: SigKEParamsAlg, sig: Vec<u8>) -> Self {
         Self { alg, sig }
     }
+
+    pub fn alg(&self) -> &SigKEParamsAlg {
+        &self.alg
+    }
+
+    pub fn sig(&self) -> &Vec<u8> {
+        &self.sig
+    }
 }
diff --git a/verifier/src/utils.rs b/verifier/src/utils.rs
index c3541b99dd..be4ae9711c 100644
--- a/verifier/src/utils.rs
+++ b/verifier/src/utils.rs
@@ -1,42 +1,42 @@
-use crate::{
-    commitment::{u8_to_boolvec, Range},
-    Error, LabelSeed,
-};
+use super::{commitment::Range, Error, LabelSeed};
+use blake3::Hasher;
 use rand::{Rng, SeedableRng};
 use rand_chacha::ChaCha20Rng;
-use sha2::{Digest, Sha256};
 
-// Given the plaintext (the opening) and the seed, compute a (salted) commitment to the garbled circuit labels
-// in the byte ranges.
+// Given a `substring` and its byte `ranges` within a larger string, computes a (`salt`ed) commitment
+// to the garbled circuit labels. The labels are derived from a PRG `seed`.
 pub fn compute_label_commitment(
-    plaintext: &[u8],
-    seed: &LabelSeed,
+    substring: &[u8],
     ranges: &Vec<Range>,
-    salt: Vec<u8>,
+    seed: &LabelSeed,
+    salt: &Vec<u8>,
 ) -> Result<[u8; 32], Error> {
     // TODO: will need to bring this in harmony with label encoder in mpc-core
     let mut rng = ChaCha20Rng::from_seed(*seed);
     let delta: u128 = rng.gen();
-    let mut bits_iter = u8_to_boolvec(plaintext).into_iter();
+    // we need least-bit-first order, hence reverse()
+    let mut bits = u8vec_to_boolvec(substring);
+    bits.reverse();
+    let mut bits_iter = bits.into_iter();
 
     // for each bit of opening, expand the zero label at the rng stream offset
     // and, if needed, flip it to the one label, then hash the label
-    let mut hasher = Sha256::new();
+    let mut hasher = Hasher::new();
     for r in ranges {
         // set rng stream offset to the first label in range.
+1 accounts for // the delta - rng.set_word_pos(4 * ((r.start as u128) + 1)); + rng.set_word_pos(4 * ((r.start() as u128) + 1)); // expand as many labels as there are bits in the range - (0..(r.end - r.start) * 8).map(|_| { + (0..(r.end() - r.start()) * 8).map(|_| { let zero_label: u128 = rng.gen(); let active_label = if bits_iter.next().unwrap() == true { zero_label ^ delta } else { zero_label }; - hasher.update(active_label.to_be_bytes()); + hasher.update(&active_label.to_be_bytes()); }); } // add salt @@ -49,7 +49,24 @@ pub fn compute_label_commitment( pub fn bytes_in_ranges(bytestring: &[u8], ranges: &[Range]) -> Vec { let mut substring: Vec = Vec::new(); for r in ranges { - substring.append(&mut bytestring[r.start..r.end].to_vec()) + substring.append(&mut bytestring[r.start()..r.end()].to_vec()) } substring } + +#[inline] +pub fn u8vec_to_boolvec(v: &[u8]) -> Vec { + let mut bv = Vec::with_capacity(v.len() * 8); + for byte in v.iter() { + for i in 0..8 { + bv.push(((byte >> (7 - i)) & 1) != 0); + } + } + bv +} + +pub fn blake3(data: &[u8]) -> [u8; 32] { + let mut hasher = Hasher::new(); + hasher.update(data); + hasher.finalize().into() +} diff --git a/verifier/src/verifier_doc.rs b/verifier/src/verifier_doc.rs index ca7c28f7a4..a6016b0e87 100644 --- a/verifier/src/verifier_doc.rs +++ b/verifier/src/verifier_doc.rs @@ -1,65 +1,53 @@ -use super::LabelSeed; -use crate::{ +use super::{ checks, commitment::{Commitment, CommitmentOpening, CommitmentType}, error::Error, tls_doc::TLSDoc, + LabelSeed, Signed, }; use rs_merkle::{algorithms, proof_serializers, MerkleProof}; -use serde::ser::Serializer; -use std::collections::HashMap; +use serde::{ser::Serializer, Serialize}; +use std::{any::Any, collections::HashMap}; -#[derive(serde::Serialize)] -/// The notarization document received from the User after all sanity checks passed +#[derive(Serialize)] +/// A validated notarization document received from the User pub struct VerifierDoc { version: u8, - pub tls_doc: TLSDoc, + tls_doc: TLSDoc, /// Notary's signature over the [Signed] portion of this doc - pub signature: Option>, - - // GC wire labels seed for the request data and the response data - // This is the seeds from which IWLs are generated in - // https://docs.tlsnotary.org/protocol/notarization/public_data_commitment.html - pub label_seed: LabelSeed, - - // The root of the Merkle tree of commitments. The User must prove that each [Commitment] is in the - // Merkle tree. - // This approach allows the User to hide from the Notary the exact amount of commitments thus - // increasing User privacy against the Notary. - // The root was made known to the Notary before the Notary opened his garbled circuits - // to the User - pub merkle_root: [u8; 32], - - // The total leaf count in the Merkle tree of commitments. Provided by the User to the Verifier - // to enable merkle proof verification. - pub merkle_tree_leaf_count: usize, - - // A proof that all [commitments] are the leaves of the Merkle tree + signature: Option>, + + /// A PRG seeds from which to generate garbled circuit active labels, see + /// [crate::commitment::CommitmentType::labels_blake3] + label_seed: LabelSeed, + + /// The root of the Merkle tree of all the commitments. The User must prove that each one of the + /// `commitments` is included in the Merkle tree. + /// This approach allows the User to hide from the Notary the exact amount of commitments thus + /// increasing User privacy against the Notary. 
+ /// The root was made known to the Notary before the Notary opened his garbled circuits + /// to the User. + merkle_root: [u8; 32], + + /// The total leaf count in the Merkle tree of commitments. Provided by the User to the Verifier + /// to enable merkle proof verification. + merkle_tree_leaf_count: usize, + + /// A proof that all [commitments] are the leaves of the Merkle tree #[serde(serialize_with = "merkle_proof_serialize")] - pub merkle_multi_proof: MerkleProof, + merkle_multi_proof: MerkleProof, - // User's commitments to various portions of the TLS transcripts, sorted ascendingly by id + /// User's commitments to various portions of the notarized data, sorted ascendingly by id commitments: Vec, - // Openings for the commitments, sorted ascendingly by id + /// Openings for the commitments, sorted ascendingly by id commitment_openings: Vec, } -/// Serialize the [MerkleProof] type using its native `serialize` method -fn merkle_proof_serialize( - proof: &MerkleProof, - serializer: S, -) -> Result -where - S: Serializer, -{ - let bytes = proof.serialize::(); - serializer.serialize_bytes(&bytes) -} - impl VerifierDoc { - /// Creates a new doc. This method is called by the User. When passing the created doc - /// to the Verifier, the User must convert this doc into VerifierDocUnchecked + /// Creates a new document. This method is called only by the User. + /// [VerifierDoc] is never passed directly to the Verifier. Instead, the User must convert + /// it into [VerifierDocUnchecked] pub fn new( version: u8, tls_doc: TLSDoc, @@ -84,43 +72,62 @@ impl VerifierDoc { } } - /// Returns a new VerifierDoc after performing all sanity checks. This is the only way - /// for the Verifier to derive VerifierDoc + /// Returns a new [VerifierDoc] after performing all validation checks. This is the only way + /// for the Verifier (who was NOT acting as the Notary) to derive [VerifierDoc]. pub fn from_unchecked(unchecked: VerifierDocUnchecked) -> Result { - // Performs the following sanity checks: - // - // - at least one commitment is present - checks::check_at_least_one_commitment_present(&unchecked)?; + checks::perform_checks(&unchecked)?; - // - commitments and openings have their ids incremental and ascending - checks::check_commitment_and_opening_ids(&unchecked)?; + // Make sure the Notary's signature is present. + // (If the Verifier IS also the Notary then the signature is NOT needed. `VerifierDoc` + // should be created with `from_unchecked_with_signed_data()` instead.) 
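A sketch (not part of the patch) of how the two construction paths are meant to be chosen, using only `from_unchecked` and `from_unchecked_with_signed_data` as defined in this file; `locally_signed` is assumed to be the `Signed` data a Notary-Verifier generated itself during the session:

fn validate(
    unchecked: VerifierDocUnchecked,
    locally_signed: Option<Signed>,
) -> Result<VerifierDoc, Error> {
    match locally_signed {
        // this Verifier was the Notary: the doc must not carry a signature
        Some(signed) => VerifierDoc::from_unchecked_with_signed_data(unchecked, signed),
        // third-party Verifier: the doc must carry the Notary's signature,
        // which is checked later during verification
        None => VerifierDoc::from_unchecked(unchecked),
    }
}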
- // - commitment count equals opening count - checks::check_commitment_and_opening_count_equal(&unchecked)?; - - // - ranges inside one commitment are non-empty, valid, ascending, non-overlapping, non-overflowing - checks::check_ranges_inside_each_commitment(&unchecked)?; + if unchecked.signature.is_none() { + return Err(Error::SignatureExpected); + } - // - the length of each opening equals the amount of committed data in the ranges of the - // corresponding commitment - // - the total amount of committed data is less than 1GB to prevent DoS - checks::check_commitment_sizes(&unchecked)?; + Ok(Self { + version: unchecked.version, + tls_doc: unchecked.tls_doc, + signature: unchecked.signature, + label_seed: unchecked.label_seed, + merkle_root: unchecked.merkle_root, + merkle_tree_leaf_count: unchecked.merkle_tree_leaf_count, + merkle_multi_proof: unchecked.merkle_multi_proof, + commitments: unchecked.commitments, + commitment_openings: unchecked.commitment_openings, + }) + } - // - the amount of commitments is less that 1000 - checks::check_commitment_count(&unchecked)?; + /// Returns a new VerifierDoc after performing all validation checks and adding the signed data. + /// This is the only way for the Verifier who acted as the Notary to derive [VerifierDoc]. + /// `signed_data` (despite its name) is not actually signed because it was generated locally by + /// the calling Verifier. + pub fn from_unchecked_with_signed_data( + unchecked: VerifierDocUnchecked, + signed_data: Signed, + ) -> Result { + checks::perform_checks(&unchecked)?; + + // Make sure the Notary's signature is NOT present. + // (If the Verifier is NOT the Notary then the Notary's signature IS needed. `VerifierDoc` + // should be created with `from_unchecked()` instead.) + + if unchecked.signature.is_some() { + return Err(Error::SignatureNotExpected); + } - // - overlapping openings must match exactly - checks::check_overlapping_openings(&unchecked)?; + // insert our `signed_data` which we know is correct - // - each [merkle_tree_index] is both unique and also ascending between commitments - checks::check_merkle_tree_indices(&unchecked); + let tls_doc = TLSDoc::new(signed_data.tls, unchecked.tls_doc.committed_tls().clone()); + let label_seed = signed_data.label_seed; + let merkle_root = signed_data.merkle_root; Ok(Self { version: unchecked.version, - tls_doc: unchecked.tls_doc, + tls_doc, signature: unchecked.signature, - label_seed: unchecked.label_seed, - merkle_root: unchecked.merkle_root, + label_seed, + merkle_root, merkle_tree_leaf_count: unchecked.merkle_tree_leaf_count, merkle_multi_proof: unchecked.merkle_multi_proof, commitments: unchecked.commitments, @@ -128,9 +135,11 @@ impl VerifierDoc { }) } - /// verifies the Doc + /// Verifies the document. This includes verifying: + /// - the TLS document + /// - the inclusion of commitments in the Merkle tree + /// - each commitment pub fn verify(&self, dns_name: String) -> Result<(), Error> { - // verify the TLS portion of the doc. The cert must contain dns_name self.tls_doc.verify(dns_name)?; self.verify_merkle_proofs()?; @@ -140,16 +149,19 @@ impl VerifierDoc { Ok(()) } - /// Verifies that each commitment is present in the Merkle tree. Note that we already checked - /// in [checks::check_merkle_tree_indices] that indices are unique and ascending + /// Verifies that each commitment is present in the Merkle tree. 
+ /// + /// Note that we already checked in [checks::check_merkle_tree_indices] that indices are + /// unique and ascending fn verify_merkle_proofs(&self) -> Result<(), Error> { // collect all merkle tree leaf indices and corresponding hashes let (leaf_indices, leaf_hashes): (Vec, Vec<[u8; 32]>) = self .commitments .iter() - .map(|c| (c.merkle_tree_index, c.commitment)) + .map(|c| (c.merkle_tree_index(), c.commitment())) .unzip(); + // verify the inclusion of multiple leaves if !self.merkle_multi_proof.verify( self.merkle_root, &leaf_indices, @@ -162,6 +174,7 @@ impl VerifierDoc { Ok(()) } + /// Verifies commitments to notarized data fn verify_commitments(&self) -> Result<(), Error> { self.verify_label_commitments()?; @@ -170,39 +183,55 @@ impl VerifierDoc { Ok(()) } - // Verify each label commitment against its opening + /// Verifies each garbled circuit labels commitment against its opening fn verify_label_commitments(&self) -> Result<(), Error> { - // collect only label commitments + // collect only labels commitments let label_commitments: Vec<&Commitment> = self .commitments .iter() - .filter(|c| c.typ == CommitmentType::labels_blake3) + .filter(|c| *c.typ() == CommitmentType::labels_blake3) .collect(); // map each opening to its id let mut openings_ids: HashMap = HashMap::new(); for o in &self.commitment_openings { - openings_ids.insert(o.id, o); + openings_ids.insert(o.id(), o); } // collect only openings corresponding to label commitments let mut openings: Vec<&CommitmentOpening> = Vec::with_capacity(label_commitments.len()); for c in &label_commitments { - match openings_ids.get(&c.id) { + match openings_ids.get(&c.id()) { Some(opening) => openings.push(opening), // should never happen since we already checked that each opening has a - // corresponding commitment in [VerifierDoc::from_unchecked()] + // corresponding commitment in [super::checks::check_commitment_and_opening_ids()] _ => return Err(Error::InternalError), } } - // verify each (commitment, opening) pair + // verify each (opening, commitment) pair for (o, c) in openings.iter().zip(label_commitments) { - c.verify(o, &self.label_seed)?; + c.verify(o, Box::new(self.label_seed) as Box)?; } Ok(()) } + + pub fn signature(&self) -> &Option> { + &self.signature + } + + pub fn label_seed(&self) -> &LabelSeed { + &self.label_seed + } + + pub fn merkle_root(&self) -> &[u8; 32] { + &self.merkle_root + } + + pub fn tls_doc(&self) -> &TLSDoc { + &self.tls_doc + } } /// This is the [VerifierDoc] in its unchecked form. 
This is the form in which the doc is received
@@ -210,14 +239,24 @@ pub struct VerifierDocUnchecked {
     /// All fields are exactly as in [VerifierDoc]
     version: u8,
-    pub tls_doc: TLSDoc,
-    pub signature: Option>,
-    pub label_seed: LabelSeed,
-    pub merkle_root: [u8; 32],
-    pub merkle_tree_leaf_count: usize,
-    pub merkle_multi_proof: MerkleProof,
-    pub commitments: Vec,
-    pub commitment_openings: Vec,
+    tls_doc: TLSDoc,
+    signature: Option>,
+    label_seed: LabelSeed,
+    merkle_root: [u8; 32],
+    merkle_tree_leaf_count: usize,
+    merkle_multi_proof: MerkleProof,
+    commitments: Vec,
+    commitment_openings: Vec,
+}
+
+impl VerifierDocUnchecked {
+    pub fn commitments(&self) -> &Vec {
+        &self.commitments
+    }
+
+    pub fn commitment_openings(&self) -> &Vec {
+        &self.commitment_openings
+    }
 }
 
 /// Converts VerifierDoc into an unchecked type which will be passed to the Verifier
@@ -236,3 +275,15 @@ impl std::convert::From for VerifierDocUnchecked {
         }
     }
 }
+
+/// Serialize the [MerkleProof] type using its native `serialize` method
+fn merkle_proof_serialize(
+    proof: &MerkleProof,
+    serializer: S,
+) -> Result
+where
+    S: Serializer,
+{
+    let bytes = proof.serialize::();
+    serializer.serialize_bytes(&bytes)
+}
diff --git a/verifier/src/webpki_utils.rs b/verifier/src/webpki_utils.rs
index dfc5ab27bd..863983e786 100644
--- a/verifier/src/webpki_utils.rs
+++ b/verifier/src/webpki_utils.rs
@@ -1,7 +1,10 @@
-use super::tls_doc::{
-    CertDER, EphemeralECPubkey, EphemeralECPubkeyType, SigKEParamsAlg, SignatureKeyExchangeParams,
+use super::{
+    tls_doc::{
+        CertDER, EphemeralECPubkey, EphemeralECPubkeyType, SigKEParamsAlg,
+        SignatureKeyExchangeParams,
+    },
+    Error,
 };
-use crate::Error;
 use x509_parser::{certificate, prelude::FromDer};
 
 type SignatureAlgorithms = &'static [&'static webpki::SignatureAlgorithm];
@@ -23,13 +26,13 @@ static SUPPORTED_SIG_ALGS: SignatureAlgorithms = &[
     &webpki::RSA_PKCS1_3072_8192_SHA384,
 ];
 
-/// Verifier that the x509 certificate `chain` was valid at the given `time`.
+/// Verifies that the x509 certificate `chain` was valid at the given `time`.
 /// The end entity certificate must be the last in the `chain`.
 pub fn verify_cert_chain(chain: &[CertDER], time: u64) -> Result<(), Error> {
     let time = webpki::Time::from_seconds_since_unix_epoch(time);
     let anchor = &webpki_roots::TLS_SERVER_ROOTS;
 
-    let last_cert_der = extract_leaf_cert(chain)?;
+    let last_cert_der = extract_end_entity_cert(chain)?;
 
     // Parse the DER into x509.
Since webpki doesn't expose the parser, // we use x509-parser instead @@ -59,11 +62,13 @@ pub fn verify_cert_chain(chain: &[CertDER], time: u64) -> Result<(), Error> { } /// Verifies the signature over the TLS key exchange parameters +/// +/// * cert - Certificate which signed the key exchange parameters +/// * sig_ke_params - Signature over the parameters +/// * ephem_pubkey, client_random, server_random - Parameters which were signed pub fn verify_sig_ke_params( - // certificate which signed the key exchange parameters cert: &CertDER, sig_ke_params: &SignatureKeyExchangeParams, - // the following three are the parameters that were signed ephem_pubkey: &EphemeralECPubkey, client_random: &[u8], server_random: &[u8], @@ -72,42 +77,45 @@ pub fn verify_sig_ke_params( .map_err(|e| Error::WebpkiError(e.to_string()))?; // curve constant from the TLS spec - let curve_const = match &ephem_pubkey.typ { + let curve_const = match &ephem_pubkey.typ() { EphemeralECPubkeyType::P256 => [0x00, 0x17], _ => return Err(Error::UnknownCurveInKeyExchange), }; + // type of the public key from the TLS spec: 0x03 = "named_curve" + let pubkey_type = [0x03]; + // message that was signed let msg = [ client_random, server_random, - &[0x03], // type of the public key 0x03 = named_curve + &pubkey_type, &curve_const, - &[ephem_pubkey.pubkey.len() as u8], // pubkey length - &ephem_pubkey.pubkey, // pubkey + &[ephem_pubkey.pubkey().len() as u8], // pubkey length + ephem_pubkey.pubkey(), // pubkey ] .concat(); - // we can't use [webpki::SignatureAlgorithm] in [SignatureKeyExchangeParams::alg] - // because it is not Clone. Instead we match: - let sigalg = match &sig_ke_params.alg { + // we don't use [webpki::SignatureAlgorithm] in [SignatureKeyExchangeParams::alg] + // because it requires a custom serializer. 
Instead we match: + let sigalg = match &sig_ke_params.alg() { SigKEParamsAlg::RSA_PKCS1_2048_8192_SHA256 => &webpki::RSA_PKCS1_2048_8192_SHA256, SigKEParamsAlg::ECDSA_P256_SHA256 => &webpki::ECDSA_P256_SHA256, _ => return Err(Error::UnknownSigningAlgorithmInKeyExchange), }; - cert.verify_signature(sigalg, &msg, &sig_ke_params.sig) + cert.verify_signature(sigalg, &msg, sig_ke_params.sig()) .map_err(|e| Error::WebpkiError(e.to_string()))?; Ok(()) } -// check that the hostname is present in the cert -pub fn check_hostname_present_in_cert(cert: &CertDER, hostname: String) -> Result<(), Error> { +/// Checks that the DNS name is present in the certificate +pub fn check_dns_name_present_in_cert(cert: &CertDER, dns_name: String) -> Result<(), Error> { let cert = webpki::EndEntityCert::try_from(cert.as_slice()) .map_err(|e| Error::WebpkiError(e.to_string()))?; - let dns_name = webpki::DnsNameRef::try_from_ascii_str(hostname.as_str()) + let dns_name = webpki::DnsNameRef::try_from_ascii_str(dns_name.as_str()) .map_err(|e| Error::WebpkiError(e.to_string()))?; cert.verify_is_valid_for_dns_name(dns_name) @@ -116,8 +124,8 @@ pub fn check_hostname_present_in_cert(cert: &CertDER, hostname: String) -> Resul Ok(()) } -/// Returns the leaf certificate from the chain (the last one) -pub fn extract_leaf_cert(chain: &[CertDER]) -> Result { +/// Returns the end-entity certificate from the chain (the last one) +pub fn extract_end_entity_cert(chain: &[CertDER]) -> Result { match chain.last() { None => Err(Error::EmptyCertificateChain), Some(last) => Ok(last.clone()), @@ -226,15 +234,12 @@ mod test { let pubkey: &[u8] = &to_hex(RSA_EPHEM_PUBKEY); let sig: &[u8] = &to_hex(RSA_SIG); - let sig = SignatureKeyExchangeParams { - alg: SigKEParamsAlg::RSA_PKCS1_2048_8192_SHA256, - sig: sig.to_vec(), - }; + let sig = SignatureKeyExchangeParams::new( + SigKEParamsAlg::RSA_PKCS1_2048_8192_SHA256, + sig.to_vec(), + ); - let pubkey = EphemeralECPubkey { - pubkey: pubkey.to_vec(), - typ: EphemeralECPubkeyType::P256, - }; + let pubkey = EphemeralECPubkey::new(EphemeralECPubkeyType::P256, pubkey.to_vec()); assert!(verify_sig_ke_params(&RSA_CERT.to_vec(), &sig, &pubkey, cr, sr).is_ok()); } @@ -247,15 +252,9 @@ mod test { let pubkey: &[u8] = &to_hex(ECDSA_EPHEM_PUBKEY); let sig: &[u8] = &to_hex(ECDSA_SIG); - let sig = SignatureKeyExchangeParams { - alg: SigKEParamsAlg::ECDSA_P256_SHA256, - sig: sig.to_vec(), - }; + let sig = SignatureKeyExchangeParams::new(SigKEParamsAlg::ECDSA_P256_SHA256, sig.to_vec()); - let pubkey = EphemeralECPubkey { - pubkey: pubkey.to_vec(), - typ: EphemeralECPubkeyType::P256, - }; + let pubkey = EphemeralECPubkey::new(EphemeralECPubkeyType::P256, pubkey.to_vec()); assert!(verify_sig_ke_params(&ECDSA_CERT.to_vec(), &sig, &pubkey, cr, sr).is_ok()); } @@ -268,15 +267,12 @@ mod test { let pubkey: &[u8] = &to_hex(RSA_EPHEM_PUBKEY); let sig: &[u8] = &to_hex(RSA_SIG); - let sig = SignatureKeyExchangeParams { - alg: SigKEParamsAlg::RSA_PKCS1_2048_8192_SHA256, - sig: sig.to_vec(), - }; + let sig = SignatureKeyExchangeParams::new( + SigKEParamsAlg::RSA_PKCS1_2048_8192_SHA256, + sig.to_vec(), + ); - let pubkey = EphemeralECPubkey { - pubkey: pubkey.to_vec(), - typ: EphemeralECPubkeyType::P256, - }; + let pubkey = EphemeralECPubkey::new(EphemeralECPubkeyType::P256, pubkey.to_vec()); let mut cr = cr.to_vec(); // corrupt the last byte of client random @@ -306,15 +302,12 @@ mod test { let (corrupted, _) = last.overflowing_add(1); sig.push(corrupted); - let sig = SignatureKeyExchangeParams { - alg: 
SigKEParamsAlg::ECDSA_P256_SHA256,
-            sig: sig.to_vec(),
-        };
+        let sig = SignatureKeyExchangeParams::new(
+            SigKEParamsAlg::ECDSA_P256_SHA256,
+            sig.to_vec(),
+        );
 
-        let pubkey = EphemeralECPubkey {
-            pubkey: pubkey.to_vec(),
-            typ: EphemeralECPubkeyType::P256,
-        };
+        let pubkey = EphemeralECPubkey::new(EphemeralECPubkeyType::P256, pubkey.to_vec());
 
         let err = verify_sig_ke_params(&ECDSA_CERT.to_vec(), &sig, &pubkey, cr, sr);
         assert_eq!(
@@ -327,14 +320,14 @@
     #[test]
     fn test_check_hostname_present_in_cert() {
         let host = String::from("tlsnotary.org");
-        assert!(check_hostname_present_in_cert(&EE.to_vec(), host).is_ok());
+        assert!(check_dns_name_present_in_cert(&EE.to_vec(), host).is_ok());
     }
 
     // Expect to fail because the host name is not in the cert
     #[test]
     fn test_check_hostname_present_in_cert_bad_host() {
         let host = String::from("tlsnotary");
-        let err = check_hostname_present_in_cert(&EE.to_vec(), host);
+        let err = check_dns_name_present_in_cert(&EE.to_vec(), host);
 
         let _str = String::from("CertNotValidForName");
         assert_eq!(
             err.unwrap_err(),
@@ -346,7 +339,7 @@
     #[test]
     fn test_check_hostname_present_in_cert_invalid_dns_name() {
         let host = String::from("tlsnotary.org%");
-        let err = check_hostname_present_in_cert(&EE.to_vec(), host);
+        let err = check_dns_name_present_in_cert(&EE.to_vec(), host);
         assert_eq!(
             err.unwrap_err(),
             Error::WebpkiError("InvalidDnsNameError".to_string())

From 5444fc345aa0aeecaa931c265c68d4944a22e5f6 Mon Sep 17 00:00:00 2001
From: themighty1
Date: Thu, 26 Jan 2023 10:34:22 +0200
Subject: [PATCH 03/23] final touchups

---
 verifier/src/commitment.rs    |  42 +++--
 verifier/src/label_encoder.rs | 152 ++++++++---------
 verifier/src/lib.rs           | 148 ++++++++++++-----
 verifier/src/signed.rs        |  53 ++++--
 verifier/src/tls_doc.rs       |   4 +-
 verifier/src/utils.rs         | 296 ++++++++++++++++++++++++++++------
 verifier/src/verifier_doc.rs  |  41 ++++-
 7 files changed, 549 insertions(+), 187 deletions(-)

diff --git a/verifier/src/commitment.rs b/verifier/src/commitment.rs
index 1fe82c724d..f7148bc401 100644
--- a/verifier/src/commitment.rs
+++ b/verifier/src/commitment.rs
@@ -1,4 +1,4 @@
-use super::{error::Error, utils::compute_label_commitment, LabelSeed};
+use super::{error::Error, utils::compute_label_commitment, HashCommitment, LabelSeed};
 use serde::Serialize;
 use std::any::Any;
 
@@ -11,7 +11,7 @@ pub struct Commitment {
     // The index of this commitment in the Merkle tree of commitments
     merkle_tree_index: usize,
     // The actual commitment
-    commitment: [u8; 32],
+    commitment: HashCommitment,
     // The absolute byte ranges within the notarized data. The committed data
     // is located in those ranges.
     ranges: Vec,
@@ -22,7 +22,7 @@ impl Commitment {
         id: usize,
         typ: CommitmentType,
         direction: Direction,
-        commitment: [u8; 32],
+        commitment: HashCommitment,
         ranges: Vec,
         merkle_tree_index: usize,
     ) -> Self {
@@ -45,12 +45,19 @@ impl Commitment {
     ) -> Result<(), Error> {
         let expected = match self.typ {
             CommitmentType::labels_blake3 => {
-                let seed = match extra_data.downcast::() {
-                    Ok(seed) => *seed,
-                    Err(_) => return Err(Error::InternalError),
-                };
-
-                compute_label_commitment(&opening.opening, &self.ranges, &seed, opening.salt())?
+                let (seed, cipher_block_size) =
+                    match extra_data.downcast::() {
+                        Ok(extra_data) => (extra_data.label_seed, extra_data.cipher_block_size),
+                        Err(_) => return Err(Error::InternalError),
+                    };
+
+                compute_label_commitment(
+                    &opening.opening,
+                    &self.ranges,
+                    &seed,
+                    opening.salt(),
+                    cipher_block_size,
+                )?
            }
            _ => return Err(Error::InternalError),
        };
@@ -129,7 +136,7 @@ pub enum Direction {
     Response,
 }
 
-#[derive(Serialize, Clone)]
+#[derive(Serialize, Clone, Debug)]
 /// A half-open range [start, end). Range bounds are ascending i.e. start < end
 pub struct Range {
     start: usize,
@@ -149,3 +156,18 @@ impl Range {
         self.end
     }
 }
+
+/// Extra data for [CommitmentType::labels_blake3] commitments
+pub struct LabelSeedAndCipherBlockSize {
+    label_seed: LabelSeed,
+    cipher_block_size: usize,
+}
+
+impl LabelSeedAndCipherBlockSize {
+    pub fn new(label_seed: LabelSeed, cipher_block_size: usize) -> Self {
+        Self {
+            label_seed,
+            cipher_block_size,
+        }
+    }
+}
diff --git a/verifier/src/label_encoder.rs b/verifier/src/label_encoder.rs
index 3088348273..d3a137d964 100644
--- a/verifier/src/label_encoder.rs
+++ b/verifier/src/label_encoder.rs
@@ -1,96 +1,102 @@
-use std::collections::HashMap;
-
-use rand::{Rng, SeedableRng};
+//! Adapted from tlsn/mpc/mpc-core, except [ChaChaEncoder::encode()] was modified to encode 1 bit
+//! at a time
+use super::LabelSeed;
+use rand::{CryptoRng, Rng, SeedableRng};
 use rand_chacha::ChaCha20Rng;
+use std::ops::BitXor;
+
+const DELTA_STREAM_ID: u64 = u64::MAX;
+const PLAINTEXT_STREAM_ID: u64 = 1;
+
+#[derive(Clone, Copy)]
+pub struct Block(u128);
+
+impl Block {
+    #[inline]
+    pub fn new(b: u128) -> Self {
+        Self(b)
+    }
+
+    #[inline]
+    pub fn random(rng: &mut R) -> Self {
+        Self::new(rng.gen())
+    }
+
+    #[inline]
+    pub fn set_lsb(&mut self) {
+        self.0 |= 1;
+    }
+
+    #[inline]
+    pub fn inner(&self) -> u128 {
+        self.0
+    }
+}
+
+impl BitXor for Block {
+    type Output = Self;
+
+    #[inline]
+    fn bitxor(self, other: Self) -> Self::Output {
+        Self(self.0 ^ other.0)
+    }
+}
 
-/// Encodes wire labels using the ChaCha algorithm and a global offset (delta).
+/// Global binary offset used by the Free-XOR technique to create wire label
+/// pairs where W_1 = W_0 ^ Delta.
 ///
-/// An encoder instance is configured using a domain id. Domain ids can be used in combination
-/// with stream ids to partition label sets.
-#[derive(Debug)]
+/// In accordance with the point-and-permute (p&p) technique, the LSB of delta is set to 1 so
+/// the permute bit LSB(W_1) = LSB(W_0) ^ 1
+#[derive(Clone, Copy)]
+pub struct Delta(Block);
+
+impl Delta {
+    /// Creates new random Delta
+    pub(crate) fn random(rng: &mut R) -> Self {
+        let mut block = Block::random(rng);
+        block.set_lsb();
+        Self(block)
+    }
+
+    /// Returns the inner block
+    #[inline]
+    pub(crate) fn into_inner(self) -> Block {
+        self.0
+    }
+}
+
+/// Encodes wires into labels using the ChaCha algorithm.
 pub struct ChaChaEncoder {
-    seed: [u8; 32],
-    domain: u32,
     rng: ChaCha20Rng,
-    stream_state: HashMap,
-    delta: u128,
+    delta: Delta,
 }
 
 impl ChaChaEncoder {
     /// Creates a new encoder with the provided seed
     ///
     /// * `seed` - 32-byte seed for ChaChaRng
-    /// * `domain` - Domain id
-    ///
-    /// Domain id must be less than 2^31
-    pub fn new(seed: [u8; 32], domain: u32) -> Self {
-        assert!(domain <= u32::MAX >> 1);
-
+    pub fn new(seed: LabelSeed) -> Self {
        let mut rng = ChaCha20Rng::from_seed(seed);
 
-        // Stream id 0 is reserved to generate delta.
+        // Stream id u64::MAX is reserved to generate delta.
// This way there is only ever 1 delta per seed - rng.set_stream(0); - let delta: u128 = rng.gen(); - - Self { - seed, - domain, - rng, - stream_state: HashMap::default(), - delta, - } - } + rng.set_stream(DELTA_STREAM_ID); + let delta = Delta::random(&mut rng); - /// Returns encoder's rng seed - pub fn get_seed(&self) -> [u8; 32] { - self.seed + Self { rng, delta } } - /// Returns next 8 label pairs + /// Encodes one bit of plaintext into two labels /// - /// * `stream_id` - Stream id which can be used to partition label sets - /// * `input` - Circuit input to encode - pub fn labels_for_next_byte(&mut self, stream_id: u32) -> Vec<[u128; 2]> { - self.set_stream(stream_id); - (0..8) - .map(|_| { - //test - let zero_label: u128 = self.rng.gen(); - let one_label = zero_label ^ self.delta; - [zero_label, one_label] - }) - .collect() - } - - /// Sets the selected stream id, restoring word position if a stream - /// has been used before. - fn set_stream(&mut self, id: u32) { - // MSB -> LSB - // 31 bits 32 bits 1 bit - // [domain] [id] [reserved] - // The reserved bit ensures that we never pull from stream 0 which - // is reserved to generate delta - let new_id = ((self.domain as u64) << 33) + ((id as u64) << 1) + 1; - - let current_id = self.rng.get_stream(); - - // noop if stream already set - if new_id == current_id { - return; - } - - // Store word position for current stream - self.stream_state - .insert(current_id, self.rng.get_word_pos()); + /// * `pos` - The position of a bit which needs to be encoded + pub fn encode(&mut self, pos: usize) -> [Block; 2] { + self.rng.set_stream(PLAINTEXT_STREAM_ID); - // Update stream id - self.rng.set_stream(new_id); + // jump to the multiple-of-128 bit offset (128 bits is the size of one label) + self.rng.set_word_pos((pos as u128) * 4); - // Get word position if stored, otherwise default to 0 - let word_pos = self.stream_state.get(&new_id).copied().unwrap_or(0); + let zero_label = Block::random(&mut self.rng); - // Update word position - self.rng.set_word_pos(word_pos); + [zero_label, zero_label ^ self.delta.into_inner()] } } diff --git a/verifier/src/lib.rs b/verifier/src/lib.rs index dee9db03b3..edc89327be 100644 --- a/verifier/src/lib.rs +++ b/verifier/src/lib.rs @@ -12,15 +12,17 @@ mod webpki_utils; use crate::signed::Signed; use error::Error; use pubkey::PubKey; -use utils::blake3; use verifier_doc::{VerifierDoc, VerifierDocUnchecked}; type HashCommitment = [u8; 32]; +/// A PRG seeds from which to generate garbled circuit active labels, see +/// [crate::commitment::CommitmentType::labels_blake3] +type LabelSeed = [u8; 32]; /// Verifier of the notarization document /// /// Once the verification succeeds, an application level (e.g. 
HTTP, JSON) parser can -/// parse `commitment_openings` in [VerifierDoc] +/// parse `commitment_openings` in `doc` struct Verifier { /// A validated notarization document which needs to be verified doc: VerifierDoc, @@ -83,23 +85,21 @@ impl Verifier { } } -/// A PRG seeds from which to generate garbled circuit active labels, see -/// [crate::commitment::CommitmentType::labels_blake3] -type LabelSeed = [u8; 32]; - #[test] // Create a document and verify it fn e2e_test() { use crate::{ commitment::{Commitment, CommitmentOpening, CommitmentType, Direction, Range}, + label_encoder::{Block, ChaChaEncoder}, signed::SignedTLS, tls_doc::{ CommittedTLS, EphemeralECPubkey, EphemeralECPubkeyType, SigKEParamsAlg, SignatureKeyExchangeParams, TLSDoc, }, - utils::bytes_in_ranges, + utils::{blake3, bytes_in_ranges, u8vec_to_boolvec}, Signed, }; + use blake3::Hasher; use p256::ecdsa::{signature::Signer, SigningKey, VerifyingKey}; use pubkey::KeyType; use rand::Rng; @@ -107,8 +107,14 @@ fn e2e_test() { let mut rng = rand::thread_rng(); - // After the webserver sends the Server Key Exchange message (during the TLS handshake), - // the tls-client module provides the following TLS data: + // The size in bytes of one block of the cipher that was computed inside the garbled circuit + // (16 for AES, 64 for ChaCha) + let cipher_block_size = 16; + + let plaintext = b"This important data will be notarized"; + + // -------- After the webserver sends the Server Key Exchange message (during the TLS handshake), + // the tls-client module provides the following TLS data: /// end entity cert static EE: &[u8] = include_bytes!("testdata/tlsnotary.org/ee.der"); @@ -123,39 +129,90 @@ fn e2e_test() { // data taken from an actual network trace captured with `tcpdump host tlsnotary.org -w out.pcap` // (see testdata/key_exchange/README for details) - let cr = + let client_random = hex::decode("ac3808970faf996d38864e205c6b787a1d05f681654a5d2a3c87f7dd2f13332e").unwrap(); - let sr = + let server_random = hex::decode("8abf9a0c4b3b9694edac3d19e8eb7a637bfa8fe5644bd9f1444f574e47524401").unwrap(); - let eph_pk = hex::decode("04521e456448e6156026bb1392e0a689c051a84d67d353ab755fce68a2e9fba68d09393fa6485db84517e16d9855ce5ba3ec2293f2e511d1e315570531722e9788").unwrap(); + let ephemeral_pubkey = hex::decode("04521e456448e6156026bb1392e0a689c051a84d67d353ab755fce68a2e9fba68d09393fa6485db84517e16d9855ce5ba3ec2293f2e511d1e315570531722e9788").unwrap(); let sig = hex::decode("337aa65793562550f6de0a9c792b5f531a96bb78f65a2063f710bfb99e11c791e13d35c798b50eea1351c14efc526009c7836e888206cebde7135130a1fbc049d42e1d1ed05c10f0d108b9540f049ac24fe1076d391b9da3d4e60b5cb8f341bda993f6002873847be744c1955ff575b2d833694fb8a432898c5ac55752e2bddcee4c07371335e1a6581694df43c6eb0ce8da4cdd497c205607b573f9c5d17c951e0a71fbf967c4bff53fc37c597b2f5656478fefb780e8f37bd8409985dd980eda4f254c7dce76dc69e66ed27c0f2c93b53a6dfd7b27359e1589a30d483725e92305766c62d6cad2c0142d3a3c4a2272e6d81eda2886ef12028167f83b3c33ea").unwrap(); let params = SignatureKeyExchangeParams::new(SigKEParamsAlg::RSA_PKCS1_2048_8192_SHA256, sig); - let eph_ec = EphemeralECPubkey::new(EphemeralECPubkeyType::P256, eph_pk); + let ephemeral_pubkey = EphemeralECPubkey::new(EphemeralECPubkeyType::P256, ephemeral_pubkey); - // Using the above data, the User computes [CommittedTLS] and sends a commitment to the Notary + // -------- Using the above data, the User computes [CommittedTLS] and sends a commitment to + // the Notary - let committed_tls = CommittedTLS::new(cert_chain, params, cr, sr); + let committed_tls = 
CommittedTLS::new(cert_chain, params, client_random, server_random); let commitment_to_tls = blake3(&committed_tls.serialize().unwrap()); - // ---------- After the notar. session is over: + // -------- The Notary generates garbled circuit's labels from a PRG seed. One pair of labels + // for each bit of plaintext + + let label_seed: LabelSeed = rng.gen(); + + let mut enc = ChaChaEncoder::new(label_seed); + + // Note that for this test's purposes the Notary is using crate::label_encoder. + // In production he will use tlsn/mpc/mpc-core/garble/label/encoder + let full_labels: Vec<[Block; 2]> = (0..plaintext.len() * 8).map(|i| enc.encode(i)).collect(); + + // -------- The User retrieves her active labels using Oblivious Transfer (simulated below): + + // convert plaintext into bits by splitting up the plaintext into 16-byte AES blocks, then + // making each block's bit ordering lsb0 + let bits: Vec = plaintext + .chunks(cipher_block_size) + .flat_map(|chunk| { + let mut bits = u8vec_to_boolvec(chunk); + bits.reverse(); + bits + }) + .collect(); - // The User computes all her commitments - // Here we'll have 1 (salted) commitment which has 1 byterange + let all_active_labels: Vec = full_labels + .iter() + .zip(bits) + .map( + |(label_pair, bit)| { + if bit { + label_pair[1] + } else { + label_pair[0] + } + }, + ) + .collect(); + + // ---------- After the notar. session is over: -------- + + // -------- The User computes all her commitments + + // Here we'll have 1 (salted) commitment which has 1 range + + let ranges = vec![Range::new(5, 19)]; - let plaintext = b"This data will be notarized"; - let ranges = vec![Range::new(2, 8)]; let salt: [u8; 32] = rng.gen(); - // Note that the User will NOT be actually calling compute_label_commitment(). He doesn't - // have label_seed at this point of the protocol. Instead, the User will - // flatten all his active labels, select those which are located within ranges and will - // hash them. - // - let label_seed = rng.gen(); - let hash_commitment = - utils::compute_label_commitment(plaintext, &ranges, &label_seed, &salt.to_vec()).unwrap(); + // hash all the active labels in the commitment's ranges + let mut hasher = Hasher::new(); + + // due to lsb0 ordering of labels, we need to split up each range into individual ranges covering + // each block and then flip each individual range + for r in &ranges { + let block_ranges = utils::split_into_block_ranges(r, cipher_block_size); + for br in &block_ranges { + let flipped_range = utils::flip_range(br, cipher_block_size); + for label in + all_active_labels[flipped_range.start() * 8..flipped_range.end() * 8].iter() + { + hasher.update(&label.inner().to_be_bytes()); + } + } + } + // add salt + hasher.update(&salt); + let hash_commitment: HashCommitment = hasher.finalize().into(); let comm = Commitment::new( 0, @@ -166,29 +223,35 @@ fn e2e_test() { 0, ); - // The User creates a merkle tree of commitments and then a merkle proof of inclusion. - // Sends the merkle_root to the Notary + // -------- The User creates a merkle tree of commitments and then a merkle proof of inclusion. 
+    // Sends the merkle_root to the Notary
 
     let leaves = [hash_commitment];
     let merkle_tree = MerkleTree::::from_leaves(&leaves);
     let merkle_root = merkle_tree.root().unwrap();
 
-    // the Notary uses his pubkey to compute a signature
+    // -------- the Notary uses his signing key to compute a signature
     let signing_key = SigningKey::random(&mut rng);
     let verifying_key = VerifyingKey::from(&signing_key);
     let encoded = verifying_key.to_encoded_point(true);
     let pubkey_bytes = encoded.as_bytes();
 
     // (note that ephemeralECPubkey is known both to the User and the Notary)
-    let signed_tls = SignedTLS::new(TIME, eph_ec, commitment_to_tls);
-    let signed = Signed::new(signed_tls.clone(), label_seed, merkle_root);
+    let signed_tls = SignedTLS::new(TIME, ephemeral_pubkey, commitment_to_tls);
+    let signed = Signed::new(
+        signed_tls.clone(),
+        label_seed,
+        merkle_root,
+        cipher_block_size,
+    );
 
     let signature = signing_key.sign(&bincode::serialize(&signed).unwrap());
     let sig_der = signature.to_der();
     let signature = sig_der.as_bytes();
 
-    // the Notary reveals `label_seed` and also sends the `signature` and `time`.
-    // After that the User creates a doc for the Verifier:
-    // (The User creates `signed_tls` just like the Notary did above)
+    // -------- the Notary reveals `label_seed` and also sends the `signature` and `time`.
+
+    // -------- After that the User creates a doc for the Verifier:
+    // (The User creates `signed_tls` just like the Notary did above)
     let tls_doc = TLSDoc::new(signed_tls, committed_tls);
 
     // prepares openings and merkle proofs for those openings
@@ -204,16 +267,17 @@ fn e2e_test() {
         Some(signature.to_vec()),
         label_seed,
         merkle_root,
+        cipher_block_size,
         1,
         proof,
         vec![comm],
         vec![open],
     );
 
-    // The User converts the doc into an unchecked type and passes it to the Verifier
+    // -------- The User converts the doc into an unchecked type and passes it to the Verifier
     let doc_unchecked: VerifierDocUnchecked = doc.into();
 
-    // The Verifier verifies the doc:
+    // -------- The Verifier verifies the doc:
 
     // Initially the Verifier may store the Notary's pubkey as bytes. Converts it into
     // PubKey type
@@ -222,4 +286,12 @@ fn e2e_test() {
     let verifier = Verifier::new(doc_unchecked, Some(trusted_pubkey)).unwrap();
 
     verifier.verify("tlsnotary.org".to_string()).unwrap();
+
+    // -------- The Verifier proceeds to put each verified commitment opening through an application
+    // level (e.g.
http) parser + + assert_eq!( + String::from_utf8(verifier.doc.commitment_openings()[0].opening().clone()).unwrap(), + "important data".to_string() + ); } diff --git a/verifier/src/signed.rs b/verifier/src/signed.rs index cb2ebc1c52..b54dc22148 100644 --- a/verifier/src/signed.rs +++ b/verifier/src/signed.rs @@ -7,21 +7,21 @@ pub struct SignedTLS { // notarization time against which the TLS Certificate validity is checked time: u64, // ephemeral pubkey for ECDH key exchange - ephemeralECPubkey: EphemeralECPubkey, + ephemeral_ec_pubkey: EphemeralECPubkey, /// User's commitment to [super::tls_doc::CommittedTLS] - pub commitment_to_TLS: HashCommitment, + commitment_to_tls: HashCommitment, } impl SignedTLS { pub fn new( time: u64, - ephemeralECPubkey: EphemeralECPubkey, - commitment_to_TLS: HashCommitment, + ephemeral_ec_pubkey: EphemeralECPubkey, + commitment_to_tls: HashCommitment, ) -> Self { Self { time, - ephemeralECPubkey, - commitment_to_TLS, + ephemeral_ec_pubkey, + commitment_to_tls, } } @@ -29,35 +29,63 @@ impl SignedTLS { self.time } - pub fn ephemeralECPubkey(&self) -> &EphemeralECPubkey { - &self.ephemeralECPubkey + pub fn ephemeral_ec_pubkey(&self) -> &EphemeralECPubkey { + &self.ephemeral_ec_pubkey + } + + pub fn commitment_to_tls(&self) -> &HashCommitment { + &self.commitment_to_tls } } /// All the data which the Notary signs #[derive(Clone, Serialize)] pub struct Signed { - pub tls: SignedTLS, + tls: SignedTLS, // see comments in [crate::VerifierDoc] for details about the fields below /// PRG seed from which garbled circuit labels are generated - pub label_seed: LabelSeed, + label_seed: LabelSeed, /// Merkle root of all the commitments - pub merkle_root: [u8; 32], + merkle_root: [u8; 32], + /// Size of the cipher's block in bytes (16 for AES, 64 for ChaCha) + cipher_block_size: usize, } impl Signed { /// Creates a new struct to be signed by the Notary - pub fn new(tls: SignedTLS, label_seed: LabelSeed, merkle_root: [u8; 32]) -> Self { + pub fn new( + tls: SignedTLS, + label_seed: LabelSeed, + merkle_root: [u8; 32], + cipher_block_size: usize, + ) -> Self { Self { tls, label_seed, merkle_root, + cipher_block_size, } } pub fn serialize(self) -> Result, Error> { bincode::serialize(&self).map_err(|_| Error::SerializationError) } + + pub fn tls(&self) -> &SignedTLS { + &self.tls + } + + pub fn label_seed(&self) -> &LabelSeed { + &self.label_seed + } + + pub fn merkle_root(&self) -> &[u8; 32] { + &self.merkle_root + } + + pub fn cipher_block_size(&self) -> usize { + self.cipher_block_size + } } /// Extracts relevant fields from the VerifierDoc. 
Those are the fields @@ -68,6 +96,7 @@ impl std::convert::From<&VerifierDoc> for Signed { doc.tls_doc().signed_tls().clone(), *doc.label_seed(), *doc.merkle_root(), + doc.cipher_block_size(), ) } } diff --git a/verifier/src/tls_doc.rs b/verifier/src/tls_doc.rs index a87ee23cc0..078f5d20ec 100644 --- a/verifier/src/tls_doc.rs +++ b/verifier/src/tls_doc.rs @@ -34,13 +34,13 @@ impl TLSDoc { let ee_cert = webpki_utils::extract_end_entity_cert(&self.committed_tls.tls_cert_chain)?; - self.verify_tls_commitment(&self.committed_tls, &self.signed_tls.commitment_to_TLS)?; + self.verify_tls_commitment(&self.committed_tls, self.signed_tls.commitment_to_tls())?; //check that TLS key exchange parameters were signed by the end-entity cert webpki_utils::verify_sig_ke_params( &ee_cert, &self.committed_tls.sig_ke_params, - self.signed_tls.ephemeralECPubkey(), + self.signed_tls.ephemeral_ec_pubkey(), &self.committed_tls.client_random, &self.committed_tls.server_random, )?; diff --git a/verifier/src/utils.rs b/verifier/src/utils.rs index be4ae9711c..00fc3cca92 100644 --- a/verifier/src/utils.rs +++ b/verifier/src/utils.rs @@ -1,61 +1,63 @@ -use super::{commitment::Range, Error, LabelSeed}; +use super::{commitment::Range, label_encoder::ChaChaEncoder, Error, HashCommitment, LabelSeed}; use blake3::Hasher; -use rand::{Rng, SeedableRng}; -use rand_chacha::ChaCha20Rng; -// Given a `substring` and its byte `ranges` within a larger string, computes a (`salt`ed) commitment -// to the garbled circuit labels. The labels are derived from a PRG `seed`. -pub fn compute_label_commitment( +/// Given a `substring` and its byte `ranges` within a larger string, computes a (`salt`ed) commitment +/// to the garbled circuit labels. The labels are derived from a PRG `seed`. +/// `ranges` are ordered ascendingly relative to each other. +/// +/// * cipher_block_size - The size of one block of the cipher which was computed inside the garbled +/// circuit (16 bytes for AES, 64 bytes for ChaCha) +pub(crate) fn compute_label_commitment( substring: &[u8], - ranges: &Vec, + ranges: &[Range], seed: &LabelSeed, - salt: &Vec, -) -> Result<[u8; 32], Error> { - // TODO: will need to bring this in harmony with label encoder in mpc-core - - let mut rng = ChaCha20Rng::from_seed(*seed); - let delta: u128 = rng.gen(); - // we need least-bit-first order, hence reverse() - let mut bits = u8vec_to_boolvec(substring); - bits.reverse(); - let mut bits_iter = bits.into_iter(); - - // for each bit of opening, expand the zero label at the rng stream offset - // and, if needed, flip it to the one label, then hash the label + salt: &[u8], + cipher_block_size: usize, +) -> Result { + let mut enc = ChaChaEncoder::new(*seed); + + // making a copy of the substring because we will be drain()ing it + let mut bytestring = substring.to_vec(); + let mut hasher = Hasher::new(); for r in ranges { - // set rng stream offset to the first label in range. 
+1 accounts for - // the delta - rng.set_word_pos(4 * ((r.start() as u128) + 1)); - - // expand as many labels as there are bits in the range - (0..(r.end() - r.start()) * 8).map(|_| { - let zero_label: u128 = rng.gen(); - let active_label = if bits_iter.next().unwrap() == true { - zero_label ^ delta - } else { - zero_label - }; - hasher.update(&active_label.to_be_bytes()); - }); + let block_ranges = split_into_block_ranges(r, cipher_block_size); + for br in &block_ranges { + let range_size = br.end() - br.start(); + let bytes_in_range: Vec = bytestring.drain(0..range_size).collect(); + + // convert bytes in the range into bits in lsb0 order + let mut bits = u8vec_to_boolvec(&bytes_in_range); + bits.reverse(); + let mut bits_iter = bits.into_iter(); + + // due to lsb0 ordering of labels, we need to flip the range bounds + let flipped_range = flip_range(br, cipher_block_size); + + // derive as many label pairs as there are bits in the range + for i in flipped_range.start() * 8..flipped_range.end() * 8 { + let label_pair = enc.encode(i); + let bit = match bits_iter.next() { + Some(bit) => bit, + // should never happen since this method is only called with ranges validated + // to correspond to the size of the substring + None => return Err(Error::InternalError), + }; + let active_label = if bit { label_pair[1] } else { label_pair[0] }; + + hasher.update(&active_label.inner().to_be_bytes()); + } + } } // add salt hasher.update(salt); Ok(hasher.finalize().into()) } -/// Returns a substring of the original bytestring containing only the bytes in `ranges` -// TODO check len overflow -pub fn bytes_in_ranges(bytestring: &[u8], ranges: &[Range]) -> Vec { - let mut substring: Vec = Vec::new(); - for r in ranges { - substring.append(&mut bytestring[r.start()..r.end()].to_vec()) - } - substring -} - +/// Converts a u8 vec into an msb0 bool vec +/// (copied from tlsn/utils) #[inline] -pub fn u8vec_to_boolvec(v: &[u8]) -> Vec { +pub(crate) fn u8vec_to_boolvec(v: &[u8]) -> Vec { let mut bv = Vec::with_capacity(v.len() * 8); for byte in v.iter() { for i in 0..8 { @@ -65,8 +67,210 @@ pub fn u8vec_to_boolvec(v: &[u8]) -> Vec { bv } -pub fn blake3(data: &[u8]) -> [u8; 32] { +/// Given the (validated) global `range` which covers multiple blocks of `block_size` each, splits +/// up the global `range` into multiple ranges each covering one block. +/// E.g. if the global `range` is [5, 35) and the `block_size` is 16, the returned ranges will be: +/// [5, 16) , [16, 32), [32, 35) +pub(crate) fn split_into_block_ranges(range: &Range, block_size: usize) -> Vec { + let range_size = range.end() - range.start(); + + // if the first block is only partially covered by the global range, store the + // partially covered size, otherwise, if it is fully covered, store 0. + let first_partial_size = { + let offset_from_block_start = range.start() % block_size; + if offset_from_block_start != 0 { + let potentially_covered_size = block_size - offset_from_block_start; + if potentially_covered_size > range_size { + // there is only one partially covered block in the global range + range_size + } else { + // potentially covered size is the actual covered size + potentially_covered_size + } + } else { + // the first block is fully covered by the global range + 0 + } + }; + + // if the last block is only partially covered by the global range, store the + // partially covered size, otherwise, if it is fully covered or if there is only one block + // total, store 0. 
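    // Worked example for the two partial-block cases above (illustration only), with block_size = 16:
    // - global range [5, 35): first_partial_size = 16 - 5 = 11 and last_partial_size = 35 % 16 = 3,
    //   so the emitted block ranges are [5, 16), [16, 32), [32, 35)
    // - global range [5, 10): the single block is only partially covered, so
    //   first_partial_size = range_size = 5 and last_partial_size = 0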
+ let last_partial_size = { + if first_partial_size == range_size { + // there is only one partially covered block in the global range + 0 + } else { + range.end() % block_size + } + }; + + let mut block_ranges: Vec = Vec::new(); + let mut start = range.start(); + let mut end = range.end(); + + let first_partial_range: Option = if first_partial_size > 0 { + // save original start + let orig_start = start; + // adjust the start of the global range + start += first_partial_size; + + Some(Range::new(orig_start, orig_start + first_partial_size)) + } else { + None + }; + + let last_partial_range: Option = if last_partial_size > 0 { + // save original end + let orig_end = end; + // adjust the end of the global range + end -= last_partial_size; + + Some(Range::new(orig_end - last_partial_size, orig_end)) + } else { + None + }; + + // now the global range covers only the full blocks + let full_block_count = (end - start) / block_size; + for i in 0..full_block_count { + // push full block ranges + block_ranges.push(Range::new( + start + i * block_size, + start + (i + 1) * block_size, + )); + } + + // if there were any partial ranges, insert them + if let Some(r) = first_partial_range { + block_ranges.insert(0, r) + }; + if let Some(r) = last_partial_range { + block_ranges.push(r) + }; + + block_ranges +} + +/// Given a byte `range` spanning only one block of `block_size`, returns a new +/// range which covers the same block's bytes after the block's bit ordering is changed to lsb0 +/// (the block is initially in msb0). +/// +/// E.g. if the original `range` is [33, 39) and `block_size` is 16, the result will be [41, 47) +pub(crate) fn flip_range(range: &Range, block_size: usize) -> Range { + // round down to the nearest multiple of `block_size` + let block_start_boundary = (range.start() / block_size) * block_size; + let block_end_boundary = block_start_boundary + block_size; + + // how far the range bounds are shifted from the block boundaries? + let shift_from_the_start = range.start() - block_start_boundary; + let shift_from_the_end = block_end_boundary - range.end(); + + Range::new( + block_start_boundary + shift_from_the_end, + block_end_boundary - shift_from_the_start, + ) +} + +/// Outputs blake3 digest +pub(crate) fn blake3(data: &[u8]) -> [u8; 32] { let mut hasher = Hasher::new(); hasher.update(data); hasher.finalize().into() } + +/// Returns a substring of the original `bytestring` containing only the bytes in `ranges`. +/// This method is only called with validated `ranges` which do not exceed the size of the +/// `bytestring`. 
+#[cfg(test)] +pub(crate) fn bytes_in_ranges(bytestring: &[u8], ranges: &[Range]) -> Vec { + let mut substring: Vec = Vec::new(); + for r in ranges { + substring.append(&mut bytestring[r.start()..r.end()].to_vec()) + } + substring +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn test_u8vec_to_boolvec() { + let mut u = vec![false; 16]; + u[7] = true; + assert_eq!(u8vec_to_boolvec(&256u16.to_be_bytes()), u); + } + + #[test] + fn test_split_into_block_ranges() { + // first partial block, last partial block, full middle block + let r = Range::new(5, 35); + let out = split_into_block_ranges(&r, 16); + let expected = "[Range { start: 5, end: 16 }, Range { start: 16, end: 32 }, Range { start: 32, end: 35 }]"; + assert_eq!(expected, format!("{:?}", out)); + + // first partial block, last partial block, no middle blocks + let r = Range::new(5, 25); + let out = split_into_block_ranges(&r, 16); + let expected = "[Range { start: 5, end: 16 }, Range { start: 16, end: 25 }]"; + assert_eq!(expected, format!("{:?}", out)); + + // only one partial block + let r = Range::new(5, 10); + let out = split_into_block_ranges(&r, 16); + let expected = "[Range { start: 5, end: 10 }]"; + assert_eq!(expected, format!("{:?}", out)); + + // only one full block at 0 offset + let r = Range::new(0, 16); + let out = split_into_block_ranges(&r, 16); + let expected = "[Range { start: 0, end: 16 }]"; + assert_eq!(expected, format!("{:?}", out)); + + // only one full block at non-zero offset + let r = Range::new(16, 32); + let out = split_into_block_ranges(&r, 16); + let expected = "[Range { start: 16, end: 32 }]"; + assert_eq!(expected, format!("{:?}", out)); + + // first block full, last block partial + let r = Range::new(16, 33); + let out = split_into_block_ranges(&r, 16); + let expected = "[Range { start: 16, end: 32 }, Range { start: 32, end: 33 }]"; + assert_eq!(expected, format!("{:?}", out)); + + // first block partial, last block full + let r = Range::new(15, 32); + let out = split_into_block_ranges(&r, 16); + let expected = "[Range { start: 15, end: 16 }, Range { start: 16, end: 32 }]"; + assert_eq!(expected, format!("{:?}", out)); + } + + #[test] + fn test_flip_range() { + // block start and end match range start and end + let r = Range::new(16, 32); + let out = flip_range(&r, 16); + let expected = "Range { start: 16, end: 32 }"; + assert_eq!(expected, format!("{:?}", out)); + + // only start matches + let r = Range::new(16, 30); + let out = flip_range(&r, 16); + let expected = "Range { start: 18, end: 32 }"; + assert_eq!(expected, format!("{:?}", out)); + + // only end matches + let r = Range::new(20, 32); + let out = flip_range(&r, 16); + let expected = "Range { start: 16, end: 28 }"; + assert_eq!(expected, format!("{:?}", out)); + + // neither start nor end match + let r = Range::new(33, 39); + let out = flip_range(&r, 16); + let expected = "Range { start: 41, end: 47 }"; + assert_eq!(expected, format!("{:?}", out)); + } +} diff --git a/verifier/src/verifier_doc.rs b/verifier/src/verifier_doc.rs index a6016b0e87..c59c13db86 100644 --- a/verifier/src/verifier_doc.rs +++ b/verifier/src/verifier_doc.rs @@ -1,3 +1,5 @@ +use crate::commitment::LabelSeedAndCipherBlockSize; + use super::{ checks, commitment::{Commitment, CommitmentOpening, CommitmentType}, @@ -29,6 +31,9 @@ pub struct VerifierDoc { /// to the User. merkle_root: [u8; 32], + /// Size of the cipher's block in bytes (16 for AES, 64 for ChaCha) + cipher_block_size: usize, + /// The total leaf count in the Merkle tree of commitments. 
Provided by the User to the Verifier /// to enable merkle proof verification. merkle_tree_leaf_count: usize, @@ -54,6 +59,7 @@ impl VerifierDoc { signature: Option>, label_seed: LabelSeed, merkle_root: [u8; 32], + cipher_block_size: usize, merkle_tree_leaf_count: usize, merkle_multi_proof: MerkleProof, commitments: Vec, @@ -65,6 +71,7 @@ impl VerifierDoc { signature, label_seed, merkle_root, + cipher_block_size, merkle_tree_leaf_count, merkle_multi_proof, commitments, @@ -91,6 +98,7 @@ impl VerifierDoc { signature: unchecked.signature, label_seed: unchecked.label_seed, merkle_root: unchecked.merkle_root, + cipher_block_size: unchecked.cipher_block_size, merkle_tree_leaf_count: unchecked.merkle_tree_leaf_count, merkle_multi_proof: unchecked.merkle_multi_proof, commitments: unchecked.commitments, @@ -118,9 +126,13 @@ impl VerifierDoc { // insert our `signed_data` which we know is correct - let tls_doc = TLSDoc::new(signed_data.tls, unchecked.tls_doc.committed_tls().clone()); - let label_seed = signed_data.label_seed; - let merkle_root = signed_data.merkle_root; + let tls_doc = TLSDoc::new( + signed_data.tls().clone(), + unchecked.tls_doc.committed_tls().clone(), + ); + let label_seed = *signed_data.label_seed(); + let merkle_root = *signed_data.merkle_root(); + let cipher_block_size = signed_data.cipher_block_size(); Ok(Self { version: unchecked.version, @@ -128,6 +140,7 @@ impl VerifierDoc { signature: unchecked.signature, label_seed, merkle_root, + cipher_block_size, merkle_tree_leaf_count: unchecked.merkle_tree_leaf_count, merkle_multi_proof: unchecked.merkle_multi_proof, commitments: unchecked.commitments, @@ -183,9 +196,9 @@ impl VerifierDoc { Ok(()) } - /// Verifies each garbled circuit labels commitment against its opening + /// Verifies each garbled circuit label commitment against its opening fn verify_label_commitments(&self) -> Result<(), Error> { - // collect only labels commitments + // collect only label commitments let label_commitments: Vec<&Commitment> = self .commitments .iter() @@ -211,7 +224,9 @@ impl VerifierDoc { // verify each (opening, commitment) pair for (o, c) in openings.iter().zip(label_commitments) { - c.verify(o, Box::new(self.label_seed) as Box)?; + let extra_data = + LabelSeedAndCipherBlockSize::new(self.label_seed, self.cipher_block_size); + c.verify(o, Box::new(extra_data) as Box)?; } Ok(()) @@ -229,9 +244,21 @@ impl VerifierDoc { &self.merkle_root } + pub fn cipher_block_size(&self) -> usize { + self.cipher_block_size + } + pub fn tls_doc(&self) -> &TLSDoc { &self.tls_doc } + + pub fn commitments(&self) -> &Vec { + &self.commitments + } + + pub fn commitment_openings(&self) -> &Vec { + &self.commitment_openings + } } /// This is the [VerifierDoc] in its unchecked form. 
This is the form in which the doc is received @@ -243,6 +270,7 @@ pub struct VerifierDocUnchecked { signature: Option>, label_seed: LabelSeed, merkle_root: [u8; 32], + cipher_block_size: usize, merkle_tree_leaf_count: usize, merkle_multi_proof: MerkleProof, commitments: Vec, @@ -268,6 +296,7 @@ impl std::convert::From for VerifierDocUnchecked { signature: doc.signature, label_seed: doc.label_seed, merkle_root: doc.merkle_root, + cipher_block_size: doc.cipher_block_size, merkle_tree_leaf_count: doc.merkle_tree_leaf_count, merkle_multi_proof: doc.merkle_multi_proof, commitments: doc.commitments, From 2b64d39b3b2ca3b720c31283609881413721cc53 Mon Sep 17 00:00:00 2001 From: themighty1 Date: Fri, 27 Jan 2023 09:46:20 +0200 Subject: [PATCH 04/23] removed cipher_block_size --- verifier/src/commitment.rs | 34 +---- verifier/src/lib.rs | 38 +----- verifier/src/signed.rs | 15 +-- verifier/src/utils.rs | 243 +++++------------------------------ verifier/src/verifier_doc.rs | 20 +-- 5 files changed, 47 insertions(+), 303 deletions(-) diff --git a/verifier/src/commitment.rs b/verifier/src/commitment.rs index f7148bc401..c26f557a20 100644 --- a/verifier/src/commitment.rs +++ b/verifier/src/commitment.rs @@ -45,19 +45,12 @@ impl Commitment { ) -> Result<(), Error> { let expected = match self.typ { CommitmentType::labels_blake3 => { - let (seed, cipher_block_size) = - match extra_data.downcast::() { - Ok(extra_data) => (extra_data.label_seed, extra_data.cipher_block_size), - Err(_) => return Err(Error::InternalError), - }; - - compute_label_commitment( - &opening.opening, - &self.ranges, - &seed, - opening.salt(), - cipher_block_size, - )? + let seed = match extra_data.downcast::() { + Ok(seed) => seed, + Err(_) => return Err(Error::InternalError), + }; + + compute_label_commitment(&opening.opening, &self.ranges, &seed, opening.salt())? } _ => return Err(Error::InternalError), }; @@ -156,18 +149,3 @@ impl Range { self.end } } - -pub struct LabelSeedAndCipherBlockSize { - label_seed: LabelSeed, - cipher_block_size: usize, -} - -/// Extra data for [CommitmentType::labels_blake3] commitments -impl LabelSeedAndCipherBlockSize { - pub fn new(label_seed: LabelSeed, cipher_block_size: usize) -> Self { - Self { - label_seed, - cipher_block_size, - } - } -} diff --git a/verifier/src/lib.rs b/verifier/src/lib.rs index edc89327be..9df18b1a93 100644 --- a/verifier/src/lib.rs +++ b/verifier/src/lib.rs @@ -19,6 +19,7 @@ type HashCommitment = [u8; 32]; /// A PRG seeds from which to generate garbled circuit active labels, see /// [crate::commitment::CommitmentType::labels_blake3] type LabelSeed = [u8; 32]; + /// Verifier of the notarization document /// /// Once the verification succeeds, an application level (e.g. 
HTTP, JSON) parser can @@ -107,10 +108,6 @@ fn e2e_test() { let mut rng = rand::thread_rng(); - // The size in bytes of one block of the cipher that was computed inside the garbled circuit - // (16 for AES, 64 for ChaCha) - let cipher_block_size = 16; - let plaintext = b"This important data will be notarized"; // -------- After the webserver sends the Server Key Exchange message (during the TLS handshake), @@ -159,16 +156,8 @@ fn e2e_test() { // -------- The User retrieves her active labels using Oblivious Transfer (simulated below): - // convert plaintext into bits by splitting up the plaintext into 16-byte AES blocks, then - // making each block's bit ordering lsb0 - let bits: Vec = plaintext - .chunks(cipher_block_size) - .flat_map(|chunk| { - let mut bits = u8vec_to_boolvec(chunk); - bits.reverse(); - bits - }) - .collect(); + // convert plaintext into lsb0 bits + let bits = u8vec_to_boolvec(plaintext); let all_active_labels: Vec = full_labels .iter() @@ -197,19 +186,12 @@ fn e2e_test() { // hash all the active labels in the commitment's ranges let mut hasher = Hasher::new(); - // due to lsb0 ordering of labels, we need to split up each range into individual ranges covering - // each block and then flip each individual range for r in &ranges { - let block_ranges = utils::split_into_block_ranges(r, cipher_block_size); - for br in &block_ranges { - let flipped_range = utils::flip_range(br, cipher_block_size); - for label in - all_active_labels[flipped_range.start() * 8..flipped_range.end() * 8].iter() - { - hasher.update(&label.inner().to_be_bytes()); - } + for label in all_active_labels[r.start() * 8..r.end() * 8].iter() { + hasher.update(&label.inner().to_be_bytes()); } } + // add salt hasher.update(&salt); let hash_commitment: HashCommitment = hasher.finalize().into(); @@ -237,12 +219,7 @@ fn e2e_test() { // (note that ephemeralECPubkey is known both to the User and the Notary) let signed_tls = SignedTLS::new(TIME, ephemeral_pubkey, commitment_to_tls); - let signed = Signed::new( - signed_tls.clone(), - label_seed, - merkle_root, - cipher_block_size, - ); + let signed = Signed::new(signed_tls.clone(), label_seed, merkle_root); let signature = signing_key.sign(&bincode::serialize(&signed).unwrap()); let sig_der = signature.to_der(); @@ -267,7 +244,6 @@ fn e2e_test() { Some(signature.to_vec()), label_seed, merkle_root, - cipher_block_size, 1, proof, vec![comm], diff --git a/verifier/src/signed.rs b/verifier/src/signed.rs index b54dc22148..e9b329dbac 100644 --- a/verifier/src/signed.rs +++ b/verifier/src/signed.rs @@ -47,23 +47,15 @@ pub struct Signed { label_seed: LabelSeed, /// Merkle root of all the commitments merkle_root: [u8; 32], - /// Size of the cipher's block in bytes (16 for AES, 64 for ChaCha) - cipher_block_size: usize, } impl Signed { /// Creates a new struct to be signed by the Notary - pub fn new( - tls: SignedTLS, - label_seed: LabelSeed, - merkle_root: [u8; 32], - cipher_block_size: usize, - ) -> Self { + pub fn new(tls: SignedTLS, label_seed: LabelSeed, merkle_root: [u8; 32]) -> Self { Self { tls, label_seed, merkle_root, - cipher_block_size, } } @@ -82,10 +74,6 @@ impl Signed { pub fn merkle_root(&self) -> &[u8; 32] { &self.merkle_root } - - pub fn cipher_block_size(&self) -> usize { - self.cipher_block_size - } } /// Extracts relevant fields from the VerifierDoc. 
Those are the fields @@ -96,7 +84,6 @@ impl std::convert::From<&VerifierDoc> for Signed { doc.tls_doc().signed_tls().clone(), *doc.label_seed(), *doc.merkle_root(), - doc.cipher_block_size(), ) } } diff --git a/verifier/src/utils.rs b/verifier/src/utils.rs index 00fc3cca92..ba632989b3 100644 --- a/verifier/src/utils.rs +++ b/verifier/src/utils.rs @@ -4,15 +4,11 @@ use blake3::Hasher; /// Given a `substring` and its byte `ranges` within a larger string, computes a (`salt`ed) commitment /// to the garbled circuit labels. The labels are derived from a PRG `seed`. /// `ranges` are ordered ascendingly relative to each other. -/// -/// * cipher_block_size - The size of one block of the cipher which was computed inside the garbled -/// circuit (16 bytes for AES, 64 bytes for ChaCha) pub(crate) fn compute_label_commitment( substring: &[u8], ranges: &[Range], seed: &LabelSeed, salt: &[u8], - cipher_block_size: usize, ) -> Result { let mut enc = ChaChaEncoder::new(*seed); @@ -21,32 +17,25 @@ pub(crate) fn compute_label_commitment( let mut hasher = Hasher::new(); for r in ranges { - let block_ranges = split_into_block_ranges(r, cipher_block_size); - for br in &block_ranges { - let range_size = br.end() - br.start(); - let bytes_in_range: Vec = bytestring.drain(0..range_size).collect(); - - // convert bytes in the range into bits in lsb0 order - let mut bits = u8vec_to_boolvec(&bytes_in_range); - bits.reverse(); - let mut bits_iter = bits.into_iter(); - - // due to lsb0 ordering of labels, we need to flip the range bounds - let flipped_range = flip_range(br, cipher_block_size); - - // derive as many label pairs as there are bits in the range - for i in flipped_range.start() * 8..flipped_range.end() * 8 { - let label_pair = enc.encode(i); - let bit = match bits_iter.next() { - Some(bit) => bit, - // should never happen since this method is only called with ranges validated - // to correspond to the size of the substring - None => return Err(Error::InternalError), - }; - let active_label = if bit { label_pair[1] } else { label_pair[0] }; - - hasher.update(&active_label.inner().to_be_bytes()); - } + let range_size = r.end() - r.start(); + let bytes_in_range: Vec = bytestring.drain(0..range_size).collect(); + + // convert bytes in the range into bits in lsb0 order + let bits = u8vec_to_boolvec(&bytes_in_range); + let mut bits_iter = bits.into_iter(); + + // derive as many label pairs as there are bits in the range + for i in r.start() * 8..r.end() * 8 { + let label_pair = enc.encode(i); + let bit = match bits_iter.next() { + Some(bit) => bit, + // should never happen since this method is only called with ranges validated + // to correspond to the size of the substring + None => return Err(Error::InternalError), + }; + let active_label = if bit { label_pair[1] } else { label_pair[0] }; + + hasher.update(&active_label.inner().to_be_bytes()); } } // add salt @@ -54,124 +43,18 @@ pub(crate) fn compute_label_commitment( Ok(hasher.finalize().into()) } -/// Converts a u8 vec into an msb0 bool vec -/// (copied from tlsn/utils) +/// Converts a u8 vec into an lsb0 bool vec #[inline] pub(crate) fn u8vec_to_boolvec(v: &[u8]) -> Vec { let mut bv = Vec::with_capacity(v.len() * 8); for byte in v.iter() { for i in 0..8 { - bv.push(((byte >> (7 - i)) & 1) != 0); + bv.push(((byte >> i) & 1) != 0); } } bv } -/// Given the (validated) global `range` which covers multiple blocks of `block_size` each, splits -/// up the global `range` into multiple ranges each covering one block. -/// E.g. 
if the global `range` is [5, 35) and the `block_size` is 16, the returned ranges will be: -/// [5, 16) , [16, 32), [32, 35) -pub(crate) fn split_into_block_ranges(range: &Range, block_size: usize) -> Vec { - let range_size = range.end() - range.start(); - - // if the first block is only partially covered by the global range, store the - // partially covered size, otherwise, if it is fully covered, store 0. - let first_partial_size = { - let offset_from_block_start = range.start() % block_size; - if offset_from_block_start != 0 { - let potentially_covered_size = block_size - offset_from_block_start; - if potentially_covered_size > range_size { - // there is only one partially covered block in the global range - range_size - } else { - // potentially covered size is the actual covered size - potentially_covered_size - } - } else { - // the first block is fully covered by the global range - 0 - } - }; - - // if the last block is only partially covered by the global range, store the - // partially covered size, otherwise, if it is fully covered or if there is only one block - // total, store 0. - let last_partial_size = { - if first_partial_size == range_size { - // there is only one partially covered block in the global range - 0 - } else { - range.end() % block_size - } - }; - - let mut block_ranges: Vec = Vec::new(); - let mut start = range.start(); - let mut end = range.end(); - - let first_partial_range: Option = if first_partial_size > 0 { - // save original start - let orig_start = start; - // adjust the start of the global range - start += first_partial_size; - - Some(Range::new(orig_start, orig_start + first_partial_size)) - } else { - None - }; - - let last_partial_range: Option = if last_partial_size > 0 { - // save original end - let orig_end = end; - // adjust the end of the global range - end -= last_partial_size; - - Some(Range::new(orig_end - last_partial_size, orig_end)) - } else { - None - }; - - // now the global range covers only the full blocks - let full_block_count = (end - start) / block_size; - for i in 0..full_block_count { - // push full block ranges - block_ranges.push(Range::new( - start + i * block_size, - start + (i + 1) * block_size, - )); - } - - // if there were any partial ranges, insert them - if let Some(r) = first_partial_range { - block_ranges.insert(0, r) - }; - if let Some(r) = last_partial_range { - block_ranges.push(r) - }; - - block_ranges -} - -/// Given a byte `range` spanning only one block of `block_size`, returns a new -/// range which covers the same block's bytes after the block's bit ordering is changed to lsb0 -/// (the block is initially in msb0). -/// -/// E.g. if the original `range` is [33, 39) and `block_size` is 16, the result will be [41, 47) -pub(crate) fn flip_range(range: &Range, block_size: usize) -> Range { - // round down to the nearest multiple of `block_size` - let block_start_boundary = (range.start() / block_size) * block_size; - let block_end_boundary = block_start_boundary + block_size; - - // how far the range bounds are shifted from the block boundaries? 
- let shift_from_the_start = range.start() - block_start_boundary; - let shift_from_the_end = block_end_boundary - range.end(); - - Range::new( - block_start_boundary + shift_from_the_end, - block_end_boundary - shift_from_the_start, - ) -} - /// Outputs blake3 digest pub(crate) fn blake3(data: &[u8]) -> [u8; 32] { let mut hasher = Hasher::new(); @@ -197,80 +80,18 @@ mod test { #[test] fn test_u8vec_to_boolvec() { - let mut u = vec![false; 16]; + let mut u = vec![false; 8]; + u[0] = true; + u[2] = true; + u[4] = true; u[7] = true; - assert_eq!(u8vec_to_boolvec(&256u16.to_be_bytes()), u); - } - - #[test] - fn test_split_into_block_ranges() { - // first partial block, last partial block, full middle block - let r = Range::new(5, 35); - let out = split_into_block_ranges(&r, 16); - let expected = "[Range { start: 5, end: 16 }, Range { start: 16, end: 32 }, Range { start: 32, end: 35 }]"; - assert_eq!(expected, format!("{:?}", out)); - - // first partial block, last partial block, no middle blocks - let r = Range::new(5, 25); - let out = split_into_block_ranges(&r, 16); - let expected = "[Range { start: 5, end: 16 }, Range { start: 16, end: 25 }]"; - assert_eq!(expected, format!("{:?}", out)); - - // only one partial block - let r = Range::new(5, 10); - let out = split_into_block_ranges(&r, 16); - let expected = "[Range { start: 5, end: 10 }]"; - assert_eq!(expected, format!("{:?}", out)); - - // only one full block at 0 offset - let r = Range::new(0, 16); - let out = split_into_block_ranges(&r, 16); - let expected = "[Range { start: 0, end: 16 }]"; - assert_eq!(expected, format!("{:?}", out)); + let res = u8vec_to_boolvec(&149u8.to_be_bytes()); + assert_eq!(res, u); - // only one full block at non-zero offset - let r = Range::new(16, 32); - let out = split_into_block_ranges(&r, 16); - let expected = "[Range { start: 16, end: 32 }]"; - assert_eq!(expected, format!("{:?}", out)); - - // first block full, last block partial - let r = Range::new(16, 33); - let out = split_into_block_ranges(&r, 16); - let expected = "[Range { start: 16, end: 32 }, Range { start: 32, end: 33 }]"; - assert_eq!(expected, format!("{:?}", out)); - - // first block partial, last block full - let r = Range::new(15, 32); - let out = split_into_block_ranges(&r, 16); - let expected = "[Range { start: 15, end: 16 }, Range { start: 16, end: 32 }]"; - assert_eq!(expected, format!("{:?}", out)); - } - - #[test] - fn test_flip_range() { - // block start and end match range start and end - let r = Range::new(16, 32); - let out = flip_range(&r, 16); - let expected = "Range { start: 16, end: 32 }"; - assert_eq!(expected, format!("{:?}", out)); - - // only start matches - let r = Range::new(16, 30); - let out = flip_range(&r, 16); - let expected = "Range { start: 18, end: 32 }"; - assert_eq!(expected, format!("{:?}", out)); - - // only end matches - let r = Range::new(20, 32); - let out = flip_range(&r, 16); - let expected = "Range { start: 16, end: 28 }"; - assert_eq!(expected, format!("{:?}", out)); - - // neither start nor end match - let r = Range::new(33, 39); - let out = flip_range(&r, 16); - let expected = "Range { start: 41, end: 47 }"; - assert_eq!(expected, format!("{:?}", out)); + let mut u = vec![false; 16]; + u[0] = true; + u[9] = true; + let res = u8vec_to_boolvec(&258u16.to_be_bytes()); + assert_eq!(res, u); } } diff --git a/verifier/src/verifier_doc.rs b/verifier/src/verifier_doc.rs index c59c13db86..3873fab8c0 100644 --- a/verifier/src/verifier_doc.rs +++ b/verifier/src/verifier_doc.rs @@ -1,5 +1,3 @@ -use 
crate::commitment::LabelSeedAndCipherBlockSize; - use super::{ checks, commitment::{Commitment, CommitmentOpening, CommitmentType}, @@ -31,9 +29,6 @@ pub struct VerifierDoc { /// to the User. merkle_root: [u8; 32], - /// Size of the cipher's block in bytes (16 for AES, 64 for ChaCha) - cipher_block_size: usize, - /// The total leaf count in the Merkle tree of commitments. Provided by the User to the Verifier /// to enable merkle proof verification. merkle_tree_leaf_count: usize, @@ -59,7 +54,6 @@ impl VerifierDoc { signature: Option>, label_seed: LabelSeed, merkle_root: [u8; 32], - cipher_block_size: usize, merkle_tree_leaf_count: usize, merkle_multi_proof: MerkleProof, commitments: Vec, @@ -71,7 +65,6 @@ impl VerifierDoc { signature, label_seed, merkle_root, - cipher_block_size, merkle_tree_leaf_count, merkle_multi_proof, commitments, @@ -98,7 +91,6 @@ impl VerifierDoc { signature: unchecked.signature, label_seed: unchecked.label_seed, merkle_root: unchecked.merkle_root, - cipher_block_size: unchecked.cipher_block_size, merkle_tree_leaf_count: unchecked.merkle_tree_leaf_count, merkle_multi_proof: unchecked.merkle_multi_proof, commitments: unchecked.commitments, @@ -132,7 +124,6 @@ impl VerifierDoc { ); let label_seed = *signed_data.label_seed(); let merkle_root = *signed_data.merkle_root(); - let cipher_block_size = signed_data.cipher_block_size(); Ok(Self { version: unchecked.version, @@ -140,7 +131,6 @@ impl VerifierDoc { signature: unchecked.signature, label_seed, merkle_root, - cipher_block_size, merkle_tree_leaf_count: unchecked.merkle_tree_leaf_count, merkle_multi_proof: unchecked.merkle_multi_proof, commitments: unchecked.commitments, @@ -224,9 +214,7 @@ impl VerifierDoc { // verify each (opening, commitment) pair for (o, c) in openings.iter().zip(label_commitments) { - let extra_data = - LabelSeedAndCipherBlockSize::new(self.label_seed, self.cipher_block_size); - c.verify(o, Box::new(extra_data) as Box)?; + c.verify(o, Box::new(self.label_seed) as Box)?; } Ok(()) @@ -244,10 +232,6 @@ impl VerifierDoc { &self.merkle_root } - pub fn cipher_block_size(&self) -> usize { - self.cipher_block_size - } - pub fn tls_doc(&self) -> &TLSDoc { &self.tls_doc } @@ -270,7 +254,6 @@ pub struct VerifierDocUnchecked { signature: Option>, label_seed: LabelSeed, merkle_root: [u8; 32], - cipher_block_size: usize, merkle_tree_leaf_count: usize, merkle_multi_proof: MerkleProof, commitments: Vec, @@ -296,7 +279,6 @@ impl std::convert::From for VerifierDocUnchecked { signature: doc.signature, label_seed: doc.label_seed, merkle_root: doc.merkle_root, - cipher_block_size: doc.cipher_block_size, merkle_tree_leaf_count: doc.merkle_tree_leaf_count, merkle_multi_proof: doc.merkle_multi_proof, commitments: doc.commitments, From 64dba3e1a5aaf29b8410b7f5a0b4708407c1a478 Mon Sep 17 00:00:00 2001 From: themighty1 Date: Fri, 27 Jan 2023 11:46:27 +0200 Subject: [PATCH 05/23] addressed some feedback --- verifier/src/checks.rs | 9 +++++---- verifier/src/commitment.rs | 15 +++++++++------ verifier/src/error.rs | 2 ++ verifier/src/lib.rs | 14 +++++++------- verifier/src/tls_doc.rs | 18 +++++++++--------- verifier/src/webpki_utils.rs | 28 ++++++++-------------------- 6 files changed, 40 insertions(+), 46 deletions(-) diff --git a/verifier/src/checks.rs b/verifier/src/checks.rs index 1323723580..077172fe59 100644 --- a/verifier/src/checks.rs +++ b/verifier/src/checks.rs @@ -193,7 +193,7 @@ fn check_overlapping_openings(unchecked: &VerifierDocUnchecked) -> Result<(), Er let mut overlap_was_found = false; for haystack_range in 
haystack_c.ranges() { - match overlapping_range(needle_range, haystack_range) { + match overlapping_range(needle_range, haystack_range)? { Some(ov_range) => { // the bytesize of the overlap let overlap_size = ov_range.end() - ov_range.start(); @@ -246,13 +246,14 @@ fn check_overlapping_openings(unchecked: &VerifierDocUnchecked) -> Result<(), Er } /// If two [Range]s overlap, returns the range containing the overlap -fn overlapping_range(a: &Range, b: &Range) -> Option { +fn overlapping_range(a: &Range, b: &Range) -> Result, Error> { // find purported overlap's start and end let ov_start = std::cmp::max(a.start(), b.start()); let ov_end = std::cmp::min(a.end(), b.end()); if (ov_end - ov_start) < 1 { - None + Ok(None) } else { - Some(Range::new(ov_start, ov_end)) + let range = Range::new(ov_start, ov_end)?; + Ok(Some(range)) } } diff --git a/verifier/src/commitment.rs b/verifier/src/commitment.rs index c26f557a20..07d2026f21 100644 --- a/verifier/src/commitment.rs +++ b/verifier/src/commitment.rs @@ -121,12 +121,12 @@ impl CommitmentOpening { } #[derive(Serialize, Clone, PartialEq)] -// A TLS transcript consists of a stream of bytes which were sent to the server (Request) -// and a stream of bytes which were received from the server (Response). The User creates +// A TLS transcript consists of a stream of bytes which were `Sent` to the server +// and a stream of bytes which were `Received` from the server . The User creates // separate commitments to bytes in each direction. pub enum Direction { - Request, - Response, + Sent, + Received, } #[derive(Serialize, Clone, Debug)] @@ -137,8 +137,11 @@ pub struct Range { } impl Range { - pub fn new(start: usize, end: usize) -> Self { - Self { start, end } + pub fn new(start: usize, end: usize) -> Result { + if start <= end { + return Err(Error::RangeInvalid); + } + Ok(Self { start, end }) } pub fn start(&self) -> usize { diff --git a/verifier/src/error.rs b/verifier/src/error.rs index 1c8911dcf3..4158679063 100644 --- a/verifier/src/error.rs +++ b/verifier/src/error.rs @@ -34,4 +34,6 @@ pub enum Error { SerializationError, #[error("Error during signature verification")] SignatureVerificationError, + #[error("Attempted to create an invalid range")] + RangeInvalid, } diff --git a/verifier/src/lib.rs b/verifier/src/lib.rs index 9df18b1a93..27cfb78188 100644 --- a/verifier/src/lib.rs +++ b/verifier/src/lib.rs @@ -24,7 +24,7 @@ type LabelSeed = [u8; 32]; /// /// Once the verification succeeds, an application level (e.g. 
HTTP, JSON) parser can /// parse `commitment_openings` in `doc` -struct Verifier { +pub struct Verifier { /// A validated notarization document which needs to be verified doc: VerifierDoc, /// A trusted Notary's pubkey (if this Verifier acted as the Notary then no pubkey needs @@ -94,8 +94,8 @@ fn e2e_test() { label_encoder::{Block, ChaChaEncoder}, signed::SignedTLS, tls_doc::{ - CommittedTLS, EphemeralECPubkey, EphemeralECPubkeyType, SigKEParamsAlg, - SignatureKeyExchangeParams, TLSDoc, + CommittedTLS, EphemeralECPubkey, EphemeralECPubkeyType, KEParamsSigAlg, + ServerSignature, TLSDoc, }, utils::{blake3, bytes_in_ranges, u8vec_to_boolvec}, Signed, @@ -133,14 +133,14 @@ fn e2e_test() { let ephemeral_pubkey = hex::decode("04521e456448e6156026bb1392e0a689c051a84d67d353ab755fce68a2e9fba68d09393fa6485db84517e16d9855ce5ba3ec2293f2e511d1e315570531722e9788").unwrap(); let sig = hex::decode("337aa65793562550f6de0a9c792b5f531a96bb78f65a2063f710bfb99e11c791e13d35c798b50eea1351c14efc526009c7836e888206cebde7135130a1fbc049d42e1d1ed05c10f0d108b9540f049ac24fe1076d391b9da3d4e60b5cb8f341bda993f6002873847be744c1955ff575b2d833694fb8a432898c5ac55752e2bddcee4c07371335e1a6581694df43c6eb0ce8da4cdd497c205607b573f9c5d17c951e0a71fbf967c4bff53fc37c597b2f5656478fefb780e8f37bd8409985dd980eda4f254c7dce76dc69e66ed27c0f2c93b53a6dfd7b27359e1589a30d483725e92305766c62d6cad2c0142d3a3c4a2272e6d81eda2886ef12028167f83b3c33ea").unwrap(); - let params = SignatureKeyExchangeParams::new(SigKEParamsAlg::RSA_PKCS1_2048_8192_SHA256, sig); + let server_sig = ServerSignature::new(KEParamsSigAlg::RSA_PKCS1_2048_8192_SHA256, sig); let ephemeral_pubkey = EphemeralECPubkey::new(EphemeralECPubkeyType::P256, ephemeral_pubkey); // -------- Using the above data, the User computes [CommittedTLS] and sends a commitment to // the Notary - let committed_tls = CommittedTLS::new(cert_chain, params, client_random, server_random); + let committed_tls = CommittedTLS::new(cert_chain, server_sig, client_random, server_random); let commitment_to_tls = blake3(&committed_tls.serialize().unwrap()); // -------- The Notary generates garbled circuit's labels from a PRG seed. 
One pair of labels @@ -179,7 +179,7 @@ fn e2e_test() { // Here we'll have 1 (salted) commitment which has 1 range - let ranges = vec![Range::new(5, 19)]; + let ranges = vec![Range::new(5, 19).unwrap()]; let salt: [u8; 32] = rng.gen(); @@ -199,7 +199,7 @@ fn e2e_test() { let comm = Commitment::new( 0, CommitmentType::labels_blake3, - Direction::Request, + Direction::Sent, hash_commitment, ranges.clone(), 0, diff --git a/verifier/src/tls_doc.rs b/verifier/src/tls_doc.rs index 078f5d20ec..6d6af02de1 100644 --- a/verifier/src/tls_doc.rs +++ b/verifier/src/tls_doc.rs @@ -87,7 +87,7 @@ pub type CertDER = Vec; #[derive(Serialize, Clone)] pub struct CommittedTLS { tls_cert_chain: Vec, - sig_ke_params: SignatureKeyExchangeParams, + sig_ke_params: ServerSignature, client_random: Vec, server_random: Vec, } @@ -95,7 +95,7 @@ pub struct CommittedTLS { impl CommittedTLS { pub fn new( tls_cert_chain: Vec, - sig_ke_params: SignatureKeyExchangeParams, + sig_ke_params: ServerSignature, client_random: Vec, server_random: Vec, ) -> Self { @@ -141,24 +141,24 @@ impl EphemeralECPubkey { /// Algorithms that can be used for signing the TLS key exchange parameters #[derive(Clone, Serialize)] -pub enum SigKEParamsAlg { +pub enum KEParamsSigAlg { RSA_PKCS1_2048_8192_SHA256, ECDSA_P256_SHA256, } -/// A signature over the TLS key exchange params +/// A server's signature over the TLS key exchange parameters #[derive(Serialize, Clone)] -pub struct SignatureKeyExchangeParams { - alg: SigKEParamsAlg, +pub struct ServerSignature { + alg: KEParamsSigAlg, sig: Vec, } -impl SignatureKeyExchangeParams { - pub fn new(alg: SigKEParamsAlg, sig: Vec) -> Self { +impl ServerSignature { + pub fn new(alg: KEParamsSigAlg, sig: Vec) -> Self { Self { alg, sig } } - pub fn alg(&self) -> &SigKEParamsAlg { + pub fn alg(&self) -> &KEParamsSigAlg { &self.alg } diff --git a/verifier/src/webpki_utils.rs b/verifier/src/webpki_utils.rs index 863983e786..de91ad5223 100644 --- a/verifier/src/webpki_utils.rs +++ b/verifier/src/webpki_utils.rs @@ -1,8 +1,5 @@ use super::{ - tls_doc::{ - CertDER, EphemeralECPubkey, EphemeralECPubkeyType, SigKEParamsAlg, - SignatureKeyExchangeParams, - }, + tls_doc::{CertDER, EphemeralECPubkey, EphemeralECPubkeyType, KEParamsSigAlg, ServerSignature}, Error, }; use x509_parser::{certificate, prelude::FromDer}; @@ -68,7 +65,7 @@ pub fn verify_cert_chain(chain: &[CertDER], time: u64) -> Result<(), Error> { /// * ephem_pubkey, client_random, server_random - Parameters which were signed pub fn verify_sig_ke_params( cert: &CertDER, - sig_ke_params: &SignatureKeyExchangeParams, + sig_ke_params: &ServerSignature, ephem_pubkey: &EphemeralECPubkey, client_random: &[u8], server_random: &[u8], @@ -99,8 +96,8 @@ pub fn verify_sig_ke_params( // we don't use [webpki::SignatureAlgorithm] in [SignatureKeyExchangeParams::alg] // because it requires a custom serializer. 
Instead we match: let sigalg = match &sig_ke_params.alg() { - SigKEParamsAlg::RSA_PKCS1_2048_8192_SHA256 => &webpki::RSA_PKCS1_2048_8192_SHA256, - SigKEParamsAlg::ECDSA_P256_SHA256 => &webpki::ECDSA_P256_SHA256, + KEParamsSigAlg::RSA_PKCS1_2048_8192_SHA256 => &webpki::RSA_PKCS1_2048_8192_SHA256, + KEParamsSigAlg::ECDSA_P256_SHA256 => &webpki::ECDSA_P256_SHA256, _ => return Err(Error::UnknownSigningAlgorithmInKeyExchange), }; @@ -234,10 +231,7 @@ mod test { let pubkey: &[u8] = &to_hex(RSA_EPHEM_PUBKEY); let sig: &[u8] = &to_hex(RSA_SIG); - let sig = SignatureKeyExchangeParams::new( - SigKEParamsAlg::RSA_PKCS1_2048_8192_SHA256, - sig.to_vec(), - ); + let sig = ServerSignature::new(KEParamsSigAlg::RSA_PKCS1_2048_8192_SHA256, sig.to_vec()); let pubkey = EphemeralECPubkey::new(EphemeralECPubkeyType::P256, pubkey.to_vec()); @@ -252,7 +246,7 @@ mod test { let pubkey: &[u8] = &to_hex(ECDSA_EPHEM_PUBKEY); let sig: &[u8] = &to_hex(ECDSA_SIG); - let sig = SignatureKeyExchangeParams::new(SigKEParamsAlg::ECDSA_P256_SHA256, sig.to_vec()); + let sig = ServerSignature::new(KEParamsSigAlg::ECDSA_P256_SHA256, sig.to_vec()); let pubkey = EphemeralECPubkey::new(EphemeralECPubkeyType::P256, pubkey.to_vec()); @@ -267,10 +261,7 @@ mod test { let pubkey: &[u8] = &to_hex(RSA_EPHEM_PUBKEY); let sig: &[u8] = &to_hex(RSA_SIG); - let sig = SignatureKeyExchangeParams::new( - SigKEParamsAlg::RSA_PKCS1_2048_8192_SHA256, - sig.to_vec(), - ); + let sig = ServerSignature::new(KEParamsSigAlg::RSA_PKCS1_2048_8192_SHA256, sig.to_vec()); let pubkey = EphemeralECPubkey::new(EphemeralECPubkeyType::P256, pubkey.to_vec()); @@ -302,10 +293,7 @@ mod test { let (corrupted, _) = last.overflowing_add(1); sig.push(corrupted); - let sig = SignatureKeyExchangeParams::new( - SigKEParamsAlg::RSA_PKCS1_2048_8192_SHA256, - sig.to_vec(), - ); + let sig = ServerSignature::new(KEParamsSigAlg::RSA_PKCS1_2048_8192_SHA256, sig.to_vec()); let pubkey = EphemeralECPubkey::new(EphemeralECPubkeyType::P256, pubkey.to_vec()); From 00169905ebfccb601aace658a9cb4d274b791525 Mon Sep 17 00:00:00 2001 From: themighty1 Date: Fri, 27 Jan 2023 12:02:20 +0200 Subject: [PATCH 06/23] typo --- verifier/src/commitment.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/verifier/src/commitment.rs b/verifier/src/commitment.rs index 07d2026f21..e8c5b0a8be 100644 --- a/verifier/src/commitment.rs +++ b/verifier/src/commitment.rs @@ -138,7 +138,7 @@ pub struct Range { impl Range { pub fn new(start: usize, end: usize) -> Result { - if start <= end { + if start >= end { return Err(Error::RangeInvalid); } Ok(Self { start, end }) From b7879332146c7ad8108467915a2bb5e3349609d0 Mon Sep 17 00:00:00 2001 From: themighty1 Date: Mon, 30 Jan 2023 14:04:51 +0200 Subject: [PATCH 07/23] added VerifiedDoc type --- verifier/src/checks.rs | 20 ++-- verifier/src/{verifier_doc.rs => doc.rs} | 142 ++++++++++++----------- verifier/src/lib.rs | 79 ++++++------- verifier/src/signed.rs | 8 +- 4 files changed, 130 insertions(+), 119 deletions(-) rename verifier/src/{verifier_doc.rs => doc.rs} (81%) diff --git a/verifier/src/checks.rs b/verifier/src/checks.rs index 077172fe59..6ddbbc3dfb 100644 --- a/verifier/src/checks.rs +++ b/verifier/src/checks.rs @@ -1,8 +1,8 @@ /// Methods performing various validation checks on the [crate::verifier_doc::VerifierDocUnchecked] -use super::verifier_doc::VerifierDocUnchecked; +use super::doc::UncheckedDoc; use super::{commitment::Range, Error}; -pub fn perform_checks(unchecked: &VerifierDocUnchecked) -> Result<(), Error> { +pub fn 
perform_checks(unchecked: &UncheckedDoc) -> Result<(), Error> { // Performs the following validation checks: // // - at least one commitment is present @@ -35,7 +35,7 @@ pub fn perform_checks(unchecked: &VerifierDocUnchecked) -> Result<(), Error> { } /// Condition checked: at least one commitment is present -fn check_at_least_one_commitment_present(unchecked: &VerifierDocUnchecked) -> Result<(), Error> { +fn check_at_least_one_commitment_present(unchecked: &UncheckedDoc) -> Result<(), Error> { if unchecked.commitments().is_empty() { return Err(Error::SanityCheckError( "check_at_least_one_commitment_present".to_string(), @@ -45,7 +45,7 @@ fn check_at_least_one_commitment_present(unchecked: &VerifierDocUnchecked) -> Re } /// Condition checked: commitments and openings have their ids incremental and ascending -fn check_commitment_and_opening_ids(unchecked: &VerifierDocUnchecked) -> Result<(), Error> { +fn check_commitment_and_opening_ids(unchecked: &UncheckedDoc) -> Result<(), Error> { for i in 0..unchecked.commitments().len() { if !(unchecked.commitments()[i].id() == i && unchecked.commitment_openings()[i].id() == i) { return Err(Error::SanityCheckError( @@ -57,7 +57,7 @@ fn check_commitment_and_opening_ids(unchecked: &VerifierDocUnchecked) -> Result< } /// Condition checked: commitment count equals opening count -fn check_commitment_and_opening_count_equal(unchecked: &VerifierDocUnchecked) -> Result<(), Error> { +fn check_commitment_and_opening_count_equal(unchecked: &UncheckedDoc) -> Result<(), Error> { if unchecked.commitments().len() != unchecked.commitment_openings().len() { return Err(Error::SanityCheckError( "check_commitment_and_opening_count_equal".to_string(), @@ -67,7 +67,7 @@ fn check_commitment_and_opening_count_equal(unchecked: &VerifierDocUnchecked) -> } /// Condition checked: ranges inside one commitment are non-empty, valid, ascending, non-overlapping, non-overflowing -fn check_ranges_inside_each_commitment(unchecked: &VerifierDocUnchecked) -> Result<(), Error> { +fn check_ranges_inside_each_commitment(unchecked: &UncheckedDoc) -> Result<(), Error> { for c in unchecked.commitments() { let len = c.ranges().len(); // at least one range is expected @@ -110,7 +110,7 @@ fn check_ranges_inside_each_commitment(unchecked: &VerifierDocUnchecked) -> Resu /// corresponding commitment /// Condition checked: the total amount of committed data is less than 1GB to prevent DoS /// (this will cause the verifier to hash up to a max of 1GB * 128 = 128GB of labels) -fn check_commitment_sizes(unchecked: &VerifierDocUnchecked) -> Result<(), Error> { +fn check_commitment_sizes(unchecked: &UncheckedDoc) -> Result<(), Error> { let mut total_committed = 0usize; for i in 0..unchecked.commitment_openings().len() { @@ -137,7 +137,7 @@ fn check_commitment_sizes(unchecked: &VerifierDocUnchecked) -> Result<(), Error> /// Condition checked: the amount of commitments is less that 1000 /// (searching for overlapping commitments in the naive way which we implemeted has quadratic cost, /// hence this number shouldn't be too high to prevent DoS) -fn check_commitment_count(unchecked: &VerifierDocUnchecked) -> Result<(), Error> { +fn check_commitment_count(unchecked: &UncheckedDoc) -> Result<(), Error> { if unchecked.commitments().len() >= 1000 { return Err(Error::SanityCheckError( "check_commitment_count".to_string(), @@ -147,7 +147,7 @@ fn check_commitment_count(unchecked: &VerifierDocUnchecked) -> Result<(), Error> } /// Condition checked: each Merkle tree index is both unique and also ascending between 
commitments -fn check_merkle_tree_indices(unchecked: &VerifierDocUnchecked) -> Result<(), Error> { +fn check_merkle_tree_indices(unchecked: &UncheckedDoc) -> Result<(), Error> { let indices: Vec = unchecked .commitments() .iter() @@ -166,7 +166,7 @@ fn check_merkle_tree_indices(unchecked: &VerifierDocUnchecked) -> Result<(), Err /// Makes sure that if two or more commitments contain overlapping ranges, the openings /// corresponding to those ranges match exactly. Otherwise, if the openings don't match, /// returns an error. -fn check_overlapping_openings(unchecked: &VerifierDocUnchecked) -> Result<(), Error> { +fn check_overlapping_openings(unchecked: &UncheckedDoc) -> Result<(), Error> { // Note: using an existing lib to find multi-range overlap would incur the need to audit // that lib for correctness. Instead, since checking two range overlap is cheap, we are using // a naive way where we compare each range to all other ranges. diff --git a/verifier/src/verifier_doc.rs b/verifier/src/doc.rs similarity index 81% rename from verifier/src/verifier_doc.rs rename to verifier/src/doc.rs index 3873fab8c0..351af9cbed 100644 --- a/verifier/src/verifier_doc.rs +++ b/verifier/src/doc.rs @@ -10,8 +10,8 @@ use serde::{ser::Serializer, Serialize}; use std::{any::Any, collections::HashMap}; #[derive(Serialize)] -/// A validated notarization document received from the User -pub struct VerifierDoc { +/// A validated and verified notarization document +pub struct VerifiedDoc { version: u8, tls_doc: TLSDoc, /// Notary's signature over the [Signed] portion of this doc @@ -44,10 +44,48 @@ pub struct VerifierDoc { commitment_openings: Vec, } -impl VerifierDoc { - /// Creates a new document. This method is called only by the User. - /// [VerifierDoc] is never passed directly to the Verifier. Instead, the User must convert - /// it into [VerifierDocUnchecked] +impl VerifiedDoc { + /// Creates a new [VerifiedDoc] from [ValidatedDoc] + pub(crate) fn from_validated(validated: ValidatedDoc) -> Self { + Self { + version: validated.version, + tls_doc: validated.tls_doc, + signature: validated.signature, + label_seed: validated.label_seed, + merkle_root: validated.merkle_root, + merkle_tree_leaf_count: validated.merkle_tree_leaf_count, + merkle_multi_proof: validated.merkle_multi_proof, + commitments: validated.commitments, + commitment_openings: validated.commitment_openings, + } + } + + pub fn commitments(&self) -> &Vec { + &self.commitments + } + + pub fn commitment_openings(&self) -> &Vec { + &self.commitment_openings + } +} + +/// Notarization document in its unchecked form. This is the form in which the document is received +/// by the Verifier from the User. +pub struct UncheckedDoc { + /// All fields are exactly as in [VerifiedDoc] + version: u8, + tls_doc: TLSDoc, + signature: Option>, + label_seed: LabelSeed, + merkle_root: [u8; 32], + merkle_tree_leaf_count: usize, + merkle_multi_proof: MerkleProof, + commitments: Vec, + commitment_openings: Vec, +} + +impl UncheckedDoc { + /// Creates a new unchecked document. This method is called only by the User. pub fn new( version: u8, tls_doc: TLSDoc, @@ -72,13 +110,36 @@ impl VerifierDoc { } } - /// Returns a new [VerifierDoc] after performing all validation checks. This is the only way - /// for the Verifier (who was NOT acting as the Notary) to derive [VerifierDoc]. 
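Editorial aside: the naive pairwise overlap search described above reduces to intersecting half-open byte ranges. A standalone sketch of that primitive (tuples stand in for the crate's `Range` type; not part of the patch):

```rust
// Editor's sketch: intersection of two half-open byte ranges.
fn overlap(a: (usize, usize), b: (usize, usize)) -> Option<(usize, usize)> {
    let start = a.0.max(b.0);
    let end = a.1.min(b.1);
    if start < end {
        Some((start, end))
    } else {
        None
    }
}

fn main() {
    assert_eq!(overlap((5, 19), (10, 30)), Some((10, 19)));
    assert_eq!(overlap((5, 10), (10, 30)), None); // ranges that merely touch do not overlap
}
```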
- pub fn from_unchecked(unchecked: VerifierDocUnchecked) -> Result { + pub fn commitments(&self) -> &Vec { + &self.commitments + } + + pub fn commitment_openings(&self) -> &Vec { + &self.commitment_openings + } +} + +// Notarization document in its validated form (not yet verified) +pub(crate) struct ValidatedDoc { + /// All fields are exactly as in [VerifiedDoc] + version: u8, + tls_doc: TLSDoc, + signature: Option>, + label_seed: LabelSeed, + merkle_root: [u8; 32], + merkle_tree_leaf_count: usize, + merkle_multi_proof: MerkleProof, + commitments: Vec, + commitment_openings: Vec, +} + +impl ValidatedDoc { + /// Returns a new [ValidatedDoc] after performing all validation checks + pub(crate) fn from_unchecked(unchecked: UncheckedDoc) -> Result { checks::perform_checks(&unchecked)?; // Make sure the Notary's signature is present. - // (If the Verifier IS also the Notary then the signature is NOT needed. `VerifierDoc` + // (If the Verifier IS also the Notary then the signature is NOT needed. `VerifiedDoc` // should be created with `from_unchecked_with_signed_data()` instead.) if unchecked.signature.is_none() { @@ -98,18 +159,17 @@ impl VerifierDoc { }) } - /// Returns a new VerifierDoc after performing all validation checks and adding the signed data. - /// This is the only way for the Verifier who acted as the Notary to derive [VerifierDoc]. + /// Returns a new [ValidatedDoc] after performing all validation checks and adding the signed data. /// `signed_data` (despite its name) is not actually signed because it was generated locally by /// the calling Verifier. - pub fn from_unchecked_with_signed_data( - unchecked: VerifierDocUnchecked, + pub(crate) fn from_unchecked_with_signed_data( + unchecked: UncheckedDoc, signed_data: Signed, ) -> Result { checks::perform_checks(&unchecked)?; // Make sure the Notary's signature is NOT present. - // (If the Verifier is NOT the Notary then the Notary's signature IS needed. `VerifierDoc` + // (If the Verifier is NOT the Notary then the Notary's signature IS needed. `ValidatedDoc` // should be created with `from_unchecked()` instead.) if unchecked.signature.is_some() { @@ -142,7 +202,7 @@ impl VerifierDoc { /// - the TLS document /// - the inclusion of commitments in the Merkle tree /// - each commitment - pub fn verify(&self, dns_name: String) -> Result<(), Error> { + pub(crate) fn verify(&self, dns_name: String) -> Result<(), Error> { self.tls_doc.verify(dns_name)?; self.verify_merkle_proofs()?; @@ -235,56 +295,6 @@ impl VerifierDoc { pub fn tls_doc(&self) -> &TLSDoc { &self.tls_doc } - - pub fn commitments(&self) -> &Vec { - &self.commitments - } - - pub fn commitment_openings(&self) -> &Vec { - &self.commitment_openings - } -} - -/// This is the [VerifierDoc] in its unchecked form. This is the form in which the doc is received -/// by the Verifier from the User. 
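Editorial aside: this refactor introduces a type-state progression (UncheckedDoc, then ValidatedDoc, then VerifiedDoc). A rough illustrative sketch of the pattern with stand-in types (the real structs carry many more fields and checks; not part of the patch):

```rust
// Editor's sketch: the type-state idea behind UncheckedDoc -> ValidatedDoc -> VerifiedDoc.
struct Unchecked(Vec<u8>);
struct Validated(Vec<u8>);
struct Verified(Vec<u8>);

impl Validated {
    // validation consumes the unchecked form, so later stages can rely on its checks
    fn from_unchecked(doc: Unchecked) -> Result<Self, String> {
        if doc.0.is_empty() {
            return Err("sanity check failed".to_string());
        }
        Ok(Validated(doc.0))
    }
}

impl Verified {
    // signature, Merkle proof and commitment verification would happen here
    fn from_validated(doc: Validated) -> Self {
        Verified(doc.0)
    }
}

fn main() {
    let unchecked = Unchecked(b"doc".to_vec());
    let validated = Validated::from_unchecked(unchecked).unwrap();
    let verified = Verified::from_validated(validated);
    // a Verified value can only be obtained by going through validation first
    println!("verified {} bytes", verified.0.len());
}
```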
-pub struct VerifierDocUnchecked { - /// All fields are exactly as in [VerifierDoc] - version: u8, - tls_doc: TLSDoc, - signature: Option>, - label_seed: LabelSeed, - merkle_root: [u8; 32], - merkle_tree_leaf_count: usize, - merkle_multi_proof: MerkleProof, - commitments: Vec, - commitment_openings: Vec, -} - -impl VerifierDocUnchecked { - pub fn commitments(&self) -> &Vec { - &self.commitments - } - - pub fn commitment_openings(&self) -> &Vec { - &self.commitment_openings - } -} - -/// Converts VerifierDoc into an unchecked type with will be passed to the Verifier -impl std::convert::From for VerifierDocUnchecked { - fn from(doc: VerifierDoc) -> Self { - Self { - version: doc.version, - tls_doc: doc.tls_doc, - signature: doc.signature, - label_seed: doc.label_seed, - merkle_root: doc.merkle_root, - merkle_tree_leaf_count: doc.merkle_tree_leaf_count, - merkle_multi_proof: doc.merkle_multi_proof, - commitments: doc.commitments, - commitment_openings: doc.commitment_openings, - } - } } /// Serialize the [MerkleProof] type using its native `serialize` method diff --git a/verifier/src/lib.rs b/verifier/src/lib.rs index 27cfb78188..5f004452a8 100644 --- a/verifier/src/lib.rs +++ b/verifier/src/lib.rs @@ -1,18 +1,18 @@ mod checks; mod commitment; +mod doc; mod error; mod label_encoder; mod pubkey; mod signed; mod tls_doc; mod utils; -mod verifier_doc; mod webpki_utils; -use crate::signed::Signed; +use crate::{doc::ValidatedDoc, signed::Signed}; +use doc::{UncheckedDoc, VerifiedDoc}; use error::Error; use pubkey::PubKey; -use verifier_doc::{VerifierDoc, VerifierDocUnchecked}; type HashCommitment = [u8; 32]; @@ -24,40 +24,38 @@ type LabelSeed = [u8; 32]; /// /// Once the verification succeeds, an application level (e.g. HTTP, JSON) parser can /// parse `commitment_openings` in `doc` -pub struct Verifier { - /// A validated notarization document which needs to be verified - doc: VerifierDoc, - /// A trusted Notary's pubkey (if this Verifier acted as the Notary then no pubkey needs - /// to be provided) - trusted_pubkey: Option, -} +pub struct Verifier {} impl Verifier { - /// Validates the notarization document and creates a new Verifier - pub fn new( - doc_unchecked: VerifierDocUnchecked, - trusted_pubkey: Option, - ) -> Result { - let doc = VerifierDoc::from_unchecked(doc_unchecked)?; - Ok(Self { - doc, - trusted_pubkey, - }) + /// Creates a new Verifier + pub fn new() -> Self { + Self {} } /// Verifies that the notarization document resulted from notarizing data from a TLS server with the - /// DNS name `dns_name`. `dns_name` must be exactly as it appears in the server's TLS certificate. - /// Also verifies the Notary's signature (if any). + /// DNS name `dns_name`. Also verifies the Notary's signature (if any). /// /// IMPORTANT: /// if the notarized application data is HTTP, the checks below will not be sufficient. You must also /// check on the HTTP parser's level against domain fronting. /// - pub fn verify(&self, dns_name: String) -> Result<(), Error> { + /// * unchecked_doc - The notarization document to be validated and verified + /// * trusted_pubkey - A trusted Notary's pubkey (if this Verifier acted as the Notary then no + /// pubkey needs to be provided) + /// * dns_name - A DNS name. Must be exactly as it appears in the server's TLS certificate. 
+ pub fn verify( + &self, + unchecked_doc: UncheckedDoc, + trusted_pubkey: Option, + dns_name: String, + ) -> Result { + // validate the document + let doc = ValidatedDoc::from_unchecked(unchecked_doc)?; + // verify Notary's signature, if any - match (self.doc.signature(), &self.trusted_pubkey) { + match (doc.signature(), &trusted_pubkey) { (Some(sig), Some(pubkey)) => { - self.verify_doc_signature(pubkey, sig)?; + self.verify_doc_signature(pubkey, sig, self.signed_data(&doc))?; } // no pubkey and no signature, do nothing (None, None) => (), @@ -68,21 +66,21 @@ impl Verifier { } // verify the document - self.doc.verify(dns_name)?; + doc.verify(dns_name)?; - Ok(()) + Ok(VerifiedDoc::from_validated(doc)) } /// Verifies Notary's signature on that part of the document which was signed - fn verify_doc_signature(&self, pubkey: &PubKey, sig: &[u8]) -> Result<(), Error> { - let msg = self.signed_data().serialize()?; + fn verify_doc_signature(&self, pubkey: &PubKey, sig: &[u8], msg: Signed) -> Result<(), Error> { + let msg = msg.serialize()?; pubkey.verify_signature(&msg, sig) } - /// Extracts the necessary fields from the [VerifierDoc] into a [Signed] + /// Extracts the necessary fields from the [VerifiedDoc] into a [Signed] /// struct and returns it - fn signed_data(&self) -> Signed { - (&self.doc).into() + fn signed_data(&self, doc: &ValidatedDoc) -> Signed { + doc.into() } } @@ -238,7 +236,7 @@ fn e2e_test() { let indices_to_prove = vec![0]; let proof = merkle_tree.proof(&indices_to_prove); - let doc = VerifierDoc::new( + let unchecked_doc = UncheckedDoc::new( 1, tls_doc, Some(signature.to_vec()), @@ -250,24 +248,27 @@ fn e2e_test() { vec![open], ); - // -------- The User converts the doc into an unchecked type and passes it to the Verifier - let doc_unchecked: VerifierDocUnchecked = doc.into(); - // -------- The Verifier verifies the doc: // Initially the Verifier may store the Notary's pubkey as bytes. Converts it into // PubKey type let trusted_pubkey = PubKey::from_bytes(KeyType::P256, pubkey_bytes).unwrap(); - let verifier = Verifier::new(doc_unchecked, Some(trusted_pubkey)).unwrap(); + let verifier = Verifier::new(); - verifier.verify("tlsnotary.org".to_string()).unwrap(); + let verified_doc = verifier + .verify( + unchecked_doc, + Some(trusted_pubkey), + "tlsnotary.org".to_string(), + ) + .unwrap(); // -------- The Verifier proceeds to put each verified commitment opening through an application // level (e.g. http) parser assert_eq!( - String::from_utf8(verifier.doc.commitment_openings()[0].opening().clone()).unwrap(), + String::from_utf8(verified_doc.commitment_openings()[0].opening().clone()).unwrap(), "important data".to_string() ); } diff --git a/verifier/src/signed.rs b/verifier/src/signed.rs index e9b329dbac..0955203fce 100644 --- a/verifier/src/signed.rs +++ b/verifier/src/signed.rs @@ -1,4 +1,4 @@ -use super::{tls_doc::EphemeralECPubkey, Error, HashCommitment, LabelSeed, VerifierDoc}; +use super::{tls_doc::EphemeralECPubkey, Error, HashCommitment, LabelSeed, ValidatedDoc}; use serde::Serialize; #[derive(Clone, Serialize)] @@ -42,7 +42,7 @@ impl SignedTLS { #[derive(Clone, Serialize)] pub struct Signed { tls: SignedTLS, - // see comments in [crate::VerifierDoc] for details about the fields below + // see comments in [crate::doc::VerifiedDoc] for details about the fields below /// PRG seed from which garbled circuit labels are generated label_seed: LabelSeed, /// Merkle root of all the commitments @@ -78,8 +78,8 @@ impl Signed { /// Extracts relevant fields from the VerifierDoc. 
Those are the fields /// which the Notary signs -impl std::convert::From<&VerifierDoc> for Signed { - fn from(doc: &VerifierDoc) -> Self { +impl std::convert::From<&ValidatedDoc> for Signed { + fn from(doc: &ValidatedDoc) -> Self { Signed::new( doc.tls_doc().signed_tls().clone(), *doc.label_seed(), From 26e4ac1e8a7dfbf9d4366f431667ed6f68d2da95 Mon Sep 17 00:00:00 2001 From: themighty1 Date: Tue, 31 Jan 2023 13:19:04 +0200 Subject: [PATCH 08/23] moved test to tests/ --- verifier/Cargo.toml | 4 +- verifier/src/label_encoder.rs | 3 +- verifier/src/lib.rs | 203 +---------------------------- verifier/src/utils.rs | 12 -- verifier/tests/integration_test.rs | 202 ++++++++++++++++++++++++++++ 5 files changed, 214 insertions(+), 210 deletions(-) create mode 100644 verifier/tests/integration_test.rs diff --git a/verifier/Cargo.toml b/verifier/Cargo.toml index 5c51135f81..89dd5b669c 100644 --- a/verifier/Cargo.toml +++ b/verifier/Cargo.toml @@ -6,7 +6,6 @@ edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -#tlsn-mpc-core = { path = "../mpc/mpc-core", features = ["garble"] } blake3 = "1.3.3" sha2 = "0.10" p256 = { version = "0.10", features = ["ecdsa"]} @@ -22,4 +21,7 @@ serde = { version = "1.0", features = ["derive"] } bincode = "1.3.3" [dev-dependencies] +tlsn-mpc-core = { path = "../mpc/mpc-core" } +tlsn-mpc-circuits = { path = "../mpc/mpc-circuits" } +tlsn-tls-circuits = { path = "../tls/tls-circuits" } hex = "0.4" diff --git a/verifier/src/label_encoder.rs b/verifier/src/label_encoder.rs index d3a137d964..1ead30e8b8 100644 --- a/verifier/src/label_encoder.rs +++ b/verifier/src/label_encoder.rs @@ -6,7 +6,8 @@ use rand_chacha::ChaCha20Rng; use std::ops::BitXor; const DELTA_STREAM_ID: u64 = u64::MAX; -const PLAINTEXT_STREAM_ID: u64 = 1; +/// PLAINTEXT_STREAM_ID must match the id of the plaintext input in tls/tls-circuits/src/c6.rs +const PLAINTEXT_STREAM_ID: u64 = 4; #[derive(Clone, Copy)] pub struct Block(u128); diff --git a/verifier/src/lib.rs b/verifier/src/lib.rs index 5f004452a8..c29dee18f2 100644 --- a/verifier/src/lib.rs +++ b/verifier/src/lib.rs @@ -1,11 +1,11 @@ mod checks; -mod commitment; -mod doc; +pub mod commitment; +pub mod doc; mod error; mod label_encoder; -mod pubkey; -mod signed; -mod tls_doc; +pub mod pubkey; +pub mod signed; +pub mod tls_doc; mod utils; mod webpki_utils; @@ -14,11 +14,11 @@ use doc::{UncheckedDoc, VerifiedDoc}; use error::Error; use pubkey::PubKey; -type HashCommitment = [u8; 32]; +pub type HashCommitment = [u8; 32]; /// A PRG seeds from which to generate garbled circuit active labels, see /// [crate::commitment::CommitmentType::labels_blake3] -type LabelSeed = [u8; 32]; +pub type LabelSeed = [u8; 32]; /// Verifier of the notarization document /// @@ -83,192 +83,3 @@ impl Verifier { doc.into() } } - -#[test] -// Create a document and verify it -fn e2e_test() { - use crate::{ - commitment::{Commitment, CommitmentOpening, CommitmentType, Direction, Range}, - label_encoder::{Block, ChaChaEncoder}, - signed::SignedTLS, - tls_doc::{ - CommittedTLS, EphemeralECPubkey, EphemeralECPubkeyType, KEParamsSigAlg, - ServerSignature, TLSDoc, - }, - utils::{blake3, bytes_in_ranges, u8vec_to_boolvec}, - Signed, - }; - use blake3::Hasher; - use p256::ecdsa::{signature::Signer, SigningKey, VerifyingKey}; - use pubkey::KeyType; - use rand::Rng; - use rs_merkle::{algorithms::Sha256, MerkleTree}; - - let mut rng = rand::thread_rng(); - - let plaintext = b"This important data will be notarized"; - - // 
-------- After the webserver sends the Server Key Exchange message (during the TLS handshake), - // the tls-client module provides the following TLS data: - - /// end entity cert - static EE: &[u8] = include_bytes!("testdata/tlsnotary.org/ee.der"); - // intermediate cert - static INTER: &[u8] = include_bytes!("testdata/tlsnotary.org/inter.der"); - // certificate authority cert - static CA: &[u8] = include_bytes!("testdata/tlsnotary.org/ca.der"); - let cert_chain = vec![CA.to_vec(), INTER.to_vec(), EE.to_vec()]; - // unix time when the cert chain was valid - static TIME: u64 = 1671637529; - - // data taken from an actual network trace captured with `tcpdump host tlsnotary.org -w out.pcap` - // (see testdata/key_exchange/README for details) - - let client_random = - hex::decode("ac3808970faf996d38864e205c6b787a1d05f681654a5d2a3c87f7dd2f13332e").unwrap(); - let server_random = - hex::decode("8abf9a0c4b3b9694edac3d19e8eb7a637bfa8fe5644bd9f1444f574e47524401").unwrap(); - let ephemeral_pubkey = hex::decode("04521e456448e6156026bb1392e0a689c051a84d67d353ab755fce68a2e9fba68d09393fa6485db84517e16d9855ce5ba3ec2293f2e511d1e315570531722e9788").unwrap(); - let sig = hex::decode("337aa65793562550f6de0a9c792b5f531a96bb78f65a2063f710bfb99e11c791e13d35c798b50eea1351c14efc526009c7836e888206cebde7135130a1fbc049d42e1d1ed05c10f0d108b9540f049ac24fe1076d391b9da3d4e60b5cb8f341bda993f6002873847be744c1955ff575b2d833694fb8a432898c5ac55752e2bddcee4c07371335e1a6581694df43c6eb0ce8da4cdd497c205607b573f9c5d17c951e0a71fbf967c4bff53fc37c597b2f5656478fefb780e8f37bd8409985dd980eda4f254c7dce76dc69e66ed27c0f2c93b53a6dfd7b27359e1589a30d483725e92305766c62d6cad2c0142d3a3c4a2272e6d81eda2886ef12028167f83b3c33ea").unwrap(); - - let server_sig = ServerSignature::new(KEParamsSigAlg::RSA_PKCS1_2048_8192_SHA256, sig); - - let ephemeral_pubkey = EphemeralECPubkey::new(EphemeralECPubkeyType::P256, ephemeral_pubkey); - - // -------- Using the above data, the User computes [CommittedTLS] and sends a commitment to - // the Notary - - let committed_tls = CommittedTLS::new(cert_chain, server_sig, client_random, server_random); - let commitment_to_tls = blake3(&committed_tls.serialize().unwrap()); - - // -------- The Notary generates garbled circuit's labels from a PRG seed. One pair of labels - // for each bit of plaintext - - let label_seed: LabelSeed = rng.gen(); - - let mut enc = ChaChaEncoder::new(label_seed); - - // Note that for this test's purposes the Notary is using crate::label_encoder. - // In production he will use tlsn/mpc/mpc-core/garble/label/encoder - let full_labels: Vec<[Block; 2]> = (0..plaintext.len() * 8).map(|i| enc.encode(i)).collect(); - - // -------- The User retrieves her active labels using Oblivious Transfer (simulated below): - - // convert plaintext into lsb0 bits - let bits = u8vec_to_boolvec(plaintext); - - let all_active_labels: Vec = full_labels - .iter() - .zip(bits) - .map( - |(label_pair, bit)| { - if bit { - label_pair[1] - } else { - label_pair[0] - } - }, - ) - .collect(); - - // ---------- After the notar. 
session is over: -------- - - // -------- The User computes all her commitments - - // Here we'll have 1 (salted) commitment which has 1 range - - let ranges = vec![Range::new(5, 19).unwrap()]; - - let salt: [u8; 32] = rng.gen(); - - // hash all the active labels in the commitment's ranges - let mut hasher = Hasher::new(); - - for r in &ranges { - for label in all_active_labels[r.start() * 8..r.end() * 8].iter() { - hasher.update(&label.inner().to_be_bytes()); - } - } - - // add salt - hasher.update(&salt); - let hash_commitment: HashCommitment = hasher.finalize().into(); - - let comm = Commitment::new( - 0, - CommitmentType::labels_blake3, - Direction::Sent, - hash_commitment, - ranges.clone(), - 0, - ); - - // -------- The User creates a merkle tree of commitments and then a merkle proof of inclusion. - // Sends the merkle_root to the Notary - let leaves = [hash_commitment]; - let merkle_tree = MerkleTree::::from_leaves(&leaves); - let merkle_root = merkle_tree.root().unwrap(); - - // -------- the Notary uses his pubkey to compute a signature - let signing_key = SigningKey::random(&mut rng); - let verifying_key = VerifyingKey::from(&signing_key); - let encoded = verifying_key.to_encoded_point(true); - let pubkey_bytes = encoded.as_bytes(); - - // (note that ephemeralECPubkey is known both to the User and the Notary) - let signed_tls = SignedTLS::new(TIME, ephemeral_pubkey, commitment_to_tls); - let signed = Signed::new(signed_tls.clone(), label_seed, merkle_root); - - let signature = signing_key.sign(&bincode::serialize(&signed).unwrap()); - let sig_der = signature.to_der(); - let signature = sig_der.as_bytes(); - - // -------- the Notary reveals `label_seed` and also sends the `signature` and `time`. - - // -------- After that the User creates a doc for the Verifier: - // (The User creates `signed_tls` just like the Notary did above) - let tls_doc = TLSDoc::new(signed_tls, committed_tls); - - // prepares openings and merkle proofs for those openings - let opening_bytes = bytes_in_ranges(plaintext, &ranges); - let open = CommitmentOpening::new(0, opening_bytes, salt.to_vec()); - - let indices_to_prove = vec![0]; - let proof = merkle_tree.proof(&indices_to_prove); - - let unchecked_doc = UncheckedDoc::new( - 1, - tls_doc, - Some(signature.to_vec()), - label_seed, - merkle_root, - 1, - proof, - vec![comm], - vec![open], - ); - - // -------- The Verifier verifies the doc: - - // Initially the Verifier may store the Notary's pubkey as bytes. Converts it into - // PubKey type - let trusted_pubkey = PubKey::from_bytes(KeyType::P256, pubkey_bytes).unwrap(); - - let verifier = Verifier::new(); - - let verified_doc = verifier - .verify( - unchecked_doc, - Some(trusted_pubkey), - "tlsnotary.org".to_string(), - ) - .unwrap(); - - // -------- The Verifier proceeds to put each verified commitment opening through an application - // level (e.g. http) parser - - assert_eq!( - String::from_utf8(verified_doc.commitment_openings()[0].opening().clone()).unwrap(), - "important data".to_string() - ); -} diff --git a/verifier/src/utils.rs b/verifier/src/utils.rs index ba632989b3..a9d562dcc8 100644 --- a/verifier/src/utils.rs +++ b/verifier/src/utils.rs @@ -62,18 +62,6 @@ pub(crate) fn blake3(data: &[u8]) -> [u8; 32] { hasher.finalize().into() } -/// Returns a substring of the original `bytestring` containing only the bytes in `ranges`. -/// This method is only called with validated `ranges` which do not exceed the size of the -/// `bytestring`. 
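Editorial aside: for reference, a standalone equivalent of the `bytes_in_ranges` helper removed from utils.rs in this commit (tuples stand in for the crate's `Range` type; not part of the patch):

```rust
// Editor's sketch: concatenate the bytes covered by a set of half-open ranges.
fn bytes_in_ranges(bytestring: &[u8], ranges: &[(usize, usize)]) -> Vec<u8> {
    let mut substring = Vec::new();
    for &(start, end) in ranges {
        substring.extend_from_slice(&bytestring[start..end]);
    }
    substring
}

fn main() {
    let plaintext = b"This important data will be notarized";
    // the range [5, 19) covers "important data", matching the opening used in the e2e test
    assert_eq!(bytes_in_ranges(plaintext, &[(5, 19)]), b"important data".to_vec());
}
```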
-#[cfg(test)] -pub(crate) fn bytes_in_ranges(bytestring: &[u8], ranges: &[Range]) -> Vec { - let mut substring: Vec = Vec::new(); - for r in ranges { - substring.append(&mut bytestring[r.start()..r.end()].to_vec()) - } - substring -} - #[cfg(test)] mod test { use super::*; diff --git a/verifier/tests/integration_test.rs b/verifier/tests/integration_test.rs new file mode 100644 index 0000000000..1e816bcefc --- /dev/null +++ b/verifier/tests/integration_test.rs @@ -0,0 +1,202 @@ +use ::blake3::Hasher; +use mpc_circuits::Value; +use mpc_core::garble::{ChaChaEncoder, Encoder, Label}; +use p256::ecdsa::{signature::Signer, SigningKey, VerifyingKey}; +use rand::Rng; +use rs_merkle::{algorithms::Sha256, MerkleTree}; +use tls_circuits::c6; +use verifier::{ + commitment::{Commitment, CommitmentOpening, CommitmentType, Direction, Range}, + doc::UncheckedDoc, + pubkey::{KeyType, PubKey}, + signed::{Signed, SignedTLS}, + tls_doc::{ + CommittedTLS, EphemeralECPubkey, EphemeralECPubkeyType, KEParamsSigAlg, ServerSignature, + TLSDoc, + }, + HashCommitment, LabelSeed, Verifier, +}; + +// End-to-end test. Create a notarization document and verify it. +#[test] +fn e2e_test() { + let mut rng = rand::thread_rng(); + + // plaintext padded to a multiple of 16 bytes + let plaintext = b"This important data will be notarized..........."; + + // -------- After the webserver sends the Server Key Exchange message (during the TLS handshake), + // the tls-client module provides the following TLS data: + + /// end entity cert + static EE: &[u8] = include_bytes!("../src/testdata/tlsnotary.org/ee.der"); + // intermediate cert + static INTER: &[u8] = include_bytes!("../src/testdata/tlsnotary.org/inter.der"); + // certificate authority cert + static CA: &[u8] = include_bytes!("../src/testdata/tlsnotary.org/ca.der"); + let cert_chain = vec![CA.to_vec(), INTER.to_vec(), EE.to_vec()]; + // unix time when the cert chain was valid + static TIME: u64 = 1671637529; + + // data taken from an actual network trace captured with `tcpdump host tlsnotary.org -w out.pcap` + // (see testdata/key_exchange/README for details) + + let client_random = + hex::decode("ac3808970faf996d38864e205c6b787a1d05f681654a5d2a3c87f7dd2f13332e").unwrap(); + let server_random = + hex::decode("8abf9a0c4b3b9694edac3d19e8eb7a637bfa8fe5644bd9f1444f574e47524401").unwrap(); + let ephemeral_pubkey = hex::decode("04521e456448e6156026bb1392e0a689c051a84d67d353ab755fce68a2e9fba68d09393fa6485db84517e16d9855ce5ba3ec2293f2e511d1e315570531722e9788").unwrap(); + let sig = hex::decode("337aa65793562550f6de0a9c792b5f531a96bb78f65a2063f710bfb99e11c791e13d35c798b50eea1351c14efc526009c7836e888206cebde7135130a1fbc049d42e1d1ed05c10f0d108b9540f049ac24fe1076d391b9da3d4e60b5cb8f341bda993f6002873847be744c1955ff575b2d833694fb8a432898c5ac55752e2bddcee4c07371335e1a6581694df43c6eb0ce8da4cdd497c205607b573f9c5d17c951e0a71fbf967c4bff53fc37c597b2f5656478fefb780e8f37bd8409985dd980eda4f254c7dce76dc69e66ed27c0f2c93b53a6dfd7b27359e1589a30d483725e92305766c62d6cad2c0142d3a3c4a2272e6d81eda2886ef12028167f83b3c33ea").unwrap(); + + let server_sig = ServerSignature::new(KEParamsSigAlg::RSA_PKCS1_2048_8192_SHA256, sig); + + let ephemeral_pubkey = EphemeralECPubkey::new(EphemeralECPubkeyType::P256, ephemeral_pubkey); + + // -------- Using the above data, the User computes [CommittedTLS] and sends a commitment to + // the Notary + + let committed_tls = CommittedTLS::new(cert_chain, server_sig, client_random, server_random); + let commitment_to_tls = blake3(&committed_tls.serialize().unwrap()); + + // 
-------- The Notary generates garbled circuit's labels from a PRG seed + let label_seed: LabelSeed = rng.gen(); + + let mut enc = ChaChaEncoder::new(label_seed); + + // encoder works only on the `Input` type. This is the only way to obtain it + // c6 is the AES encryption circuit, input with id == 4 is the plaintext + let input = c6().input(4).unwrap(); + + // since `input` is a 16-byte value, encode one 16-byte chunk at a time + let active_labels: Vec