diff --git a/Cargo.toml b/Cargo.toml index 007981d74b..dd9c659f61 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,8 +9,8 @@ readme = "README.md" repository = "https://github.com/sigstore/sigstore-rs" [features] -default = ["full-native-tls", "cached-client", "sigstore-trust-root", "sign"] -wasm = ["getrandom/js"] +default = ["full-native-tls", "cached-client", "sigstore-trust-root", "bundle"] +wasm = ["getrandom/js", "ring/wasm32_unknown_unknown_js", "chrono/wasmbind"] full-native-tls = [ "fulcio-native-tls", @@ -28,21 +28,23 @@ full-rustls-tls = [ # This features is used by tests that use docker to create a registry test-registry = [] -fulcio-native-tls = ["oauth-native-tls", "reqwest/native-tls", "fulcio"] -fulcio-rustls-tls = ["oauth-rustls-tls", "reqwest/rustls-tls", "fulcio"] -fulcio = [] - oauth-native-tls = ["openidconnect/native-tls", "oauth"] oauth-rustls-tls = ["openidconnect/rustls-tls", "oauth"] -oauth = [] +oauth = ["openidconnect"] + +fulcio-native-tls = ["oauth-native-tls", "reqwest/native-tls", "fulcio"] +fulcio-rustls-tls = ["oauth-rustls-tls", "reqwest/rustls-tls", "fulcio"] +fulcio = ["oauth", "serde_with"] rekor-native-tls = ["reqwest/native-tls", "rekor"] rekor-rustls-tls = ["reqwest/rustls-tls", "rekor"] rekor = ["reqwest"] -sigstore-trust-root = ["futures-util", "tough", "regex", "tokio/sync"] +sign = ["sigstore_protobuf_specs", "fulcio", "rekor", "cert"] +verify = ["sigstore_protobuf_specs", "fulcio", "rekor", "cert"] +bundle = ["sign", "verify"] -sign = [] +sigstore-trust-root = ["sigstore_protobuf_specs", "futures-util", "tough", "regex", "tokio/sync"] cosign-native-tls = [ "oci-distribution/native-tls", @@ -56,12 +58,12 @@ cosign-rustls-tls = [ "cosign", "registry-rustls-tls", ] -cosign = [] +cosign = ["olpc-cjson"] cert = [] registry-native-tls = ["oci-distribution/native-tls", "registry"] registry-rustls-tls = ["oci-distribution/rustls-tls", "registry"] -registry = [] +registry = ["olpc-cjson"] mock-client-native-tls = ["oci-distribution/native-tls", "mock-client"] mock-client-rustls-tls = ["oci-distribution/rustls-tls", "mock-client"] @@ -74,8 +76,8 @@ async-trait = "0.1.52" base64 = "0.22.0" cached = { version = "0.49.2", optional = true, features = ["async"] } cfg-if = "1.0.0" -chrono = { version = "0.4.27", default-features = false, features = ["serde"] } -const-oid = "0.9.1" +chrono = { version = "0.4.27", default-features = false, features = ["now", "serde"] } +const-oid = { version = "0.9.6", features = ["db"] } digest = { version = "0.10.3", default-features = false } ecdsa = { version = "0.16.7", features = ["pkcs8", "digest", "der", "signing"] } ed25519 = { version = "2.2.1", features = ["alloc"] } @@ -85,13 +87,13 @@ futures = "0.3" futures-util = { version = "0.3.30", optional = true } lazy_static = "1.4.0" oci-distribution = { version = "0.11", default-features = false, optional = true } -olpc-cjson = "0.1" +olpc-cjson = { version = "0.1", optional = true } openidconnect = { version = "3.0", default-features = false, features = [ "reqwest", ], optional = true } -p256 = "0.13.2" +p256 = "0.13" p384 = "0.13" -webbrowser = "0.8.4" +webbrowser = "0.8.12" pem = { version = "3.0", features = ["serde"] } pkcs1 = { version = "0.7.5", features = ["std"] } pkcs8 = { version = "0.10.2", features = [ @@ -111,23 +113,25 @@ rsa = "0.9.2" scrypt = "0.11.0" serde = { version = "1.0.136", features = ["derive"] } serde_json = "1.0.79" -serde_with = { version = "3.4.0", features = ["base64"] } +serde_with = { version = "3.4.0", features = ["base64", "json"], optional 
= true } sha2 = { version = "0.10.6", features = ["oid"] } signature = { version = "2.0" } -sigstore_protobuf_specs = "0.1.0-rc.2" +sigstore_protobuf_specs = { version = "0.3.2", optional = true } thiserror = "1.0.30" tokio = { version = "1.17.0", features = ["rt"] } tokio-util = { version = "0.7.10", features = ["io-util"] } tough = { version = "0.17.1", features = ["http"], optional = true } tracing = "0.1.31" url = "2.2.2" -x509-cert = { version = "0.2.2", features = ["builder", "pem", "std"] } +x509-cert = { version = "0.2.5", features = ["builder", "pem", "std", "sct"] } crypto_secretbox = "0.1.1" zeroize = "1.5.7" rustls-webpki = { version = "0.102.1", features = ["alloc"] } serde_repr = "0.1.16" hex = "0.4.3" json-syntax = { version = "0.12.2", features = ["canonicalize", "serde"] } +tls_codec = { version = "0.4.1", features = ["derive"] } +ring = "0.17.6" [dev-dependencies] anyhow = { version = "1.0", features = ["backtrace"] } diff --git a/examples/cosign/verify/main.rs b/examples/cosign/verify/main.rs index 24980d1b04..ad8b1555d9 100644 --- a/examples/cosign/verify/main.rs +++ b/examples/cosign/verify/main.rs @@ -130,7 +130,7 @@ async fn run_app( let mut client_builder = sigstore::cosign::ClientBuilder::default().with_oci_client_config(oci_client_config); - client_builder = client_builder.with_trust_repository(frd).await?; + client_builder = client_builder.with_trust_repository(frd)?; let cert_chain: Option> = match cli.cert_chain.as_ref() { None => None, @@ -184,7 +184,7 @@ async fn run_app( } if let Some(path_to_cert) = cli.cert.as_ref() { let cert = fs::read(path_to_cert).map_err(|e| anyhow!("Cannot read cert: {:?}", e))?; - let require_rekor_bundle = if !frd.rekor_keys().await?.is_empty() { + let require_rekor_bundle = if !frd.rekor_keys()?.is_empty() { true } else { warn!("certificate based verification is weaker when Rekor integration is disabled"); @@ -229,8 +229,7 @@ async fn fulcio_and_rekor_data(cli: &Cli) -> anyhow::Result = - SigstoreTrustRoot::new(None).await?.prefetch().await; + let repo: sigstore::errors::Result = SigstoreTrustRoot::new(None).await; return Ok(Box::new(repo?)); }; diff --git a/src/bundle/mod.rs b/src/bundle/mod.rs index 2b9b9cb73c..61ad0c48b3 100644 --- a/src/bundle/mod.rs +++ b/src/bundle/mod.rs @@ -12,24 +12,14 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Useful types for Sigstore bundles. +//! Sigstore bundle support. -use std::fmt::Display; +pub use sigstore_protobuf_specs::dev::sigstore::bundle::v1::Bundle; -pub use sigstore_protobuf_specs::Bundle; +mod models; -// Known Sigstore bundle media types. 
-#[derive(Clone, Copy, Debug)] -pub enum Version { - _Bundle0_1, - Bundle0_2, -} +#[cfg(feature = "sign")] +pub mod sign; -impl Display for Version { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.write_str(match &self { - Version::_Bundle0_1 => "application/vnd.dev.sigstore.bundle+json;version=0.1", - Version::Bundle0_2 => "application/vnd.dev.sigstore.bundle+json;version=0.2", - }) - } -} +#[cfg(feature = "verify")] +pub mod verify; diff --git a/src/bundle/models.rs b/src/bundle/models.rs new file mode 100644 index 0000000000..79d27933dc --- /dev/null +++ b/src/bundle/models.rs @@ -0,0 +1,107 @@ +use std::fmt::Display; +use std::str::FromStr; + +use base64::{engine::general_purpose::STANDARD as base64, Engine as _}; +use json_syntax::Print; + +use sigstore_protobuf_specs::dev::sigstore::{ + common::v1::LogId, + rekor::v1::{Checkpoint, InclusionPromise, InclusionProof, KindVersion, TransparencyLogEntry}, +}; + +use crate::rekor::models::{ + log_entry::InclusionProof as RekorInclusionProof, LogEntry as RekorLogEntry, +}; + +// Known Sigstore bundle media types. +#[derive(Clone, Copy, Debug)] +pub enum Version { + Bundle0_1, + Bundle0_2, +} + +impl Display for Version { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_str(match &self { + Version::Bundle0_1 => "application/vnd.dev.sigstore.bundle+json;version=0.1", + Version::Bundle0_2 => "application/vnd.dev.sigstore.bundle+json;version=0.2", + }) + } +} + +impl FromStr for Version { + type Err = (); + + fn from_str(s: &str) -> Result { + match s { + "application/vnd.dev.sigstore.bundle+json;version=0.1" => Ok(Version::Bundle0_1), + "application/vnd.dev.sigstore.bundle+json;version=0.2" => Ok(Version::Bundle0_2), + _ => Err(()), + } + } +} + +#[inline] +fn decode_hex>(hex: S) -> Result, ()> { + hex::decode(hex.as_ref()).or(Err(())) +} + +impl TryFrom for InclusionProof { + type Error = (); + + fn try_from(value: RekorInclusionProof) -> Result { + let hashes = value + .hashes + .iter() + .map(decode_hex) + .collect::, _>>()?; + + Ok(InclusionProof { + checkpoint: Some(Checkpoint { + envelope: value.checkpoint, + }), + hashes, + log_index: value.log_index, + root_hash: decode_hex(value.root_hash)?, + tree_size: value.tree_size, + }) + } +} + +/// Convert log entries returned from Rekor into Sigstore Bundle format entries. 
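+///
+/// Illustrative use (a sketch, not part of the public API surface):
+/// `TransparencyLogEntry::try_from(rekor_log_entry)`, where `rekor_log_entry` is the
+/// `LogEntry` returned by the Rekor client; the conversion fails with `()` when the
+/// entry contains malformed hex or base64 fields.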
+impl TryFrom for TransparencyLogEntry { + type Error = (); + + fn try_from(value: RekorLogEntry) -> Result { + let canonicalized_body = { + let mut body = json_syntax::to_value(value.body).or(Err(()))?; + body.canonicalize(); + body.compact_print().to_string().into_bytes() + }; + let inclusion_promise = Some(InclusionPromise { + signed_entry_timestamp: base64 + .decode(value.verification.signed_entry_timestamp) + .or(Err(()))?, + }); + let inclusion_proof = value + .verification + .inclusion_proof + .map(|p| p.try_into()) + .transpose()?; + + Ok(TransparencyLogEntry { + canonicalized_body, + inclusion_promise, + inclusion_proof, + integrated_time: value.integrated_time, + kind_version: Some(KindVersion { + kind: "hashedrekord".to_owned(), + version: "0.0.1".to_owned(), + }), + log_id: Some(LogId { + key_id: decode_hex(value.log_i_d)?, + }), + log_index: value.log_index, + }) + } +} diff --git a/src/sign.rs b/src/bundle/sign.rs similarity index 52% rename from src/sign.rs rename to src/bundle/sign.rs index e8e17ec407..b60472c568 100644 --- a/src/sign.rs +++ b/src/bundle/sign.rs @@ -12,25 +12,25 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Types for signing artifacts and producing Sigstore Bundles. +//! Types for signing artifacts and producing Sigstore bundles. use std::io::{self, Read}; use std::time::SystemTime; use base64::{engine::general_purpose::STANDARD as base64, Engine as _}; -use json_syntax::Print; +use hex; use p256::NistP256; use pkcs8::der::{Encode, EncodePem}; use sha2::{Digest, Sha256}; use signature::DigestSigner; -use sigstore_protobuf_specs::{ - Bundle, DevSigstoreBundleV1VerificationMaterial, DevSigstoreCommonV1HashOutput, - DevSigstoreCommonV1LogId, DevSigstoreCommonV1MessageSignature, - DevSigstoreCommonV1X509Certificate, DevSigstoreCommonV1X509CertificateChain, - DevSigstoreRekorV1Checkpoint, DevSigstoreRekorV1InclusionPromise, - DevSigstoreRekorV1InclusionProof, DevSigstoreRekorV1KindVersion, - DevSigstoreRekorV1TransparencyLogEntry, +use sigstore_protobuf_specs::dev::sigstore::bundle::v1::bundle; +use sigstore_protobuf_specs::dev::sigstore::bundle::v1::{ + verification_material, Bundle, VerificationMaterial, }; +use sigstore_protobuf_specs::dev::sigstore::common::v1::{ + HashAlgorithm, HashOutput, MessageSignature, X509Certificate, X509CertificateChain, +}; +use sigstore_protobuf_specs::dev::sigstore::rekor::v1::TransparencyLogEntry; use tokio::io::AsyncRead; use tokio_util::io::SyncIoBridge; use url::Url; @@ -38,35 +38,34 @@ use x509_cert::attr::{AttributeTypeAndValue, AttributeValue}; use x509_cert::builder::{Builder, RequestBuilder as CertRequestBuilder}; use x509_cert::ext::pkix as x509_ext; -use crate::bundle::Version; +use crate::bundle::models::Version; use crate::errors::{Result as SigstoreResult, SigstoreError}; use crate::fulcio::oauth::OauthTokenProvider; use crate::fulcio::{self, FulcioClient, FULCIO_ROOT}; use crate::oauth::IdentityToken; use crate::rekor::apis::configuration::Configuration as RekorConfiguration; use crate::rekor::apis::entries_api::create_log_entry; -use crate::rekor::models::LogEntry; use crate::rekor::models::{hashedrekord, proposed_entry::ProposedEntry as ProposedLogEntry}; /// An asynchronous Sigstore signing session. /// /// Sessions hold a provided user identity and key materials tied to that identity. A single -/// session may be used to sign multiple items. For more information, see [`AsyncSigningSession::sign`](Self::sign). 
+/// session may be used to sign multiple items. For more information, see [`SigningSession::sign`]. /// -/// This signing session operates asynchronously. To construct a synchronous [SigningSession], -/// use [`SigningContext::signer()`]. -pub struct AsyncSigningSession<'ctx> { +/// This signing session operates asynchronously. To construct a synchronous [`blocking::SigningSession`], +/// use [`SigningContext::blocking_signer()`]. +pub struct SigningSession<'ctx> { context: &'ctx SigningContext, identity_token: IdentityToken, private_key: ecdsa::SigningKey, certs: fulcio::CertificateResponse, } -impl<'ctx> AsyncSigningSession<'ctx> { +impl<'ctx> SigningSession<'ctx> { async fn new( context: &'ctx SigningContext, identity_token: IdentityToken, - ) -> SigstoreResult> { + ) -> SigstoreResult> { let (private_key, certs) = Self::materials(&context.fulcio, &identity_token).await?; Ok(Self { context, @@ -129,14 +128,13 @@ impl<'ctx> AsyncSigningSession<'ctx> { return Err(SigstoreError::ExpiredSigningSession()); } - // TODO(tnytown): Verify SCT here. + // TODO(tnytown): verify SCT here, sigstore-rs#326 // Sign artifact. let input_hash: &[u8] = &hasher.clone().finalize(); let artifact_signature: p256::ecdsa::Signature = self.private_key.sign_digest(hasher); + let signature_bytes = artifact_signature.to_der().as_bytes().to_owned(); - // Prepare inputs. - let b64_artifact_signature = base64.encode(artifact_signature.to_der()); let cert = &self.certs.cert; // Create the transparency log entry. @@ -144,9 +142,9 @@ impl<'ctx> AsyncSigningSession<'ctx> { api_version: "0.0.1".to_owned(), spec: hashedrekord::Spec { signature: hashedrekord::Signature { - content: b64_artifact_signature.clone(), + content: base64.encode(&signature_bytes), public_key: hashedrekord::PublicKey::new( - base64.encode(cert.to_pem(pkcs8::LineEnding::CRLF)?), + base64.encode(cert.to_pem(pkcs8::LineEnding::LF)?), ), }, data: hashedrekord::Data { @@ -158,22 +156,27 @@ impl<'ctx> AsyncSigningSession<'ctx> { }, }; - let entry = create_log_entry(&self.context.rekor_config, proposed_entry) + let log_entry = create_log_entry(&self.context.rekor_config, proposed_entry) .await .map_err(|err| SigstoreError::RekorClientError(err.to_string()))?; + let log_entry = log_entry + .try_into() + .or(Err(SigstoreError::RekorClientError( + "Rekor returned malformed LogEntry".into(), + )))?; // TODO(tnytown): Maybe run through the verification flow here? See sigstore-rs#296. Ok(SigningArtifact { - input_digest: base64.encode(input_hash), + input_digest: input_hash.to_owned(), cert: cert.to_der()?, - b64_signature: b64_artifact_signature, - log_entry: entry, + signature: signature_bytes, + log_entry, }) } /// Signs for the input with the session's identity. If the identity is expired, - /// [SigstoreError::ExpiredSigningSession] is returned. + /// [`SigstoreError::ExpiredSigningSession`] is returned. pub async fn sign( &self, input: R, @@ -194,48 +197,52 @@ impl<'ctx> AsyncSigningSession<'ctx> { } } -/// A synchronous Sigstore signing session. -/// -/// Sessions hold a provided user identity and key materials tied to that identity. A single -/// session may be used to sign multiple items. For more information, see [`SigningSession::sign`](Self::sign). -/// -/// This signing session operates synchronously, thus it cannot be used in an asynchronous context. -/// To construct an asynchronous [SigningSession], use [`SigningContext::async_signer()`]. 
-pub struct SigningSession<'ctx> { - inner: AsyncSigningSession<'ctx>, - rt: tokio::runtime::Runtime, -} - -impl<'ctx> SigningSession<'ctx> { - fn new(ctx: &'ctx SigningContext, token: IdentityToken) -> SigstoreResult { - let rt = tokio::runtime::Builder::new_current_thread() - .enable_all() - .build()?; - let inner = rt.block_on(AsyncSigningSession::new(ctx, token))?; - Ok(Self { inner, rt }) - } +pub mod blocking { + use super::{SigningSession as AsyncSigningSession, *}; - /// Check if the session's identity token or key material is expired. + /// A synchronous Sigstore signing session. /// - /// If the session is expired, it cannot be used for signing operations, and a new session - /// must be created with a fresh identity token. - pub fn is_expired(&self) -> bool { - self.inner.is_expired() + /// Sessions hold a provided user identity and key materials tied to that identity. A single + /// session may be used to sign multiple items. For more information, see [`SigningSession::sign`]. + /// + /// This signing session operates synchronously, thus it cannot be used in an asynchronous context. + /// To construct an asynchronous [`SigningSession`], use [`SigningContext::signer()`]. + pub struct SigningSession<'ctx> { + inner: AsyncSigningSession<'ctx>, + rt: tokio::runtime::Runtime, } - /// Signs for the input with the session's identity. If the identity is expired, - /// [SigstoreError::ExpiredSigningSession] is returned. - pub fn sign(&self, mut input: R) -> SigstoreResult { - let mut hasher = Sha256::new(); - io::copy(&mut input, &mut hasher)?; - self.rt.block_on(self.inner.sign_digest(hasher)) + impl<'ctx> SigningSession<'ctx> { + pub(crate) fn new(ctx: &'ctx SigningContext, token: IdentityToken) -> SigstoreResult { + let rt = tokio::runtime::Builder::new_current_thread() + .enable_all() + .build()?; + let inner = rt.block_on(AsyncSigningSession::new(ctx, token))?; + Ok(Self { inner, rt }) + } + + /// Check if the session's identity token or key material is expired. + /// + /// If the session is expired, it cannot be used for signing operations, and a new session + /// must be created with a fresh identity token. + pub fn is_expired(&self) -> bool { + self.inner.is_expired() + } + + /// Signs for the input with the session's identity. If the identity is expired, + /// [`SigstoreError::ExpiredSigningSession`] is returned. + pub fn sign(&self, mut input: R) -> SigstoreResult { + let mut hasher = Sha256::new(); + io::copy(&mut input, &mut hasher)?; + self.rt.block_on(self.inner.sign_digest(hasher)) + } } } /// A Sigstore signing context. /// /// Contexts hold Fulcio (CA) and Rekor (CT) configurations which signing sessions can be -/// constructed against. Use [`SigningContext::production`](Self::production) to create a context against +/// constructed against. Use [`SigningContext::production`] to create a context against /// the public-good Sigstore infrastructure. pub struct SigningContext { fulcio: FulcioClient, @@ -243,7 +250,7 @@ pub struct SigningContext { } impl SigningContext { - /// Manually constructs a [SigningContext] from its constituent data. + /// Manually constructs a [`SigningContext`] from its constituent data. pub fn new(fulcio: FulcioClient, rekor_config: RekorConfiguration) -> Self { Self { fulcio, @@ -251,125 +258,76 @@ impl SigningContext { } } - /// Returns a [SigningContext] configured against the public-good production Sigstore + /// Returns a [`SigningContext`] configured against the public-good production Sigstore /// infrastructure. 
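+    ///
+    /// A usage sketch (illustrative only; assumes `token` is an `IdentityToken` obtained
+    /// elsewhere, e.g. through the interactive OAuth flow):
+    ///
+    /// ```ignore
+    /// let context = SigningContext::production()?;
+    /// let artifact = context.blocking_signer(token)?.sign("hello".as_bytes())?;
+    /// let bundle = artifact.to_bundle();
+    /// ```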
- pub fn production() -> Self { - Self::new( + pub fn production() -> SigstoreResult { + Ok(Self::new( FulcioClient::new( Url::parse(FULCIO_ROOT).expect("constant FULCIO root fails to parse!"), crate::fulcio::TokenProvider::Oauth(OauthTokenProvider::default()), ), Default::default(), - ) + )) } - /// Configures and returns an [AsyncSigningSession] with the held context. - pub async fn async_signer( - &self, - identity_token: IdentityToken, - ) -> SigstoreResult { - AsyncSigningSession::new(self, identity_token).await + /// Configures and returns a [`SigningSession`] with the held context. + pub async fn signer(&self, identity_token: IdentityToken) -> SigstoreResult { + SigningSession::new(self, identity_token).await } - /// Configures and returns a [SigningContext] with the held context. + /// Configures and returns a [`blocking::SigningSession`] with the held context. /// - /// Async contexts must use [`SigningContext::async_signer`](Self::async_signer). - pub fn signer(&self, identity_token: IdentityToken) -> SigstoreResult { - SigningSession::new(self, identity_token) + /// Async contexts must use [`SigningContext::signer`]. + pub fn blocking_signer( + &self, + identity_token: IdentityToken, + ) -> SigstoreResult { + blocking::SigningSession::new(self, identity_token) } } /// A signature and its associated metadata. pub struct SigningArtifact { - input_digest: String, + input_digest: Vec, cert: Vec, - b64_signature: String, - log_entry: LogEntry, + signature: Vec, + log_entry: TransparencyLogEntry, } impl SigningArtifact { - /// Consumes the signing artifact and produces a Sigstore [Bundle]. + /// Consumes the signing artifact and produces a Sigstore [`Bundle`]. /// - /// The resulting bundle can be serialized with [serde_json]. + /// The resulting bundle can be serialized with [`serde_json`]. pub fn to_bundle(self) -> Bundle { - #[inline] - fn hex_to_base64>(hex: S) -> String { - let decoded = hex::decode(hex.as_ref()).expect("Malformed data in Rekor response"); - base64.encode(decoded) - } - // NOTE: We explicitly only include the leaf certificate in the bundle's "chain" // here: the specs explicitly forbid the inclusion of the root certificate, // and discourage inclusion of any intermediates (since they're in the root of // trust already). - let x_509_certificate_chain = Some(DevSigstoreCommonV1X509CertificateChain { - certificates: Some(vec![DevSigstoreCommonV1X509Certificate { - raw_bytes: Some(base64.encode(&self.cert)), - }]), - }); - - let inclusion_proof = if let Some(proof) = self.log_entry.verification.inclusion_proof { - let hashes = proof.hashes.iter().map(hex_to_base64).collect(); - Some(DevSigstoreRekorV1InclusionProof { - checkpoint: Some(DevSigstoreRekorV1Checkpoint { - envelope: Some(proof.checkpoint), - }), - hashes: Some(hashes), - log_index: Some(proof.log_index.to_string()), - root_hash: Some(hex_to_base64(proof.root_hash)), - tree_size: Some(proof.tree_size.to_string()), - }) - } else { - None - }; - - let canonicalized_body = { - let mut body = json_syntax::to_value(self.log_entry.body) - .expect("failed to parse constructed Body!"); - body.canonicalize(); - Some(base64.encode(body.compact_print().to_string())) - }; - - // TODO(tnytown): When we fix `sigstore_protobuf_specs`, have the Rekor client APIs convert - // responses into types from the specs as opposed to returning the raw `LogEntry` model type. 
- let tlog_entry = DevSigstoreRekorV1TransparencyLogEntry { - canonicalized_body, - inclusion_promise: Some(DevSigstoreRekorV1InclusionPromise { - // XX: sigstore-python deserializes the SET from base64 here because their protobuf - // library transparently serializes `bytes` fields as base64. - signed_entry_timestamp: Some(self.log_entry.verification.signed_entry_timestamp), - }), - inclusion_proof, - integrated_time: Some(self.log_entry.integrated_time.to_string()), - kind_version: Some(DevSigstoreRekorV1KindVersion { - kind: Some("hashedrekord".to_owned()), - version: Some("0.0.1".to_owned()), - }), - log_id: Some(DevSigstoreCommonV1LogId { - key_id: Some(hex_to_base64(self.log_entry.log_i_d)), - }), - log_index: Some(self.log_entry.log_index.to_string()), + let x509_certificate_chain = X509CertificateChain { + certificates: vec![X509Certificate { + raw_bytes: self.cert, + }], }; - let verification_material = Some(DevSigstoreBundleV1VerificationMaterial { - public_key: None, + let verification_material = Some(VerificationMaterial { timestamp_verification_data: None, - tlog_entries: Some(vec![tlog_entry]), - x_509_certificate_chain, + tlog_entries: vec![self.log_entry], + content: Some(verification_material::Content::X509CertificateChain( + x509_certificate_chain, + )), }); - let message_signature = Some(DevSigstoreCommonV1MessageSignature { - message_digest: Some(DevSigstoreCommonV1HashOutput { - algorithm: Some("SHA2_256".to_owned()), - digest: Some(self.input_digest), + let message_signature = MessageSignature { + message_digest: Some(HashOutput { + algorithm: HashAlgorithm::Sha2256.into(), + digest: self.input_digest, }), - signature: Some(self.b64_signature), - }); + signature: self.signature, + }; Bundle { - dsse_envelope: None, - media_type: Some(Version::Bundle0_2.to_string()), - message_signature, + media_type: Version::Bundle0_2.to_string(), verification_material, + content: Some(bundle::Content::MessageSignature(message_signature)), } } } diff --git a/src/bundle/verify/mod.rs b/src/bundle/verify/mod.rs new file mode 100644 index 0000000000..a6ba64330f --- /dev/null +++ b/src/bundle/verify/mod.rs @@ -0,0 +1,26 @@ +// +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Types for verifying Sigstore bundles with policies. + +mod models; + +pub use models::{VerificationError, VerificationResult}; + +pub mod policy; +pub use policy::{PolicyError, VerificationPolicy}; + +mod verifier; +pub use verifier::*; diff --git a/src/bundle/verify/models.rs b/src/bundle/verify/models.rs new file mode 100644 index 0000000000..198e9c05ee --- /dev/null +++ b/src/bundle/verify/models.rs @@ -0,0 +1,291 @@ +// +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::str::FromStr; + +use crate::{ + bundle::{models::Version as BundleVersion, Bundle}, + crypto::certificate::{is_leaf, is_root_ca, CertificateValidationError}, + rekor::models as rekor, +}; + +use base64::{engine::general_purpose::STANDARD as base64, Engine as _}; +use sigstore_protobuf_specs::dev::sigstore::{ + bundle::v1::{bundle, verification_material}, + rekor::v1::{InclusionProof, TransparencyLogEntry}, +}; +use thiserror::Error; +use tracing::{debug, error, warn}; +use x509_cert::{ + der::{Decode, EncodePem}, + Certificate, +}; + +use super::policy::PolicyError; + +#[derive(Error, Debug)] +pub enum Bundle01ProfileErrorKind { + #[error("bundle must contain inclusion promise")] + InclusionPromiseMissing, +} + +#[derive(Error, Debug)] +pub enum Bundle02ProfileErrorKind { + #[error("bundle must contain inclusion proof")] + InclusionProofMissing, + + #[error("bundle must contain checkpoint")] + CheckpointMissing, +} + +#[derive(Error, Debug)] +#[error(transparent)] +pub enum BundleProfileErrorKind { + Bundle01Profile(#[from] Bundle01ProfileErrorKind), + + Bundle02Profile(#[from] Bundle02ProfileErrorKind), + + #[error("unknown bundle profile {0}")] + Unknown(String), +} + +#[derive(Error, Debug)] +pub enum BundleErrorKind { + #[error("bundle missing VerificationMaterial")] + VerificationMaterialMissing, + + #[error("bundle includes unsupported VerificationMaterial::Content")] + VerificationMaterialContentUnsupported, + + #[error("bundle's certificate(s) are malformed")] + CertificateMalformed(#[source] x509_cert::der::Error), + + #[error("bundle contains a root certificate")] + RootInChain, + + #[error("bundle does not contain the signing (leaf) certificate")] + NoLeaf(#[source] CertificateValidationError), + + #[error("bundle does not contain any certificates")] + CertificatesMissing, + + #[error("bundle does not contain signature")] + SignatureMissing, + + #[error("bundle includes unsupported DSSE signature")] + DsseUnsupported, + + #[error("bundle needs 1 tlog entry, got {0}")] + TlogEntry(usize), + + #[error(transparent)] + BundleProfile(#[from] BundleProfileErrorKind), +} + +#[derive(Error, Debug)] +pub enum CertificateErrorKind { + #[error("certificate malformed")] + Malformed(#[source] webpki::Error), + + #[error("certificate expired before time of signing")] + Expired, + + #[error("certificate verification failed")] + VerificationFailed(#[source] webpki::Error), +} + +#[derive(Error, Debug)] +pub enum SignatureErrorKind { + #[error("unsupported signature algorithm")] + AlgoUnsupported(#[source] crate::errors::SigstoreError), + + #[error("signature verification failed")] + VerificationFailed(#[source] crate::errors::SigstoreError), + + #[error("signature transparency materials are inconsistent")] + Transparency, +} + +#[derive(Error, Debug)] +#[error(transparent)] +pub enum VerificationError { + #[error("unable to read input")] + Input(#[source] std::io::Error), + + Bundle(#[from] BundleErrorKind), + + Certificate(#[from] CertificateErrorKind), + + Signature(#[from] SignatureErrorKind), + + Policy(#[from] PolicyError), +} + +pub type VerificationResult = 
Result<(), VerificationError>; + +pub struct CheckedBundle { + pub(crate) certificate: Certificate, + pub(crate) signature: Vec, + + tlog_entry: TransparencyLogEntry, +} + +impl TryFrom for CheckedBundle { + type Error = BundleErrorKind; + + fn try_from(input: Bundle) -> Result { + let (content, mut tlog_entries) = match input.verification_material { + Some(m) => (m.content, m.tlog_entries), + _ => return Err(BundleErrorKind::VerificationMaterialMissing), + }; + + // Parse the certificates. The first entry in the chain MUST be a leaf certificate, and the + // rest of the chain MUST NOT include a root CA or any intermediate CAs that appear in an + // independent root of trust. + let certs = match content { + Some(verification_material::Content::X509CertificateChain(ch)) => ch.certificates, + Some(verification_material::Content::Certificate(cert)) => { + vec![cert] + } + _ => return Err(BundleErrorKind::VerificationMaterialContentUnsupported), + }; + let certs = certs + .iter() + .map(|c| c.raw_bytes.as_slice()) + .map(Certificate::from_der) + .collect::, _>>() + .map_err(BundleErrorKind::CertificateMalformed)?; + + let [leaf_cert, chain_certs @ ..] = &certs[..] else { + return Err(BundleErrorKind::CertificatesMissing); + }; + + is_leaf(leaf_cert).map_err(BundleErrorKind::NoLeaf)?; + + for chain_cert in chain_certs { + if is_root_ca(chain_cert).is_ok() { + return Err(BundleErrorKind::RootInChain); + } + } + + let signature = match input.content.ok_or(BundleErrorKind::SignatureMissing)? { + bundle::Content::MessageSignature(s) => s.signature, + _ => return Err(BundleErrorKind::DsseUnsupported), + }; + + if tlog_entries.len() != 1 { + return Err(BundleErrorKind::TlogEntry(tlog_entries.len())); + } + let tlog_entry = tlog_entries.remove(0); + + let (inclusion_promise, inclusion_proof) = + (&tlog_entry.inclusion_promise, &tlog_entry.inclusion_proof); + + // `inclusion_proof` is a required field in the current protobuf spec, + // but older versions of Rekor didn't provide it. Check invariants + // here and selectively allow for this case. + // + // https://github.com/sigstore/sigstore-python/pull/634#discussion_r1182769140 + let check_01_bundle = || -> Result<(), BundleProfileErrorKind> { + if inclusion_promise.is_none() { + return Err(Bundle01ProfileErrorKind::InclusionPromiseMissing)?; + } + + if matches!( + inclusion_proof, + Some(InclusionProof { + checkpoint: None, + .. + }) + ) { + debug!("0.1 bundle contains inclusion proof without checkpoint"); + } + + Ok(()) + }; + let check_02_bundle = || -> Result<(), BundleProfileErrorKind> { + if inclusion_proof.is_none() { + error!("bundle must contain inclusion proof"); + return Err(Bundle02ProfileErrorKind::InclusionProofMissing)?; + } + + if matches!( + inclusion_proof, + Some(InclusionProof { + checkpoint: None, + .. + }) + ) { + error!("bundle must contain checkpoint"); + return Err(Bundle02ProfileErrorKind::CheckpointMissing)?; + } + + Ok(()) + }; + match BundleVersion::from_str(&input.media_type) { + Ok(BundleVersion::Bundle0_1) => check_01_bundle()?, + Ok(BundleVersion::Bundle0_2) => check_02_bundle()?, + Err(_) => return Err(BundleProfileErrorKind::Unknown(input.media_type))?, + } + + Ok(Self { + certificate: leaf_cert.clone(), + signature, + tlog_entry, + }) + } +} + +impl CheckedBundle { + /// Retrieves and checks consistency of the bundle's [TransparencyLogEntry]. 
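+    ///
+    /// Returns `None` if the entry is inconsistent with the bundle's certificate and signature,
+    /// or if the bundle would require an online Rekor lookup (not implemented yet). A typical
+    /// call looks like `checked.tlog_entry(offline, &input_digest)`, where `input_digest` is
+    /// the SHA-256 digest of the verified input (names here are illustrative).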
+ pub fn tlog_entry(&self, offline: bool, input_digest: &[u8]) -> Option<&TransparencyLogEntry> { + let base64_pem_certificate = + base64.encode(self.certificate.to_pem(pkcs8::LineEnding::LF).ok()?); + + let expected_entry = rekor::Hashedrekord { + kind: "hashedrekord".to_owned(), + api_version: "0.0.1".to_owned(), + spec: rekor::hashedrekord::Spec { + signature: rekor::hashedrekord::Signature { + content: base64.encode(&self.signature), + public_key: rekor::hashedrekord::PublicKey::new(base64_pem_certificate), + }, + data: rekor::hashedrekord::Data { + hash: rekor::hashedrekord::Hash { + algorithm: rekor::hashedrekord::AlgorithmKind::sha256, + value: hex::encode(input_digest), + }, + }, + }, + }; + + let entry = if !offline && self.tlog_entry.inclusion_proof.is_none() { + warn!("online rekor fetching is not implemented yet, but is necessary for this bundle"); + return None; + } else { + &self.tlog_entry + }; + + let actual: serde_json::Value = + serde_json::from_slice(&self.tlog_entry.canonicalized_body).ok()?; + let expected: serde_json::Value = serde_json::to_value(expected_entry).ok()?; + + if actual != expected { + return None; + } + + Some(entry) + } +} diff --git a/src/bundle/verify/policy.rs b/src/bundle/verify/policy.rs new file mode 100644 index 0000000000..267badc042 --- /dev/null +++ b/src/bundle/verify/policy.rs @@ -0,0 +1,304 @@ +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Verification constraints for certificate metadata. +//! +//! + +use const_oid::ObjectIdentifier; +use thiserror::Error; +use tracing::warn; +use x509_cert::ext::pkix::{name::GeneralName, SubjectAltName}; + +macro_rules! oids { + ($($name:ident = $value:literal),+) => { + $(const $name: ObjectIdentifier = ObjectIdentifier::new_unwrap($value);)+ + }; +} + +macro_rules! impl_policy { + ($policy:ident, $oid:expr, $doc:literal) => { + #[doc = $doc] + pub struct $policy(pub String); + + impl const_oid::AssociatedOid for $policy { + const OID: ObjectIdentifier = $oid; + } + + impl SingleX509ExtPolicy for $policy { + fn new>(val: S) -> Self { + Self(val.as_ref().to_owned()) + } + + fn name() -> &'static str { + stringify!($policy) + } + + fn value(&self) -> &str { + &self.0 + } + } + }; +} + +oids! 
{ + OIDC_ISSUER_OID = "1.3.6.1.4.1.57264.1.1", + OIDC_GITHUB_WORKFLOW_TRIGGER_OID = "1.3.6.1.4.1.57264.1.2", + OIDC_GITHUB_WORKFLOW_SHA_OID = "1.3.6.1.4.1.57264.1.3", + OIDC_GITHUB_WORKFLOW_NAME_OID = "1.3.6.1.4.1.57264.1.4", + OIDC_GITHUB_WORKFLOW_REPOSITORY_OID = "1.3.6.1.4.1.57264.1.5", + OIDC_GITHUB_WORKFLOW_REF_OID = "1.3.6.1.4.1.57264.1.6", + OTHERNAME_OID = "1.3.6.1.4.1.57264.1.7" + +} + +#[derive(Error, Debug)] +pub enum PolicyError { + #[error("did not find exactly 1 of the required extension in the certificate")] + ExtensionNotFound, + + #[error("certificate's {extension} does not match (got {actual}, expected {expected})")] + ExtensionCheckFailed { + extension: String, + expected: String, + actual: String, + }, + + #[error("{0} of {total} policies failed: {1}\n- ", + errors.len(), + errors.iter().map(|e| e.to_string()).collect::>().join("\n- ") + )] + AllOf { + total: usize, + errors: Vec, + }, + + #[error("0 of {total} policies succeeded")] + AnyOf { total: usize }, +} + +pub type PolicyResult = Result<(), PolicyError>; + +/// A policy that checks a single textual value against a X.509 extension. +pub trait SingleX509ExtPolicy { + fn new>(val: S) -> Self; + fn name() -> &'static str; + fn value(&self) -> &str; +} + +impl VerificationPolicy for T { + fn verify(&self, cert: &x509_cert::Certificate) -> PolicyResult { + let extensions = cert.tbs_certificate.extensions.as_deref().unwrap_or(&[]); + let mut extensions = extensions.iter().filter(|ext| ext.extn_id == T::OID); + + // Check for exactly one extension. + let (Some(ext), None) = (extensions.next(), extensions.next()) else { + return Err(PolicyError::ExtensionNotFound); + }; + + // Parse raw string without DER encoding. + let val = std::str::from_utf8(ext.extn_value.as_bytes()) + .or(Err(PolicyError::ExtensionNotFound))?; + + if val != self.value() { + return Err(PolicyError::ExtensionCheckFailed { + extension: T::name().to_owned(), + expected: self.value().to_owned(), + actual: val.to_owned(), + }); + } + + Ok(()) + } +} + +impl_policy!( + OIDCIssuer, + OIDC_ISSUER_OID, + "Checks the certificate's OIDC issuer." +); + +impl_policy!( + GitHubWorkflowTrigger, + OIDC_GITHUB_WORKFLOW_TRIGGER_OID, + "Checks the certificate's GitHub Actions workflow trigger." +); + +impl_policy!( + GitHubWorkflowSHA, + OIDC_GITHUB_WORKFLOW_SHA_OID, + "Checks the certificate's GitHub Actions workflow commit SHA." +); + +impl_policy!( + GitHubWorkflowName, + OIDC_GITHUB_WORKFLOW_NAME_OID, + "Checks the certificate's GitHub Actions workflow name." +); + +impl_policy!( + GitHubWorkflowRepository, + OIDC_GITHUB_WORKFLOW_REPOSITORY_OID, + "Checks the certificate's GitHub Actions workflow repository." +); + +impl_policy!( + GitHubWorkflowRef, + OIDC_GITHUB_WORKFLOW_REF_OID, + "Checks the certificate's GitHub Actions workflow ref." +); + +/// An interface that all policies must conform to. +pub trait VerificationPolicy { + fn verify(&self, cert: &x509_cert::Certificate) -> PolicyResult; +} + +/// The "any of" policy, corresponding to a logical OR between child policies. +/// +/// An empty list of child policies is considered trivially invalid. 
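+///
+/// A construction sketch with placeholder identities (illustrative only):
+///
+/// ```ignore
+/// let a = Identity::new("release@example.com", "https://accounts.example.com");
+/// let b = Identity::new("ci@example.com", "https://token.actions.githubusercontent.com");
+/// let policies: [&dyn VerificationPolicy; 2] = [&a, &b];
+/// let either = AnyOf::new(policies);
+/// ```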
+pub struct AnyOf<'a> {
+    children: Vec<&'a dyn VerificationPolicy>,
+}
+
+impl<'a> AnyOf<'a> {
+    pub fn new<I>(policies: I) -> Self
+    where
+        I: IntoIterator<Item = &'a dyn VerificationPolicy>,
+    {
+        Self {
+            children: policies.into_iter().collect(),
+        }
+    }
+}
+
+impl VerificationPolicy for AnyOf<'_> {
+    fn verify(&self, cert: &x509_cert::Certificate) -> PolicyResult {
+        self.children
+            .iter()
+            .find(|policy| policy.verify(cert).is_ok())
+            .map_or(
+                Err(PolicyError::AnyOf {
+                    total: self.children.len(),
+                }),
+                |_| Ok(()),
+            )
+    }
+}
+
+/// The "all of" policy, corresponding to a logical AND between child policies.
+///
+/// An empty list of child policies is considered trivially invalid.
+pub struct AllOf<'a> {
+    children: Vec<&'a dyn VerificationPolicy>,
+}
+
+impl<'a> AllOf<'a> {
+    pub fn new<I>(policies: I) -> Option<Self>
+    where
+        I: IntoIterator<Item = &'a dyn VerificationPolicy>,
+    {
+        let children: Vec<_> = policies.into_iter().collect();
+
+        // Without this, we'd be able to construct an `AllOf` containing an empty list of child
+        // policies. This is almost certainly not what the user wants and is a potential source
+        // of API misuse, so we explicitly disallow it.
+        if children.is_empty() {
+            warn!("attempted to construct an AllOf with an empty list of child policies");
+            return None;
+        }
+
+        Some(Self { children })
+    }
+}
+
+impl VerificationPolicy for AllOf<'_> {
+    fn verify(&self, cert: &x509_cert::Certificate) -> PolicyResult {
+        let results = self.children.iter().map(|policy| policy.verify(cert).err());
+        let failures: Vec<_> = results.flatten().collect();
+
+        if !failures.is_empty() {
+            return Err(PolicyError::AllOf {
+                total: self.children.len(),
+                errors: failures,
+            });
+        }
+
+        Ok(())
+    }
+}
+
+pub(crate) struct UnsafeNoOp;
+
+impl VerificationPolicy for UnsafeNoOp {
+    fn verify(&self, _cert: &x509_cert::Certificate) -> PolicyResult {
+        warn!("unsafe (no-op) verification policy used! no verification performed!");
+        Ok(())
+    }
+}
+
+/// Verifies the certificate's "identity", corresponding to the X.509v3 SAN.
+/// Identities are verified modulo an OIDC issuer, so the issuer's URI
+/// is also required.
+///
+/// Supported SAN types include emails, URIs, and Sigstore-specific "other names".
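+///
+/// An illustrative construction with placeholder values:
+/// `Identity::new("user@example.com", "https://accounts.example.com")`.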
+pub struct Identity { + identity: String, + issuer: OIDCIssuer, +} + +impl Identity { + pub fn new(identity: A, issuer: B) -> Self + where + A: AsRef, + B: AsRef, + { + Self { + identity: identity.as_ref().to_owned(), + issuer: OIDCIssuer::new(issuer), + } + } +} + +impl VerificationPolicy for Identity { + fn verify(&self, cert: &x509_cert::Certificate) -> PolicyResult { + self.issuer.verify(cert)?; + + let (_, san): (bool, SubjectAltName) = match cert.tbs_certificate.get() { + Ok(Some(result)) => result, + _ => return Err(PolicyError::ExtensionNotFound), + }; + + let names: Vec<_> = san + .0 + .iter() + .filter_map(|name| match name { + GeneralName::Rfc822Name(name) => Some(name.as_str()), + GeneralName::UniformResourceIdentifier(name) => Some(name.as_str()), + GeneralName::OtherName(name) if name.type_id == OTHERNAME_OID => { + std::str::from_utf8(name.value.value()).ok() + } + _ => None, + }) + .collect(); + + if !names.contains(&self.identity.as_str()) { + return Err(PolicyError::ExtensionCheckFailed { + extension: "SubjectAltName".to_owned(), + expected: self.identity.clone(), + actual: names.join(", "), + }); + } + + Ok(()) + } +} diff --git a/src/bundle/verify/verifier.rs b/src/bundle/verify/verifier.rs new file mode 100644 index 0000000000..0f4a4a5526 --- /dev/null +++ b/src/bundle/verify/verifier.rs @@ -0,0 +1,291 @@ +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Verifiers: async and blocking. + +use std::io::{self, Read}; + +use sha2::{Digest, Sha256}; +use tokio::io::{AsyncRead, AsyncReadExt}; +use tracing::debug; +use webpki::types::{CertificateDer, UnixTime}; +use x509_cert::der::Encode; + +use crate::{ + bundle::Bundle, + crypto::{CertificatePool, CosignVerificationKey, Signature}, + errors::Result as SigstoreResult, + rekor::apis::configuration::Configuration as RekorConfiguration, + trust::TrustRoot, +}; + +#[cfg(feature = "sigstore-trust-root")] +use crate::trust::sigstore::SigstoreTrustRoot; + +use super::{ + models::{CertificateErrorKind, CheckedBundle, SignatureErrorKind}, + policy::VerificationPolicy, + VerificationError, VerificationResult, +}; + +/// An asynchronous Sigstore verifier. +/// +/// For synchronous usage, see [`Verifier`]. +pub struct Verifier { + #[allow(dead_code)] + rekor_config: RekorConfiguration, + cert_pool: CertificatePool, +} + +impl Verifier { + /// Constructs a [`Verifier`]. + /// + /// For verifications against the public-good trust root, use [`Verifier::production()`]. + pub fn new( + rekor_config: RekorConfiguration, + trust_repo: R, + ) -> SigstoreResult { + let cert_pool = CertificatePool::from_certificates(trust_repo.fulcio_certs()?, [])?; + + Ok(Self { + rekor_config, + cert_pool, + }) + } + + /// Verifies an input digest against the given Sigstore Bundle, ensuring conformance to the + /// provided [`VerificationPolicy`]. + pub async fn verify_digest
<P>
( + &self, + input_digest: Sha256, + bundle: Bundle, + policy: &P, + offline: bool, + ) -> VerificationResult + where + P: VerificationPolicy, + { + let input_digest = input_digest.finalize(); + let materials: CheckedBundle = bundle.try_into()?; + + // In order to verify an artifact, we need to achieve the following: + // + // 1) Verify that the signing certificate is signed by the certificate + // chain and that the signing certificate was valid at the time + // of signing. + // 2) Verify that the signing certificate belongs to the signer. + // 3) Verify that the artifact signature was signed by the public key in the + // signing certificate. + // 4) Verify that the Rekor entry is consistent with the other signing + // materials (preventing CVE-2022-36056) + // 5) Verify the inclusion proof supplied by Rekor for this artifact, + // if we're doing online verification. + // 6) Verify the Signed Entry Timestamp (SET) supplied by Rekor for this + // artifact. + // 7) Verify that the signing certificate was valid at the time of + // signing by comparing the expiry against the integrated timestamp. + + // 1) Verify that the signing certificate is signed by the certificate + // chain and that the signing certificate was valid at the time + // of signing. + let tbs_certificate = &materials.certificate.tbs_certificate; + let issued_at = tbs_certificate.validity.not_before.to_unix_duration(); + let cert_der: CertificateDer = materials + .certificate + .to_der() + .expect("failed to DER-encode constructed Certificate!") + .into(); + let ee_cert = (&cert_der) + .try_into() + .map_err(CertificateErrorKind::Malformed)?; + + let _trusted_chain = self + .cert_pool + .verify_cert_with_time(&ee_cert, UnixTime::since_unix_epoch(issued_at)) + .map_err(CertificateErrorKind::VerificationFailed)?; + + debug!("signing certificate chains back to trusted root"); + + // TODO(tnytown): verify SCT here, sigstore-rs#326 + + // 2) Verify that the signing certificate belongs to the signer. + policy.verify(&materials.certificate)?; + debug!("signing certificate conforms to policy"); + + // 3) Verify that the signature was signed by the public key in the signing certificate + let signing_key: CosignVerificationKey = (&tbs_certificate.subject_public_key_info) + .try_into() + .map_err(SignatureErrorKind::AlgoUnsupported)?; + + let verify_sig = + signing_key.verify_prehash(Signature::Raw(&materials.signature), &input_digest); + verify_sig.map_err(SignatureErrorKind::VerificationFailed)?; + + debug!("signature corresponds to public key"); + + // 4) Verify that the Rekor entry is consistent with the other signing + // materials + let log_entry = materials + .tlog_entry(offline, &input_digest) + .ok_or(SignatureErrorKind::Transparency)?; + debug!("log entry is consistent with other materials"); + + // 5) Verify the inclusion proof supplied by Rekor for this artifact, + // if we're doing online verification. + // TODO(tnytown): Merkle inclusion; sigstore-rs#285 + + // 6) Verify the Signed Entry Timestamp (SET) supplied by Rekor for this + // artifact. + // TODO(tnytown) SET verification; sigstore-rs#285 + + // 7) Verify that the signing certificate was valid at the time of + // signing by comparing the expiry against the integrated timestamp. 
+ let integrated_time = log_entry.integrated_time as u64; + let not_before = tbs_certificate + .validity + .not_before + .to_unix_duration() + .as_secs(); + let not_after = tbs_certificate + .validity + .not_after + .to_unix_duration() + .as_secs(); + if integrated_time < not_before || integrated_time > not_after { + return Err(CertificateErrorKind::Expired)?; + } + debug!("data signed during validity period"); + + debug!("successfully verified!"); + Ok(()) + } + + /// Verifies an input against the given Sigstore Bundle, ensuring conformance to the provided + /// [`VerificationPolicy`]. + pub async fn verify( + &self, + mut input: R, + bundle: Bundle, + policy: &P, + offline: bool, + ) -> VerificationResult + where + R: AsyncRead + Unpin + Send, + P: VerificationPolicy, + { + // arbitrary buffer size, chosen to be a multiple of the digest size. + let mut buf = [0u8; 1024]; + let mut hasher = Sha256::new(); + + loop { + match input + .read(&mut buf) + .await + .map_err(VerificationError::Input)? + { + 0 => break, + n => hasher.update(&buf[..n]), + } + } + + self.verify_digest(hasher, bundle, policy, offline).await + } +} + +impl Verifier { + /// Constructs an [`Verifier`] against the public-good trust root. + #[cfg(feature = "sigstore-trust-root")] + pub async fn production() -> SigstoreResult { + let updater = SigstoreTrustRoot::new(None).await?; + + Verifier::new(Default::default(), updater) + } +} + +pub mod blocking { + use super::{Verifier as AsyncVerifier, *}; + + /// A synchronous Sigstore verifier. + pub struct Verifier { + inner: AsyncVerifier, + rt: tokio::runtime::Runtime, + } + + impl Verifier { + /// Constructs a synchronous Sigstore verifier. + /// + /// For verifications against the public-good trust root, use [`Verifier::production()`]. + pub fn new( + rekor_config: RekorConfiguration, + trust_repo: R, + ) -> SigstoreResult { + let rt = tokio::runtime::Builder::new_current_thread() + .enable_all() + .build()?; + let inner = AsyncVerifier::new(rekor_config, trust_repo)?; + + Ok(Self { rt, inner }) + } + + /// Verifies an input digest against the given Sigstore Bundle, ensuring conformance to the + /// provided [`VerificationPolicy`]. + pub fn verify_digest
<P>
( + &self, + input_digest: Sha256, + bundle: Bundle, + policy: &P, + offline: bool, + ) -> VerificationResult + where + P: VerificationPolicy, + { + self.rt.block_on( + self.inner + .verify_digest(input_digest, bundle, policy, offline), + ) + } + + /// Verifies an input against the given Sigstore Bundle, ensuring conformance to the provided + /// [`VerificationPolicy`]. + pub fn verify( + &self, + mut input: R, + bundle: Bundle, + policy: &P, + offline: bool, + ) -> VerificationResult + where + R: Read, + P: VerificationPolicy, + { + let mut hasher = Sha256::new(); + io::copy(&mut input, &mut hasher).map_err(VerificationError::Input)?; + + self.verify_digest(hasher, bundle, policy, offline) + } + } + + impl Verifier { + /// Constructs a synchronous [`Verifier`] against the public-good trust root. + #[cfg(feature = "sigstore-trust-root")] + pub fn production() -> SigstoreResult { + let rt = tokio::runtime::Builder::new_current_thread() + .enable_all() + .build()?; + let inner = rt.block_on(AsyncVerifier::production())?; + + Ok(Verifier { inner, rt }) + } + } +} diff --git a/src/cosign/client.rs b/src/cosign/client.rs index af70bf457b..1c97e113af 100644 --- a/src/cosign/client.rs +++ b/src/cosign/client.rs @@ -37,15 +37,15 @@ pub const CONFIG_DATA: &str = "{}"; /// Cosign Client /// /// Instances of `Client` can be built via [`sigstore::cosign::ClientBuilder`](crate::cosign::ClientBuilder). -pub struct Client<'a> { +pub struct Client { pub(crate) registry_client: Box, pub(crate) rekor_pub_key: Option, - pub(crate) fulcio_cert_pool: Option>, + pub(crate) fulcio_cert_pool: Option, } #[cfg_attr(not(target_arch = "wasm32"), async_trait)] #[cfg_attr(target_arch = "wasm32", async_trait(?Send))] -impl CosignCapabilities for Client<'_> { +impl CosignCapabilities for Client { async fn triangulate( &mut self, image: &OciReference, @@ -140,7 +140,7 @@ impl CosignCapabilities for Client<'_> { } } -impl Client<'_> { +impl Client { /// Internal helper method used to fetch data from an OCI registry async fn fetch_manifest_and_layers( &mut self, @@ -177,7 +177,7 @@ mod tests { use crate::crypto::SigningScheme; use crate::mock_client::test::MockOciClient; - fn build_test_client(mock_client: MockOciClient) -> Client<'static> { + fn build_test_client(mock_client: MockOciClient) -> Client { let rekor_pub_key = CosignVerificationKey::from_pem(REKOR_PUB_KEY.as_bytes(), &SigningScheme::default()) .expect("Cannot create CosignVerificationKey"); diff --git a/src/cosign/client_builder.rs b/src/cosign/client_builder.rs index d37bca7cf9..1a688d9139 100644 --- a/src/cosign/client_builder.rs +++ b/src/cosign/client_builder.rs @@ -28,7 +28,7 @@ use crate::trust::TrustRoot; /// ## Rekor integration /// /// Rekor integration can be enabled by specifying Rekor's public key. -/// This can be provided via a [`crate::sigstore::ManualTrustRoot`]. +/// This can be provided via a [`crate::trust::ManualTrustRoot`]. /// /// > Note well: the [`sigstore`](crate::sigstore) module provides helper structs and methods /// > to obtain this data from the official TUF repository of the Sigstore project. @@ -72,15 +72,12 @@ impl<'a> ClientBuilder<'a> { /// /// Enables Fulcio and Rekor integration with the given trust repository. /// See [crate::sigstore::TrustRoot] for more details on trust repositories. 
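+    ///
+    /// For example (illustrative, with `trust_root` being any type implementing
+    /// [crate::trust::TrustRoot]):
+    /// `let builder = ClientBuilder::default().with_trust_repository(&trust_root)?;`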
- pub async fn with_trust_repository( - mut self, - repo: &'a R, - ) -> Result { - let rekor_keys = repo.rekor_keys().await?; + pub fn with_trust_repository(mut self, repo: &'a R) -> Result { + let rekor_keys = repo.rekor_keys()?; if !rekor_keys.is_empty() { self.rekor_pub_key = Some(rekor_keys[0]); } - self.fulcio_certs = repo.fulcio_certs().await?; + self.fulcio_certs = repo.fulcio_certs()?; Ok(self) } @@ -94,7 +91,7 @@ impl<'a> ClientBuilder<'a> { self } - pub fn build(self) -> Result> { + pub fn build(self) -> Result { let rekor_pub_key = match self.rekor_pub_key { None => { info!("Rekor public key not provided. Rekor integration disabled"); diff --git a/src/cosign/mod.rs b/src/cosign/mod.rs index 2debf012f4..f05e416899 100644 --- a/src/cosign/mod.rs +++ b/src/cosign/mod.rs @@ -335,7 +335,7 @@ TNMea7Ix/stJ5TfcLLeABLE4BNJOsQ4vnBHJ #[cfg(feature = "test-registry")] const SIGNED_IMAGE: &str = "busybox:1.34"; - pub(crate) fn get_fulcio_cert_pool() -> CertificatePool<'static> { + pub(crate) fn get_fulcio_cert_pool() -> CertificatePool { fn pem_to_der<'a>(input: &'a str) -> CertificateDer<'a> { let pem_cert = pem::parse(input).unwrap(); assert_eq!(pem_cert.tag(), "CERTIFICATE"); @@ -642,7 +642,7 @@ TNMea7Ix/stJ5TfcLLeABLE4BNJOsQ4vnBHJ } #[cfg(feature = "test-registry")] - async fn prepare_image_to_be_signed(client: &mut Client<'_>, image_ref: &OciReference) { + async fn prepare_image_to_be_signed(client: &mut Client, image_ref: &OciReference) { let data = client .registry_client .pull( diff --git a/src/crypto/certificate.rs b/src/crypto/certificate.rs index 0b5685764e..8c02209e08 100644 --- a/src/crypto/certificate.rs +++ b/src/crypto/certificate.rs @@ -15,8 +15,9 @@ use chrono::{DateTime, Utc}; use const_oid::db::rfc5912::ID_KP_CODE_SIGNING; +use thiserror::Error; use x509_cert::{ - ext::pkix::{ExtendedKeyUsage, KeyUsage, KeyUsages, SubjectAltName}, + ext::pkix::{constraints, ExtendedKeyUsage, KeyUsage, KeyUsages, SubjectAltName}, Certificate, }; @@ -121,6 +122,198 @@ fn verify_expiration(certificate: &Certificate, integrated_time: i64) -> Result< Ok(()) } +#[derive(Debug, Error)] +pub enum ExtensionErrorKind { + #[error("certificate missing extension: {0}")] + Missing(&'static str), + + #[error("certificate extension bit not asserted: {0}")] + BitUnset(&'static str), + + #[error("certificate's {0} extension not marked as critical")] + NotCritical(&'static str), +} + +#[derive(Debug, Error)] +pub enum NotLeafErrorKind { + #[error("certificate is a CA: CAs are not leaves")] + IsCA, +} + +#[derive(Debug, Error)] +pub enum NotCAErrorKind { + #[error("certificate is not a CA: CAs must assert cA and keyCertSign")] + NotCA, + + #[error("certificate is not a root CA")] + NotRootCA, + + #[error("certificate in invalid state: cA={ca}, keyCertSign={key_cert_sign}")] + Invalid { ca: bool, key_cert_sign: bool }, +} + +#[derive(Debug, Error)] +#[error(transparent)] +pub enum CertificateValidationError { + #[error("only X509 V3 certificates are supported")] + VersionUnsupported, + + #[error("malformed certificate")] + Malformed(#[source] x509_cert::der::Error), + + NotLeaf(#[from] NotLeafErrorKind), + + NotCA(#[from] NotCAErrorKind), + + Extension(#[from] ExtensionErrorKind), +} + +/// Check if the given certificate is a leaf in the context of the Sigstore profile. +/// +/// * It is not a root or intermediate CA; +/// * It has `keyUsage.digitalSignature` +/// * It has `CODE_SIGNING` as an `ExtendedKeyUsage`. +/// +/// This function does not evaluate the trustworthiness of the certificate. 
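+///
+/// For example, the bundle verifier calls `is_leaf(leaf_cert)` on the first certificate in a
+/// bundle's certificate chain before performing any chain verification (see
+/// `CheckedBundle::try_from`).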
+pub(crate) fn is_leaf( + certificate: &Certificate, +) -> core::result::Result<(), CertificateValidationError> { + // NOTE(jl): following structure of sigstore-python over the slightly different handling found + // in `verify_key_usages`. + let tbs = &certificate.tbs_certificate; + + // Only V3 certificates should appear in the context of Sigstore; earlier versions of X.509 lack + // extensions and have ambiguous CA behavior. + if tbs.version != x509_cert::Version::V3 { + Err(CertificateValidationError::VersionUnsupported)?; + } + + if is_ca(certificate).is_ok() { + Err(NotLeafErrorKind::IsCA)?; + }; + + let digital_signature = match tbs + .get::() + .map_err(CertificateValidationError::Malformed)? + { + None => Err(ExtensionErrorKind::Missing("KeyUsage"))?, + Some((_, key_usage)) => key_usage.digital_signature(), + }; + + if !digital_signature { + Err(ExtensionErrorKind::BitUnset("KeyUsage.digitalSignature"))?; + } + + // Finally, we check to make sure the leaf has an `ExtendedKeyUsages` + // extension that includes a codesigning entitlement. Sigstore should + // never issue a leaf that doesn't have this extended usage. + + let extended_key_usage = match tbs + .get::() + .map_err(CertificateValidationError::Malformed)? + { + None => Err(ExtensionErrorKind::Missing("ExtendedKeyUsage"))?, + Some((_, extended_key_usage)) => extended_key_usage, + }; + + if !extended_key_usage.0.contains(&ID_KP_CODE_SIGNING) { + Err(ExtensionErrorKind::BitUnset( + "ExtendedKeyUsage.digitalSignature", + ))?; + } + + Ok(()) +} + +/// Checks if the given `certificate` is a CA certificate. +/// +/// This does **not** indicate trustworthiness of the given `certificate`, only if it has the +/// appropriate interior state. +/// +/// This function is **not** naively invertible: users **must** use the dedicated `is_leaf` +/// utility function to determine whether a particular leaf upholds Sigstore's invariants. +pub(crate) fn is_ca( + certificate: &Certificate, +) -> core::result::Result<(), CertificateValidationError> { + let tbs = &certificate.tbs_certificate; + + // Only V3 certificates should appear in the context of Sigstore; earlier versions of X.509 lack + // extensions and have ambiguous CA behavior. + if tbs.version != x509_cert::Version::V3 { + return Err(CertificateValidationError::VersionUnsupported); + } + + // Valid CA certificates must have the following set: + // + // - `BasicKeyUsage.keyCertSign` + // - `BasicConstraints.ca` + // + // Any other combination of states is inconsistent and invalid, meaning + // that we won't treat the certificate as neither a leaf nor a CA. + + let ca = match tbs + .get::() + .map_err(CertificateValidationError::Malformed)? + { + None => Err(ExtensionErrorKind::Missing("BasicConstraints"))?, + Some((false, _)) => { + // BasicConstraints must be marked as critical, per RFC 5280 4.2.1.9. + Err(ExtensionErrorKind::NotCritical("BasicConstraints"))? + } + Some((true, v)) => v.ca, + }; + + let key_cert_sign = match tbs + .get::() + .map_err(CertificateValidationError::Malformed)? + { + None => Err(ExtensionErrorKind::Missing("KeyUsage"))?, + Some((_, v)) => v.key_cert_sign(), + }; + + // both states set, this is a CA. + if ca && key_cert_sign { + return Ok(()); + } + + if !(ca || key_cert_sign) { + Err(NotCAErrorKind::NotCA)?; + } + + // Anything else is an invalid state that should never occur. + Err(NotCAErrorKind::Invalid { ca, key_cert_sign })? +} + +/// Returns `True` if and only if the given `Certificate` indicates +/// that it's a root CA. 
+/// +/// This is **not** a verification function, and it does not establish +/// the trustworthiness of the given certificate. +pub(crate) fn is_root_ca( + certificate: &Certificate, +) -> core::result::Result<(), CertificateValidationError> { + // NOTE(ww): This function is obnoxiously long to make the different + // states explicit. + + let tbs = &certificate.tbs_certificate; + + // Only V3 certificates should appear in the context of Sigstore; earlier versions of X.509 lack + // extensions and have ambiguous CA behavior. + if tbs.version != x509_cert::Version::V3 { + return Err(CertificateValidationError::VersionUnsupported); + } + + // Non-CAs can't possibly be root CAs. + is_ca(certificate)?; + + // A certificate that is its own issuer and signer is considered a root CA. + if tbs.issuer != tbs.subject { + Err(NotCAErrorKind::NotRootCA)? + } + + Ok(()) +} + #[cfg(test)] mod tests { use super::*; diff --git a/src/crypto/certificate_pool.rs b/src/crypto/certificate_pool.rs index 731a68c6c3..1fddead331 100644 --- a/src/crypto/certificate_pool.rs +++ b/src/crypto/certificate_pool.rs @@ -16,34 +16,37 @@ use const_oid::db::rfc5280::ID_KP_CODE_SIGNING; use webpki::{ types::{CertificateDer, TrustAnchor, UnixTime}, - EndEntityCert, KeyUsage, + EndEntityCert, KeyUsage, VerifiedPath, }; -use crate::errors::{Result, SigstoreError}; +use crate::errors::{Result as SigstoreResult, SigstoreError}; /// A collection of trusted root certificates. #[derive(Default, Debug)] -pub(crate) struct CertificatePool<'a> { - trusted_roots: Vec<TrustAnchor<'a>>, - intermediates: Vec<CertificateDer<'a>>, +pub(crate) struct CertificatePool { + trusted_roots: Vec<TrustAnchor<'static>>, + intermediates: Vec<CertificateDer<'static>>, } -impl<'a> CertificatePool<'a> { +impl CertificatePool { /// Builds a `CertificatePool` instance using the provided list of [`Certificate`]. 
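Before continuing with `from_certificates`, a brief aside on the profile checks added above. The sketch below is not part of the patch: it is a minimal in-crate illustration (the `classify` helper and its labels are invented) of how `is_leaf`, `is_ca`, and `is_root_ca` are meant to compose over a parsed certificate.

```rust
use x509_cert::{der::Decode, Certificate};

use crate::crypto::certificate::{is_ca, is_leaf, is_root_ca};

/// Classify a DER-encoded certificate against the Sigstore profile.
fn classify(der: &[u8]) -> &'static str {
    let Ok(cert) = Certificate::from_der(der) else {
        return "unparseable certificate";
    };

    if is_leaf(&cert).is_ok() {
        // X.509 v3, not a CA, keyUsage.digitalSignature set, CODE_SIGNING EKU.
        "signing (leaf) certificate"
    } else if is_root_ca(&cert).is_ok() {
        // cA + keyCertSign and self-issued (issuer == subject).
        "root CA"
    } else if is_ca(&cert).is_ok() {
        // cA + keyCertSign, but issued by some other authority.
        "intermediate CA"
    } else {
        "certificate outside the expected profile"
    }
}
```

None of these checks establish trust; they only gate which role a certificate is allowed to play during path building and verification.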
- pub(crate) fn from_certificates( + pub(crate) fn from_certificates<'r, 'i, R, I>( trusted_roots: R, untrusted_intermediates: I, - ) -> Result> + ) -> SigstoreResult where - R: IntoIterator>, - I: IntoIterator>, + R: IntoIterator>, + I: IntoIterator>, { Ok(CertificatePool { trusted_roots: trusted_roots .into_iter() .map(|x| Ok(webpki::anchor_from_trusted_cert(&x)?.to_owned())) .collect::, webpki::Error>>()?, - intermediates: untrusted_intermediates.into_iter().collect(), + intermediates: untrusted_intermediates + .into_iter() + .map(|i| i.into_owned()) + .collect(), }) } @@ -59,7 +62,7 @@ impl<'a> CertificatePool<'a> { &self, cert_pem: &[u8], verification_time: Option, - ) -> Result<()> { + ) -> SigstoreResult<()> { let cert_pem = pem::parse(cert_pem)?; if cert_pem.tag() != "CERTIFICATE" { return Err(SigstoreError::CertificatePoolError( @@ -82,22 +85,28 @@ impl<'a> CertificatePool<'a> { &self, der: &[u8], verification_time: Option, - ) -> Result<()> { - self.verify_cert_with_time(der, verification_time.unwrap_or(UnixTime::now())) + ) -> SigstoreResult<()> { + let der = CertificateDer::from(der); + let cert = EndEntityCert::try_from(&der)?; + let time = std::time::Duration::from_secs(chrono::Utc::now().timestamp() as u64); + + self.verify_cert_with_time( + &cert, + verification_time.unwrap_or(UnixTime::since_unix_epoch(time)), + )?; + + Ok(()) } - /// TODO(tnytown): nudge webpki into behaving as the cosign code expects - pub(crate) fn verify_cert_with_time( - &self, - cert: &[u8], + pub(crate) fn verify_cert_with_time<'a, 'cert>( + &'a self, + cert: &'cert EndEntityCert<'cert>, verification_time: UnixTime, - ) -> Result<()> { - let der = CertificateDer::from(cert); - let cert = EndEntityCert::try_from(&der)?; - - // TODO(tnytown): Determine which of these algs are used in the Sigstore ecosystem. + ) -> Result, webpki::Error> + where + 'a: 'cert, + { let signing_algs = webpki::ALL_VERIFICATION_ALGS; - let eku_code_signing = ID_KP_CODE_SIGNING.as_bytes(); cert.verify_for_usage( @@ -108,8 +117,6 @@ impl<'a> CertificatePool<'a> { KeyUsage::required(eku_code_signing), None, None, - )?; - - Ok(()) + ) } } diff --git a/src/crypto/mod.rs b/src/crypto/mod.rs index fd1667f9b7..880088cbf9 100644 --- a/src/crypto/mod.rs +++ b/src/crypto/mod.rs @@ -175,6 +175,8 @@ pub enum Signature<'a> { pub(crate) mod certificate; #[cfg(feature = "cert")] pub(crate) mod certificate_pool; +#[cfg(feature = "cert")] +pub(crate) use certificate_pool::CertificatePool; pub mod verification_key; diff --git a/src/crypto/verification_key.rs b/src/crypto/verification_key.rs index b1c54a3c0c..5d877750c8 100644 --- a/src/crypto/verification_key.rs +++ b/src/crypto/verification_key.rs @@ -18,7 +18,7 @@ use const_oid::db::rfc5912::{ID_EC_PUBLIC_KEY, RSA_ENCRYPTION}; use ed25519::pkcs8::DecodePublicKey as ED25519DecodePublicKey; use rsa::{pkcs1v15, pss}; use sha2::{Digest, Sha256, Sha384}; -use signature::{DigestVerifier, Verifier}; +use signature::{hazmat::PrehashVerifier, DigestVerifier, Verifier}; use x509_cert::{der::referenced::OwnedToRef, spki::SubjectPublicKeyInfoOwned}; use super::{ @@ -328,6 +328,70 @@ impl CosignVerificationKey { } } } + + /// Verify the signature provided has been actually generated by the given key + /// when signing the provided prehashed message. 
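One more aside before the `verify_prehash` body below. With `CertificatePool` owning its anchors, a caller can build the pool once from the trust root and verify a Fulcio leaf at the time Rekor recorded the entry. A rough in-crate sketch, where the helper name, its parameters, and the use of the integrated time are assumptions for illustration:

```rust
use webpki::types::{CertificateDer, UnixTime};

use crate::crypto::certificate_pool::CertificatePool;
use crate::errors::Result;

/// Check a Fulcio-issued signing certificate (PEM) against the Fulcio roots,
/// evaluated at the time Rekor recorded the signature.
fn verify_signing_cert(
    fulcio_certs: Vec<CertificateDer<'static>>, // e.g. from TrustRoot::fulcio_certs()
    cert_pem: &[u8],                            // the leaf certificate, PEM-encoded
    integrated_time: u64,                       // seconds, from the Rekor entry
) -> Result<()> {
    // The pool owns its anchors now, so it no longer borrows from the trust root.
    let intermediates: Vec<CertificateDer<'static>> = Vec::new();
    let pool = CertificatePool::from_certificates(fulcio_certs, intermediates)?;

    // Short-lived Fulcio certs are usually expired by verification time, so
    // validity is checked at the integrated time rather than "now".
    let time = UnixTime::since_unix_epoch(std::time::Duration::from_secs(integrated_time));
    pool.verify_pem_cert(cert_pem, Some(time))
}
```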
+ pub(crate) fn verify_prehash(&self, signature: Signature, msg: &[u8]) -> Result<()> { + let sig = match signature { + Signature::Raw(data) => data.to_owned(), + Signature::Base64Encoded(data) => BASE64_STD_ENGINE.decode(data)?, + }; + + match self { + CosignVerificationKey::RSA_PSS_SHA256(inner) => { + let sig = pss::Signature::try_from(sig.as_slice())?; + inner + .verify_prehash(msg, &sig) + .map_err(|_| SigstoreError::PublicKeyVerificationError) + } + CosignVerificationKey::RSA_PSS_SHA384(inner) => { + let sig = pss::Signature::try_from(sig.as_slice())?; + inner + .verify_prehash(msg, &sig) + .map_err(|_| SigstoreError::PublicKeyVerificationError) + } + CosignVerificationKey::RSA_PSS_SHA512(inner) => { + let sig = pss::Signature::try_from(sig.as_slice())?; + inner + .verify_prehash(msg, &sig) + .map_err(|_| SigstoreError::PublicKeyVerificationError) + } + CosignVerificationKey::RSA_PKCS1_SHA256(inner) => { + let sig = pkcs1v15::Signature::try_from(sig.as_slice())?; + inner + .verify_prehash(msg, &sig) + .map_err(|_| SigstoreError::PublicKeyVerificationError) + } + CosignVerificationKey::RSA_PKCS1_SHA384(inner) => { + let sig = pkcs1v15::Signature::try_from(sig.as_slice())?; + inner + .verify_prehash(msg, &sig) + .map_err(|_| SigstoreError::PublicKeyVerificationError) + } + CosignVerificationKey::RSA_PKCS1_SHA512(inner) => { + let sig = pkcs1v15::Signature::try_from(sig.as_slice())?; + inner + .verify_prehash(msg, &sig) + .map_err(|_| SigstoreError::PublicKeyVerificationError) + } + // ECDSA signatures are encoded in der. + CosignVerificationKey::ECDSA_P256_SHA256_ASN1(inner) => { + let sig = ecdsa::Signature::from_der(&sig)?; + inner + .verify_prehash(msg, &sig) + .map_err(|_| SigstoreError::PublicKeyVerificationError) + } + CosignVerificationKey::ECDSA_P384_SHA384_ASN1(inner) => { + let sig = ecdsa::Signature::from_der(&sig)?; + inner + .verify_prehash(msg, &sig) + .map_err(|_| SigstoreError::PublicKeyVerificationError) + } + CosignVerificationKey::ED25519(_) => { + unimplemented!("Ed25519 doesn't implement verify_prehash") + } + } + } } #[cfg(test)] diff --git a/src/errors.rs b/src/errors.rs index 5ba05393cb..56a3f93856 100644 --- a/src/errors.rs +++ b/src/errors.rs @@ -133,7 +133,8 @@ pub enum SigstoreError { #[error(transparent)] JoinError(#[from] tokio::task::JoinError), - #[cfg(feature = "sign")] + // HACK(tnytown): Remove when we rework the Fulcio V2 endpoint. + #[cfg(feature = "fulcio")] #[error(transparent)] ReqwestError(#[from] reqwest::Error), diff --git a/src/fulcio/mod.rs b/src/fulcio/mod.rs index 8c034e2333..9166fa0a1d 100644 --- a/src/fulcio/mod.rs +++ b/src/fulcio/mod.rs @@ -10,16 +10,14 @@ use crate::fulcio::oauth::OauthTokenProvider; use crate::oauth::IdentityToken; use base64::{engine::general_purpose::STANDARD as BASE64_STD_ENGINE, Engine as _}; use openidconnect::core::CoreIdToken; -use pkcs8::der::Decode; use reqwest::{header, Body}; use serde::ser::SerializeStruct; use serde::{Serialize, Serializer}; use std::fmt::{Debug, Display, Formatter}; -use tracing::debug; use url::Url; -use x509_cert::Certificate; +use x509_cert::{der::Decode, Certificate}; -pub use models::CertificateResponse; +pub use models::{CertificateResponse, SigningCertificateDetachedSCT}; /// Default public Fulcio server root. pub const FULCIO_ROOT: &str = "https://fulcio.sigstore.dev/"; @@ -205,7 +203,7 @@ impl FulcioClient { /// /// TODO(tnytown): This (and other API clients) should be autogenerated. See sigstore-rs#209. 
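Aside on the `verify_prehash` addition above: bundle and Rekor verification work from an already computed SHA-256 digest rather than the raw artifact, which is why the hazmat prehash entry point is needed alongside the existing `verify`. A standalone sketch of the same idea using the `p256` crate directly (the function name and inputs are illustrative, not part of the patch):

```rust
use p256::ecdsa::{signature::hazmat::PrehashVerifier, Error, Signature, VerifyingKey};
use sha2::{Digest, Sha256};

/// Verify a DER-encoded ECDSA-P256 signature over a message that has already
/// been hashed, mirroring what the new `verify_prehash` delegates to.
fn verify_artifact_digest(
    key: &VerifyingKey,
    artifact: &[u8],
    der_signature: &[u8],
) -> Result<(), Error> {
    // The digest is computed once; in bundle verification it is typically
    // carried alongside the signature rather than recomputed here.
    let digest = Sha256::digest(artifact);

    let signature = Signature::from_der(der_signature)?;
    key.verify_prehash(digest.as_slice(), &signature)
}
```

Ed25519 has no prehashed variant in this API, which is why the new method leaves that arm `unimplemented!`.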
/// - /// https://github.com/sigstore/fulcio/blob/main/fulcio.proto + /// /// /// Additionally, it might not be reasonable to expect callers to correctly construct and pass /// in an X509 CSR. @@ -231,24 +229,23 @@ impl FulcioClient { header::ACCEPT => "application/pem-certificate-chain" ); - let response: SigningCertificate = client + let response = client .post(self.root_url.join(SIGNING_CERT_V2_PATH)?) .headers(headers) .json(&CreateSigningCertificateRequest { certificate_signing_request: request, }) .send() - .await? - .json() .await?; + let response = response.json().await?; - let sct_embedded = matches!( - response, - SigningCertificate::SignedCertificateEmbeddedSct(_) - ); - let certs = match response { - SigningCertificate::SignedCertificateDetachedSct(ref sc) => &sc.chain.certificates, - SigningCertificate::SignedCertificateEmbeddedSct(ref sc) => &sc.chain.certificates, + let (certs, detached_sct) = match response { + SigningCertificate::SignedCertificateDetachedSct(ref sc) => { + (&sc.chain.certificates, Some(sc.clone())) + } + SigningCertificate::SignedCertificateEmbeddedSct(ref sc) => { + (&sc.chain.certificates, None) + } }; if certs.len() < 2 { @@ -263,19 +260,10 @@ impl FulcioClient { .map(|pem| Certificate::from_der(pem.contents())) .collect::, _>>()?; - // TODO(tnytown): Implement SCT extraction. - // see: https://github.com/RustCrypto/formats/pull/1134 - if sct_embedded { - debug!("PrecertificateSignedCertificateTimestamps isn't implemented yet in x509_cert."); - } else { - // No embedded SCT, Fulcio instance that provides detached SCT: - if let SigningCertificate::SignedCertificateDetachedSct(_sct) = response {} - }; - Ok(CertificateResponse { cert, chain, - // sct, + detached_sct, }) } } diff --git a/src/fulcio/models.rs b/src/fulcio/models.rs index e39e335895..3441f4ca44 100644 --- a/src/fulcio/models.rs +++ b/src/fulcio/models.rs @@ -14,13 +14,17 @@ //! Models for interfacing with Fulcio. //! -//! https://github.com/sigstore/fulcio/blob/9da27be4fb64b85c907ab9ddd8a5d3cbd38041d4/fulcio.proto +//! -use base64::{engine::general_purpose::STANDARD as BASE64_STD_ENGINE, Engine as _}; use pem::Pem; use pkcs8::der::EncodePem; -use serde::{Deserialize, Serialize, Serializer}; +use serde::{Deserialize, Deserializer, Serialize, Serializer}; use serde_repr::Deserialize_repr; +use serde_with::{ + base64::{Base64, Standard}, + formats::Padded, + serde_as, DeserializeAs, SerializeAs, +}; use x509_cert::Certificate; fn serialize_x509_csr( @@ -31,11 +35,36 @@ where S: Serializer, { let encoded = input - .to_pem(pkcs8::LineEnding::CRLF) + .to_pem(pkcs8::LineEnding::LF) .map_err(serde::ser::Error::custom)?; - let encoded = BASE64_STD_ENGINE.encode(encoded); - ser.serialize_str(&encoded) + Base64::::serialize_as(&encoded, ser) +} + +fn deserialize_inner_detached_sct<'de, D>(de: D) -> std::result::Result +where + D: Deserializer<'de>, +{ + let buf: Vec = Base64::::deserialize_as(de)?; + serde_json::from_slice(&buf).map_err(serde::de::Error::custom) +} + +fn deserialize_inner_detached_sct_signature<'de, D>(de: D) -> Result, D::Error> +where + D: Deserializer<'de>, +{ + let buf: Vec = Base64::::deserialize_as(de)?; + + // The first two bytes indicate the signature and hash algorithms so let's skip those. + // The next two bytes indicate the size of the signature. + let signature_size = u16::from_be_bytes(buf[2..4].try_into().expect("unexpected length")); + + // This should be equal to the length of the remainder of the signature buffer. 
+ let signature = buf[4..].to_vec(); + if signature_size as usize != signature.len() { + return Err(serde::de::Error::custom("signature size mismatch")); + } + Ok(signature) } #[derive(Serialize)] @@ -52,10 +81,12 @@ pub enum SigningCertificate { SignedCertificateEmbeddedSct(SigningCertificateEmbeddedSCT), } -#[derive(Deserialize)] +#[derive(Deserialize, Debug, Clone)] #[serde(rename_all = "camelCase")] pub struct SigningCertificateDetachedSCT { pub chain: CertificateChain, + #[serde(deserialize_with = "deserialize_inner_detached_sct")] + pub signed_certificate_timestamp: InnerDetachedSCT, } #[derive(Deserialize)] @@ -64,12 +95,25 @@ pub struct SigningCertificateEmbeddedSCT { pub chain: CertificateChain, } -#[derive(Deserialize)] +#[derive(Deserialize, Debug, Clone)] pub struct CertificateChain { pub certificates: Vec, } -#[derive(Deserialize_repr, PartialEq, Debug)] +#[serde_as] +#[derive(Deserialize, Debug, Clone)] +pub struct InnerDetachedSCT { + pub sct_version: SCTVersion, + #[serde_as(as = "Base64")] + pub id: [u8; 32], + pub timestamp: u64, + #[serde(deserialize_with = "deserialize_inner_detached_sct_signature")] + pub signature: Vec, + #[serde_as(as = "Base64")] + pub extensions: Vec, +} + +#[derive(Deserialize_repr, PartialEq, Debug, Clone)] #[repr(u8)] pub enum SCTVersion { V1 = 0, @@ -78,5 +122,5 @@ pub enum SCTVersion { pub struct CertificateResponse { pub cert: Certificate, pub chain: Vec, - // pub sct: InnerDetachedSCT, + pub detached_sct: Option, } diff --git a/src/lib.rs b/src/lib.rs index e715b80078..fa336c1029 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -100,7 +100,6 @@ //! //! let mut client = sigstore::cosign::ClientBuilder::default() //! .with_trust_repository(&repo) -//! .await //! .expect("Cannot construct cosign client from given materials") //! .build() //! .expect("Unexpected failure while building Client"); @@ -283,9 +282,5 @@ pub mod registry; #[cfg(feature = "rekor")] pub mod rekor; -// Don't export yet -- these types should only be useful internally. -mod bundle; -pub use bundle::Bundle; - -#[cfg(feature = "sign")] -pub mod sign; +#[cfg(any(feature = "sign", feature = "verify"))] +pub mod bundle; diff --git a/src/trust/mod.rs b/src/trust/mod.rs index b4c7f7507f..966bb238fc 100644 --- a/src/trust/mod.rs +++ b/src/trust/mod.rs @@ -13,18 +13,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -use async_trait::async_trait; use webpki::types::CertificateDer; #[cfg(feature = "sigstore-trust-root")] pub mod sigstore; /// A `TrustRoot` owns all key material necessary for establishing a root of trust. -#[cfg_attr(not(target_arch = "wasm32"), async_trait)] -#[cfg_attr(target_arch = "wasm32", async_trait(?Send))] -pub trait TrustRoot: Send + Sync { - async fn fulcio_certs(&self) -> crate::errors::Result>; - async fn rekor_keys(&self) -> crate::errors::Result>; +pub trait TrustRoot { + fn fulcio_certs(&self) -> crate::errors::Result>; + fn rekor_keys(&self) -> crate::errors::Result>; + fn ctfe_keys(&self) -> crate::errors::Result>; } /// A `ManualTrustRoot` is a [TrustRoot] with out-of-band trust materials. 
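Aside on the detached SCT handling added in `src/fulcio/models.rs` above: the `signature` field arrives as a TLS `digitally-signed` blob, i.e. one byte of hash algorithm, one byte of signature algorithm, a big-endian `u16` length, then the raw signature bytes, which is exactly what `deserialize_inner_detached_sct_signature` unpacks. A standalone sketch of that framing (error handling and names are illustrative):

```rust
/// Split a TLS `digitally-signed` blob into (hash alg, sig alg, signature).
fn split_digitally_signed(buf: &[u8]) -> Result<(u8, u8, &[u8]), String> {
    if buf.len() < 4 {
        return Err("buffer too short for digitally-signed header".into());
    }

    let (hash_alg, sig_alg) = (buf[0], buf[1]);
    let declared_len = u16::from_be_bytes([buf[2], buf[3]]) as usize;
    let signature = &buf[4..];

    // The declared length must cover exactly the remainder of the buffer.
    if declared_len != signature.len() {
        return Err("signature size mismatch".into());
    }

    Ok((hash_alg, sig_alg, signature))
}
```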
@@ -33,23 +31,25 @@ pub trait TrustRoot: Send + Sync { pub struct ManualTrustRoot<'a> { pub fulcio_certs: Option>>, pub rekor_key: Option>, + pub ctfe_keys: Vec>, } -#[cfg(not(target_arch = "wasm32"))] -#[async_trait] impl TrustRoot for ManualTrustRoot<'_> { - #[cfg(not(target_arch = "wasm32"))] - async fn fulcio_certs(&self) -> crate::errors::Result> { + fn fulcio_certs(&self) -> crate::errors::Result> { Ok(match &self.fulcio_certs { Some(certs) => certs.clone(), None => Vec::new(), }) } - async fn rekor_keys(&self) -> crate::errors::Result> { + fn rekor_keys(&self) -> crate::errors::Result> { Ok(match &self.rekor_key { Some(key) => vec![&key[..]], None => Vec::new(), }) } + + fn ctfe_keys(&self) -> crate::errors::Result> { + Ok(self.ctfe_keys.iter().map(|v| &v[..]).collect()) + } } diff --git a/src/trust/sigstore/constants.rs b/src/trust/sigstore/constants.rs index 325989706c..d4630aaf12 100644 --- a/src/trust/sigstore/constants.rs +++ b/src/trust/sigstore/constants.rs @@ -16,10 +16,21 @@ pub(crate) const SIGSTORE_METADATA_BASE: &str = "https://tuf-repo-cdn.sigstore.dev"; pub(crate) const SIGSTORE_TARGET_BASE: &str = "https://tuf-repo-cdn.sigstore.dev/targets"; -macro_rules! tuf_resource { - ($path:literal) => { - include_bytes!(concat!(env!("CARGO_MANIFEST_DIR"), "/trust_root/", $path)) +macro_rules! impl_static_resource { + {$($name:literal,)+} => { + #[inline] + pub(crate) fn static_resource(name: N) -> Option<&'static [u8]> where N: AsRef { + match name.as_ref() { + $( + $name => Some(include_bytes!(concat!(env!("CARGO_MANIFEST_DIR"), "/trust_root/prod/", $name))) + ),+, + _ => None, + } + } }; } -pub(crate) const SIGSTORE_ROOT: &[u8] = tuf_resource!("prod/root.json"); +impl_static_resource! { + "root.json", + "trusted_root.json", +} diff --git a/src/trust/sigstore/mod.rs b/src/trust/sigstore/mod.rs index b843b9b783..9ee9f02cce 100644 --- a/src/trust/sigstore/mod.rs +++ b/src/trust/sigstore/mod.rs @@ -20,124 +20,127 @@ //! //! These can later be given to [`cosign::ClientBuilder`](crate::cosign::ClientBuilder) //! to enable Fulcio and Rekor integrations. -//! -//! # Example -//! -//! The `SigstoreRootTrust` instance can be created via the [`SigstoreTrustRoot::prefetch`] -//! method. -//! -/// ```rust -/// # use sigstore::trust::sigstore::SigstoreTrustRoot; -/// # use sigstore::errors::Result; -/// # #[tokio::main] -/// # async fn main() -> std::result::Result<(), anyhow::Error> { -/// let repo: Result = SigstoreTrustRoot::new(None).await?.prefetch().await; -/// // Now, get Fulcio and Rekor trust roots with the returned `SigstoreRootTrust` -/// # Ok(()) -/// # } -/// ``` -use async_trait::async_trait; -use futures::StreamExt; +use futures_util::TryStreamExt; use sha2::{Digest, Sha256}; -use std::{ - fs, - path::{Path, PathBuf}, +use std::path::Path; +use tokio_util::bytes::BytesMut; + +use sigstore_protobuf_specs::dev::sigstore::{ + common::v1::TimeRange, + trustroot::v1::{CertificateAuthority, TransparencyLogInstance, TrustedRoot}, }; -use tokio::sync::OnceCell; use tough::TargetName; use tracing::debug; use webpki::types::CertificateDer; mod constants; -mod trustroot; -use self::trustroot::{CertificateAuthority, TimeRange, TransparencyLogInstance, TrustedRoot}; use crate::errors::{Result, SigstoreError}; pub use crate::trust::{ManualTrustRoot, TrustRoot}; /// Securely fetches Rekor public key and Fulcio certificates from Sigstore's TUF repository. 
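Before the `SigstoreTrustRoot` rework below, here is how the now-synchronous `TrustRoot` trait plays with `ManualTrustRoot` and the earlier `ClientBuilder` change. This is a sketch, not part of the patch: it assumes the `cosign` feature, placeholder key material, and `CertificateDer` from `rustls-pki-types` (aliased as `webpki::types` inside this crate).

```rust
use sigstore::cosign::ClientBuilder;
use sigstore::errors::Result;
use sigstore::trust::ManualTrustRoot;
// `CertificateDer` comes from rustls-pki-types, re-exported via rustls-webpki.
use webpki::types::CertificateDer;

/// Build a cosign client from out-of-band trust material. `fulcio_der` and
/// `rekor_spki_der` are placeholders for bytes obtained elsewhere.
fn build_client(fulcio_der: Vec<u8>, rekor_spki_der: Vec<u8>) -> Result<()> {
    let trust_root = ManualTrustRoot {
        fulcio_certs: Some(vec![CertificateDer::from(fulcio_der)]),
        rekor_key: Some(rekor_spki_der),
        ctfe_keys: Vec::new(),
    };

    // `with_trust_repository` no longer awaits anything: the trait methods are
    // synchronous and simply hand back the material stored above.
    let _client = ClientBuilder::default()
        .with_trust_repository(&trust_root)?
        .build()?;

    Ok(())
}
```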
#[derive(Debug)] pub struct SigstoreTrustRoot { - repository: tough::Repository, - checkout_dir: Option, - trusted_root: OnceCell, + trusted_root: TrustedRoot, } impl SigstoreTrustRoot { - /// Constructs a new trust repository established by a [tough::Repository]. - pub async fn new(checkout_dir: Option<&Path>) -> Result { + /// Constructs a new trust root from a [`tough::Repository`]. + async fn from_tough( + repository: &tough::Repository, + checkout_dir: Option<&Path>, + ) -> Result { + let trusted_root = { + let data = Self::fetch_target(repository, checkout_dir, "trusted_root.json").await?; + serde_json::from_slice(&data[..])? + }; + + Ok(Self { trusted_root }) + } + + /// Constructs a new trust root backed by the Sigstore Public Good Instance. + pub async fn new(cache_dir: Option<&Path>) -> Result { // These are statically defined and should always parse correctly. let metadata_base = url::Url::parse(constants::SIGSTORE_METADATA_BASE)?; let target_base = url::Url::parse(constants::SIGSTORE_TARGET_BASE)?; - let repository = - tough::RepositoryLoader::new(&constants::SIGSTORE_ROOT, metadata_base, target_base) - .expiration_enforcement(tough::ExpirationEnforcement::Safe) - .load() - .await - .map_err(Box::new)?; - - Ok(Self { - repository, - checkout_dir: checkout_dir.map(ToOwned::to_owned), - trusted_root: OnceCell::default(), - }) + let repository = tough::RepositoryLoader::new( + &constants::static_resource("root.json").expect("Failed to fetch embedded TUF root!"), + metadata_base, + target_base, + ) + .expiration_enforcement(tough::ExpirationEnforcement::Safe) + .load() + .await + .map_err(Box::new)?; + + Self::from_tough(&repository, cache_dir).await } - async fn trusted_root(&self) -> Result<&TrustedRoot> { - async fn init_trusted_root( - repository: &tough::Repository, - checkout_dir: Option<&PathBuf>, - ) -> Result { - let trusted_root_target = TargetName::new("trusted_root.json").map_err(Box::new)?; - let local_path = checkout_dir.map(|d| d.join(trusted_root_target.raw())); + async fn fetch_target( + repository: &tough::Repository, + checkout_dir: Option<&Path>, + name: N, + ) -> Result> + where + N: TryInto, + { + let name: TargetName = name.try_into().map_err(Box::new)?; + let local_path = checkout_dir.as_ref().map(|d| d.join(name.raw())); - let data = fetch_target_or_reuse_local_cache( - repository, - &trusted_root_target, - local_path.as_ref(), - ) - .await?; + let read_remote_target = || async { + match repository.read_target(&name).await { + Ok(Some(s)) => Ok(s.try_collect::().await.map_err(Box::new)?), + _ => Err(SigstoreError::TufTargetNotFoundError(name.raw().to_owned())), + } + }; - debug!("data:\n{}", String::from_utf8_lossy(&data)); + // First, try reading the target from disk cache. + let data = if let Some(Ok(local_data)) = local_path.as_ref().map(std::fs::read) { + debug!("{}: reading from disk cache", name.raw()); + local_data.to_vec() + // Try reading the target embedded into the binary. + } else if let Some(embedded_data) = constants::static_resource(name.raw()) { + debug!("{}: reading from embedded resources", name.raw()); + embedded_data.to_vec() + // If all else fails, read the data from the TUF repo. 
+ } else if let Ok(remote_data) = read_remote_target().await { + debug!("{}: reading from remote", name.raw()); + remote_data.to_vec() + } else { + return Err(SigstoreError::TufTargetNotFoundError(name.raw().to_owned())); + }; - serde_json::from_slice(&data[..]).map_err(SigstoreError::from) - } + // Get metadata (hash) of the target and update the disk copy if it doesn't match. + let Some(target) = repository.targets().signed.targets.get(&name) else { + return Err(SigstoreError::TufMetadataError(format!( + "couldn't get metadata for {}", + name.raw() + ))); + }; - self.trusted_root - .get_or_try_init(|| async { - init_trusted_root(&self.repository, self.checkout_dir.as_ref()).await - }) - .await - } + let data = if Sha256::digest(&data)[..] != target.hashes.sha256[..] { + debug!("{}: out of date", name.raw()); + read_remote_target().await?.to_vec() + } else { + data + }; - /// Prefetches trust materials. - /// - /// [TrustRoot::fulcio_certs()] and [TrustRoot::rekor_keys()] on [SigstoreTrustRoot] lazily - /// fetches the requested data, which is problematic for async callers. Those callers should - /// use this method to fetch the trust root ahead of time. - /// - /// ```rust - /// # use sigstore::trust::sigstore::SigstoreTrustRoot; - /// # use sigstore::errors::Result; - /// # #[tokio::main] - /// # async fn main() -> std::result::Result<(), anyhow::Error> { - /// let repo: Result = SigstoreTrustRoot::new(None).await?.prefetch().await; - /// // Now, get Fulcio and Rekor trust roots with the returned `SigstoreRootTrust` - /// # Ok(()) - /// # } - /// ``` - pub async fn prefetch(self) -> Result { - let _ = self.trusted_root().await?; - Ok(self) + // Write our updated data back to the disk. + if let Some(local_path) = local_path { + std::fs::write(local_path, &data)?; + } + + Ok(data) } #[inline] fn tlog_keys(tlogs: &[TransparencyLogInstance]) -> impl Iterator { tlogs .iter() - .filter(|key| is_timerange_valid(key.public_key.valid_for.as_ref(), false)) - .filter_map(|key| key.public_key.raw_bytes.as_ref()) + .filter_map(|tlog| tlog.public_key.as_ref()) + .filter(|key| is_timerange_valid(key.valid_for.as_ref(), false)) + .filter_map(|key| key.raw_bytes.as_ref()) .map(|key_bytes| key_bytes.as_slice()) } @@ -147,27 +150,25 @@ impl SigstoreTrustRoot { allow_expired: bool, ) -> impl Iterator { cas.iter() - .filter(move |ca| is_timerange_valid(Some(&ca.valid_for), allow_expired)) - .flat_map(|ca| ca.cert_chain.certificates.iter()) + .filter(move |ca| is_timerange_valid(ca.valid_for.as_ref(), allow_expired)) + .flat_map(|ca| ca.cert_chain.as_ref()) + .flat_map(|chain| chain.certificates.iter()) .map(|cert| cert.raw_bytes.as_slice()) } } -#[cfg(not(target_arch = "wasm32"))] -#[async_trait] impl crate::trust::TrustRoot for SigstoreTrustRoot { /// Fetch Fulcio certificates from the given TUF repository or reuse /// the local cache if its contents are not outdated. /// /// The contents of the local cache are updated when they are outdated. - #[cfg(not(target_arch = "wasm32"))] - async fn fulcio_certs(&self) -> Result> { - let root = self.trusted_root().await?; - + fn fulcio_certs(&self) -> Result> { // Allow expired certificates: they may have been active when the // certificate was used to sign. 
- let certs = Self::ca_keys(&root.certificate_authorities, true); - let certs: Vec<_> = certs.map(CertificateDer::from).collect(); + let certs = Self::ca_keys(&self.trusted_root.certificate_authorities, true); + let certs: Vec<_> = certs + .map(|c| CertificateDer::from(c).into_owned()) + .collect(); if certs.is_empty() { Err(SigstoreError::TufMetadataError( @@ -182,9 +183,8 @@ impl crate::trust::TrustRoot for SigstoreTrustRoot { /// the local cache if it's not outdated. /// /// The contents of the local cache are updated when they are outdated. - async fn rekor_keys(&self) -> Result> { - let root = self.trusted_root().await?; - let keys: Vec<_> = Self::tlog_keys(&root.tlogs).collect(); + fn rekor_keys(&self) -> Result> { + let keys: Vec<_> = Self::tlog_keys(&self.trusted_root.tlogs).collect(); if keys.len() != 1 { Err(SigstoreError::TufMetadataError( @@ -194,133 +194,142 @@ impl crate::trust::TrustRoot for SigstoreTrustRoot { Ok(keys) } } + + /// Fetch CTFE public keys from the given TUF repository or reuse + /// the local cache if it's not outdated. + /// + /// The contents of the local cache are updated when they are outdated. + fn ctfe_keys(&self) -> Result> { + let keys: Vec<_> = Self::tlog_keys(&self.trusted_root.ctlogs).collect(); + + if keys.is_empty() { + Err(SigstoreError::TufMetadataError( + "CTFE keys not found".into(), + )) + } else { + Ok(keys) + } + } } /// Given a `range`, checks that the the current time is not before `start`. If /// `allow_expired` is `false`, also checks that the current time is not after /// `end`. fn is_timerange_valid(range: Option<&TimeRange>, allow_expired: bool) -> bool { - let time = chrono::Utc::now(); + let now = chrono::Utc::now().timestamp(); - match range { + let start = range.and_then(|r| r.start.as_ref()).map(|t| t.seconds); + let end = range.and_then(|r| r.end.as_ref()).map(|t| t.seconds); + + match (start, end) { // If there was no validity period specified, the key is always valid. - None => true, + (None, _) => true, // Active: if the current time is before the starting period, we are not yet valid. - Some(range) if time < range.start => false, - // If we want Expired keys, then the key is valid at this point. + (Some(start), _) if now < start => false, + // If we want Expired keys, then we don't need to check the end. _ if allow_expired => true, - // Otherwise, check that we are in range if the range has an end. - Some(range) => match range.end { - None => true, - Some(end) => time <= end, - }, + // If there is no expiry date, the key is valid. + (_, None) => true, + // If we have an expiry date, check it. + (_, Some(end)) => now <= end, } } -/// Download a file stored inside of a TUF repository, try to reuse a local -/// cache when possible. -/// -/// * `repository`: TUF repository holding the file -/// * `target_name`: TUF representation of the file to be downloaded -/// * `local_file`: location where the file should be downloaded -/// -/// This function will reuse the local copy of the file if contents -/// didn't change. -/// This check is done by comparing the digest of the local file, if found, -/// with the digest reported inside of the TUF repository metadata. -/// -/// **Note well:** the `local_file` is updated whenever its contents are -/// outdated. 
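Before the removal of the old cache helpers below, a quick look at the reworked trust root from the caller's side: the TUF fetch stays async, while the accessors are now plain synchronous methods. A usage sketch, assuming the `sigstore-trust-root` feature and a tokio (or similar) async runtime:

```rust
use std::path::Path;

use sigstore::errors::Result;
use sigstore::trust::{sigstore::SigstoreTrustRoot, TrustRoot};

/// Fetch the Public Good Instance trust root, caching targets under `cache`,
/// then read the verification material with the synchronous accessors.
async fn show_trust_root(cache: Option<&Path>) -> Result<()> {
    // The only remaining async step is the TUF fetch itself.
    let root = SigstoreTrustRoot::new(cache).await?;

    println!(
        "{} Fulcio cert(s), {} Rekor key(s), {} CTFE key(s)",
        root.fulcio_certs()?.len(),
        root.rekor_keys()?.len(),
        root.ctfe_keys()?.len(),
    );
    Ok(())
}
```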
-async fn fetch_target_or_reuse_local_cache( - repository: &tough::Repository, - target_name: &TargetName, - local_file: Option<&PathBuf>, -) -> Result> { - let (local_file_outdated, local_file_contents) = if let Some(path) = local_file { - is_local_file_outdated(repository, target_name, path) - } else { - Ok((true, None)) - }?; - - let data = if local_file_outdated { - let data = fetch_target(repository, target_name).await?; - if let Some(path) = local_file { - // update the local file to have latest data from the TUF repo - fs::write(path, data.clone())?; - } - data - } else { - local_file_contents - .expect("local file contents to not be 'None'") - .as_bytes() - .to_owned() - }; - - Ok(data) -} +#[cfg(test)] +mod tests { + use super::*; + use rstest::{fixture, rstest}; + use std::fs; + use std::path::Path; + use std::time::SystemTime; + use tempfile::TempDir; -/// Download a file from a TUF repository -async fn fetch_target(repository: &tough::Repository, target_name: &TargetName) -> Result> { - match repository - .read_target(target_name) - .await - .map_err(Box::new)? - { - None => Err(SigstoreError::TufTargetNotFoundError( - target_name.raw().to_string(), - )), - Some(mut stream) => { - let mut data = vec![]; - while let Some(d) = stream.next().await { - let mut d = Into::>::into(d.map_err(Box::new)?); - data.append(&mut d); - } - Ok(data) + fn verify(root: &SigstoreTrustRoot, cache_dir: Option<&Path>) { + if let Some(cache_dir) = cache_dir { + assert!( + cache_dir.join("trusted_root.json").exists(), + "the trusted root was not cached" + ); } + + assert!( + root.fulcio_certs().is_ok_and(|v| !v.is_empty()), + "no Fulcio certs established" + ); + assert!( + root.rekor_keys().is_ok_and(|v| !v.is_empty()), + "no Rekor keys established" + ); + assert!( + root.ctfe_keys().is_ok_and(|v| !v.is_empty()), + "no CTFE keys established" + ); } -} -/// Compares the checksum of a local file, with the digest reported inside of -/// TUF repository metadata -fn is_local_file_outdated( - repository: &tough::Repository, - target_name: &TargetName, - local_file: &Path, -) -> Result<(bool, Option)> { - let target = repository - .targets() - .signed - .targets - .get(target_name) - .ok_or_else(|| SigstoreError::TufTargetNotFoundError(target_name.raw().to_string()))?; - - if local_file.exists() { - let data = fs::read_to_string(local_file)?; - let local_checksum = Sha256::digest(data.clone()); - let expected_digest: Vec = target.hashes.sha256.to_vec(); - - if local_checksum.as_slice() == expected_digest.as_slice() { - // local data is not outdated - Ok((false, Some(data))) - } else { - Ok((true, None)) - } - } else { - Ok((true, None)) + #[fixture] + fn cache_dir() -> TempDir { + TempDir::new().expect("cannot create temp cache dir") } -} -#[cfg(test)] -mod tests { - use crate::trust::sigstore::SigstoreTrustRoot; + async fn trust_root(cache: Option<&Path>) -> SigstoreTrustRoot { + SigstoreTrustRoot::new(cache) + .await + .expect("failed to construct SigstoreTrustRoot") + } + #[rstest] #[tokio::test] - async fn prefetch() { - let _repo = SigstoreTrustRoot::new(None) - .await - .expect("initialize SigstoreRepository") - .prefetch() - .await - .expect("prefetch"); + async fn trust_root_fetch(#[values(None, Some(cache_dir()))] cache: Option) { + let cache = cache.as_ref().map(|t| t.path()); + let root = trust_root(cache).await; + + verify(&root, cache); + } + + #[rstest] + #[tokio::test] + async fn trust_root_outdated(cache_dir: TempDir) { + let trusted_root_path = cache_dir.path().join("trusted_root.json"); + 
let outdated_data = b"fake trusted root"; + fs::write(&trusted_root_path, outdated_data) + .expect("failed to write to trusted root cache"); + + let cache = Some(cache_dir.path()); + let root = trust_root(cache).await; + verify(&root, cache); + + let data = fs::read(&trusted_root_path).expect("failed to read from trusted root cache"); + assert_ne!(data, outdated_data, "TUF cache was not properly updated"); + } + + #[test] + fn test_is_timerange_valid() { + fn range_from(start: i64, end: i64) -> TimeRange { + let base = chrono::Utc::now(); + let start: SystemTime = (base + chrono::TimeDelta::seconds(start)).into(); + let end: SystemTime = (base + chrono::TimeDelta::seconds(end)).into(); + + TimeRange { + start: Some(start.into()), + end: Some(end.into()), + } + } + + assert!(is_timerange_valid(None, true)); + assert!(is_timerange_valid(None, false)); + + // Test lower bound conditions + + // Valid: 1 ago, 1 from now + assert!(is_timerange_valid(Some(&range_from(-1, 1)), false)); + // Invalid: 1 from now, 1 from now + assert!(!is_timerange_valid(Some(&range_from(1, 1)), false)); + + // Test upper bound conditions + + // Invalid: 1 ago, 1 ago + assert!(!is_timerange_valid(Some(&range_from(-1, -1)), false)); + // Valid: 1 ago, 1 ago + assert!(is_timerange_valid(Some(&range_from(-1, -1)), true)) } } diff --git a/src/trust/sigstore/repository_helper.rs b/src/trust/sigstore/repository_helper.rs deleted file mode 100644 index a581619638..0000000000 --- a/src/trust/sigstore/repository_helper.rs +++ /dev/null @@ -1,414 +0,0 @@ -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use sha2::{Digest, Sha256}; -use std::fs; -use std::io::Read; -use std::path::{Path, PathBuf}; -use tough::{RepositoryLoader, TargetName}; -use url::Url; - -use super::{ - super::errors::{Result, SigstoreError}, - constants::{SIGSTORE_FULCIO_CERT_TARGET_REGEX, SIGSTORE_REKOR_PUB_KEY_TARGET}, -}; - -pub(crate) struct RepositoryHelper { - repository: tough::Repository, - checkout_dir: Option, -} - -impl RepositoryHelper { - pub(crate) fn new( - root: R, - metadata_base: Url, - target_base: Url, - checkout_dir: Option<&Path>, - ) -> Result - where - R: Read, - { - let repository = RepositoryLoader::new(root, metadata_base, target_base) - .expiration_enforcement(tough::ExpirationEnforcement::Safe) - .load() - .map_err(Box::new)?; - - Ok(Self { - repository, - checkout_dir: checkout_dir.map(|s| s.to_owned()), - }) - } - - /// Fetch Fulcio certificates from the given TUF repository or reuse - /// the local cache if its contents are not outdated. - /// - /// The contents of the local cache are updated when they are outdated. 
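For contrast with the helper being deleted below: both the old `RepositoryHelper` and the new `fetch_target` reduce cache reuse to a SHA-256 comparison against the already verified TUF targets metadata. A minimal sketch of that rule (names are illustrative; `fetch_remote` stands in for `tough::Repository::read_target`):

```rust
use sha2::{Digest, Sha256};

/// Decide whether cached target bytes are still current, given the SHA-256
/// recorded in the TUF targets metadata.
fn cache_is_current(cached: &[u8], expected_sha256: &[u8]) -> bool {
    Sha256::digest(cached).as_slice() == expected_sha256
}

/// Prefer the cache when it matches; otherwise fall back to a remote fetch.
fn resolve_target<F>(cached: Option<Vec<u8>>, expected_sha256: &[u8], fetch_remote: F) -> Vec<u8>
where
    F: FnOnce() -> Vec<u8>,
{
    match cached {
        Some(data) if cache_is_current(&data, expected_sha256) => data,
        _ => fetch_remote(),
    }
}
```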
- pub(crate) fn fulcio_certs(&self) -> Result> { - let fulcio_target_names = self.fulcio_cert_target_names(); - let mut certs = vec![]; - - for fulcio_target_name in &fulcio_target_names { - let local_fulcio_path = self - .checkout_dir - .as_ref() - .map(|d| Path::new(d).join(fulcio_target_name.raw())); - - let cert_data = fetch_target_or_reuse_local_cache( - &self.repository, - fulcio_target_name, - local_fulcio_path.as_ref(), - )?; - certs.push(crate::registry::Certificate { - data: cert_data, - encoding: crate::registry::CertificateEncoding::Pem, - }); - } - Ok(certs) - } - - fn fulcio_cert_target_names(&self) -> Vec { - self.repository - .targets() - .signed - .targets_iter() - .filter_map(|(target_name, _target)| { - if SIGSTORE_FULCIO_CERT_TARGET_REGEX.is_match(target_name.raw()) { - Some(target_name.clone()) - } else { - None - } - }) - .collect() - } - - /// Fetch Rekor public key from the given TUF repository or reuse - /// the local cache if it's not outdated. - /// - /// The contents of the local cache are updated when they are outdated. - pub(crate) fn rekor_pub_key(&self) -> Result> { - let rekor_target_name = TargetName::new(SIGSTORE_REKOR_PUB_KEY_TARGET).map_err(Box::new)?; - - let local_rekor_path = self - .checkout_dir - .as_ref() - .map(|d| Path::new(d).join(SIGSTORE_REKOR_PUB_KEY_TARGET)); - - fetch_target_or_reuse_local_cache( - &self.repository, - &rekor_target_name, - local_rekor_path.as_ref(), - ) - } -} - -/// Download a file stored inside of a TUF repository, try to reuse a local -/// cache when possible. -/// -/// * `repository`: TUF repository holding the file -/// * `target_name`: TUF representation of the file to be downloaded -/// * `local_file`: location where the file should be downloaded -/// -/// This function will reuse the local copy of the file if contents -/// didn't change. -/// This check is done by comparing the digest of the local file, if found, -/// with the digest reported inside of the TUF repository metadata. -/// -/// **Note well:** the `local_file` is updated whenever its contents are -/// outdated. -fn fetch_target_or_reuse_local_cache( - repository: &tough::Repository, - target_name: &TargetName, - local_file: Option<&PathBuf>, -) -> Result> { - let (local_file_outdated, local_file_contents) = if let Some(path) = local_file { - is_local_file_outdated(repository, target_name, path) - } else { - Ok((true, None)) - }?; - - let data = if local_file_outdated { - let data = fetch_target(repository, target_name)?; - if let Some(path) = local_file { - // update the local file to have latest data from the TUF repo - fs::write(path, data.clone())?; - } - data - } else { - local_file_contents - .expect("local file contents to not be 'None'") - .as_bytes() - .to_owned() - }; - - Ok(data) -} - -/// Download a file from a TUF repository -fn fetch_target(repository: &tough::Repository, target_name: &TargetName) -> Result> { - let data: Vec; - match repository.read_target(target_name).map_err(Box::new)? 
{ - None => Err(SigstoreError::TufTargetNotFoundError( - target_name.raw().to_string(), - )), - Some(reader) => { - data = read_to_end(reader)?; - Ok(data) - } - } -} - -/// Compares the checksum of a local file, with the digest reported inside of -/// TUF repository metadata -fn is_local_file_outdated( - repository: &tough::Repository, - target_name: &TargetName, - local_file: &Path, -) -> Result<(bool, Option)> { - let target = repository - .targets() - .signed - .targets - .get(target_name) - .ok_or_else(|| SigstoreError::TufTargetNotFoundError(target_name.raw().to_string()))?; - - if local_file.exists() { - let data = fs::read_to_string(local_file)?; - let local_checksum = Sha256::digest(data.clone()); - let expected_digest: Vec = target.hashes.sha256.to_vec(); - - if local_checksum.as_slice() == expected_digest.as_slice() { - // local data is not outdated - Ok((false, Some(data))) - } else { - Ok((true, None)) - } - } else { - Ok((true, None)) - } -} - -/// Gets the goods from a read and makes a Vec -fn read_to_end(mut reader: R) -> Result> { - let mut v = Vec::new(); - reader.read_to_end(&mut v)?; - Ok(v) -} - -#[cfg(test)] -mod tests { - use super::super::constants::*; - use super::*; - use std::path::PathBuf; - use tempfile::TempDir; - - /// Returns the path to our test data directory - fn test_data() -> PathBuf { - PathBuf::from(env!("CARGO_MANIFEST_DIR")) - .join("tests") - .join("data") - } - - fn local_tuf_repo() -> Result { - let metadata_base_path = test_data().join("repository"); - let targets_base_path = metadata_base_path.join("targets"); - - let metadata_base_url = format!( - "file://{}", - metadata_base_path - .to_str() - .ok_or_else(|| SigstoreError::UnexpectedError(String::from( - "Cannot convert metadata_base_path into a str" - )))? - ); - let metadata_base_url = url::Url::parse(&metadata_base_url).map_err(|_| { - SigstoreError::UnexpectedError(String::from( - "Cannot convert metadata_base_url into a URL", - )) - })?; - - let target_base_url = format!( - "file://{}", - targets_base_path - .to_str() - .ok_or_else(|| SigstoreError::UnexpectedError(String::from( - "Cannot convert targets_base_path into a str" - )))? 
- ); - let target_base_url = url::Url::parse(&target_base_url).map_err(|_| { - SigstoreError::UnexpectedError(String::from( - "Cannot convert targets_base_url into a URL", - )) - })?; - // It's fine to ignore timestamp.json expiration inside of test env - let repo = - RepositoryLoader::new(SIGSTORE_ROOT.as_bytes(), metadata_base_url, target_base_url) - .expiration_enforcement(tough::ExpirationEnforcement::Unsafe) - .load() - .map_err(Box::new)?; - Ok(repo) - } - - #[test] - fn get_files_without_using_local_cache() { - let repository = local_tuf_repo().expect("Local TUF repo should not fail"); - let helper = RepositoryHelper { - repository, - checkout_dir: None, - }; - - let mut actual = helper.fulcio_certs().expect("fulcio certs cannot be read"); - actual.sort(); - let mut expected: Vec = - ["fulcio.crt.pem", "fulcio_v1.crt.pem"] - .iter() - .map(|filename| { - let data = fs::read( - test_data() - .join("repository") - .join("targets") - .join(filename), - ) - .unwrap_or_else(|_| panic!("cannot read {} from test data", filename)); - crate::registry::Certificate { - data, - encoding: crate::registry::CertificateEncoding::Pem, - } - }) - .collect(); - expected.sort(); - - assert_eq!( - actual, expected, - "The fulcio cert read from the TUF repository is not what was expected" - ); - - let actual = helper.rekor_pub_key().expect("rekor key cannot be read"); - let expected = fs::read( - test_data() - .join("repository") - .join("targets") - .join("rekor.pub"), - ) - .expect("cannot read rekor key from test data"); - - assert_eq!( - actual, expected, - "The rekor key read from the TUF repository is not what was expected" - ); - } - - #[test] - fn download_files_to_local_cache() { - let cache_dir = TempDir::new().expect("Cannot create temp cache dir"); - - let repository = local_tuf_repo().expect("Local TUF repo should not fail"); - let helper = RepositoryHelper { - repository, - checkout_dir: Some(cache_dir.path().to_path_buf()), - }; - - let mut actual = helper.fulcio_certs().expect("fulcio certs cannot be read"); - actual.sort(); - let mut expected: Vec = - ["fulcio.crt.pem", "fulcio_v1.crt.pem"] - .iter() - .map(|filename| { - let data = fs::read( - test_data() - .join("repository") - .join("targets") - .join(filename), - ) - .unwrap_or_else(|_| panic!("cannot read {} from test data", filename)); - crate::registry::Certificate { - data, - encoding: crate::registry::CertificateEncoding::Pem, - } - }) - .collect(); - expected.sort(); - - assert_eq!( - actual, expected, - "The fulcio cert read from the cache dir is not what was expected" - ); - - let expected = helper.rekor_pub_key().expect("rekor key cannot be read"); - let actual = fs::read(cache_dir.path().join("rekor.pub")) - .expect("cannot read rekor key from cache dir"); - - assert_eq!( - actual, expected, - "The rekor key read from the cache dir is not what was expected" - ); - } - - #[test] - fn update_local_cache() { - let cache_dir = TempDir::new().expect("Cannot create temp cache dir"); - - // put some outdated files inside of the cache - for filename in &["fulcio.crt.pem", "fulcio_v1.crt.pem"] { - fs::write(cache_dir.path().join(filename), b"fake fulcio") - .expect("Cannot write file to cache dir"); - } - fs::write( - cache_dir.path().join(SIGSTORE_REKOR_PUB_KEY_TARGET), - b"fake rekor", - ) - .expect("Cannot write file to cache dir"); - - let repository = local_tuf_repo().expect("Local TUF repo should not fail"); - let helper = RepositoryHelper { - repository, - checkout_dir: Some(cache_dir.path().to_path_buf()), - }; - - let mut 
actual = helper.fulcio_certs().expect("fulcio certs cannot be read"); - actual.sort(); - let mut expected: Vec = - ["fulcio.crt.pem", "fulcio_v1.crt.pem"] - .iter() - .map(|filename| { - let data = fs::read( - test_data() - .join("repository") - .join("targets") - .join(filename), - ) - .unwrap_or_else(|_| panic!("cannot read {} from test data", filename)); - crate::registry::Certificate { - data, - encoding: crate::registry::CertificateEncoding::Pem, - } - }) - .collect(); - expected.sort(); - - assert_eq!( - actual, expected, - "The fulcio cert read from the TUF repository is not what was expected" - ); - - let expected = helper.rekor_pub_key().expect("rekor key cannot be read"); - let actual = fs::read(cache_dir.path().join("rekor.pub")) - .expect("cannot read rekor key from cache dir"); - - assert_eq!( - actual, expected, - "The rekor key read from the cache dir is not what was expected" - ); - } -} diff --git a/src/trust/sigstore/trustroot.rs b/src/trust/sigstore/trustroot.rs deleted file mode 100644 index aeb321fd92..0000000000 --- a/src/trust/sigstore/trustroot.rs +++ /dev/null @@ -1,194 +0,0 @@ -#![allow(dead_code)] - -// HACK(jl): protobuf-specs schemas are currently compiled for direct dependencies of the Bundle schema. -// See note https://github.com/sigstore/protobuf-specs/blob/main/gen/pb-rust/src/lib.rs#L1-L23 -// HACK(ap): We should probably use definitions from sigstore-protobuf-specs, but -// the autogenerated definitions are unergonomic. Declare it locally here. - -use chrono::{DateTime, Utc}; -use serde::{Deserialize, Serialize}; -use serde_with::base64::Base64; - -use serde_with::serde_as; - -#[derive(Serialize, Deserialize, Debug, PartialEq)] -#[allow(non_camel_case_types)] -/// Only a subset of the secure hash standard algorithms are supported. -/// See for more -/// details. -/// UNSPECIFIED SHOULD not be used, primary reason for inclusion is to force -/// any proto JSON serialization to emit the used hash algorithm, as default -/// option is to *omit* the default value of an enum (which is the first -/// value, represented by '0'. -pub(crate) enum HashAlgorithm { - HASH_ALGORITHM_UNSPECIFIED = 0, - SHA2_256 = 1, -} - -#[derive(Serialize, Deserialize, Debug, PartialEq)] -#[allow(non_camel_case_types)] -/// Details of a specific public key, capturing the the key encoding method, -/// and signature algorithm. -/// To avoid the possibility of contradicting formats such as PKCS1 with -/// ED25519 the valid permutations are listed as a linear set instead of a -/// cartesian set (i.e one combined variable instead of two, one for encoding -/// and one for the signature algorithm). -pub(crate) enum PublicKeyDetails { - PUBLIC_KEY_DETAILS_UNSPECIFIED = 0, - // RSA - PKCS1_RSA_PKCS1V5 = 1, // See RFC8017 - PKCS1_RSA_PSS = 2, // See RFC8017 - PKIX_RSA_PKCS1V5 = 3, - PKIX_RSA_PSS = 4, - // ECDSA - PKIX_ECDSA_P256_SHA_256 = 5, // See NIST FIPS 186-4 - PKIX_ECDSA_P256_HMAC_SHA_256 = 6, // See RFC6979 - // Ed 25519 - PKIX_ED25519 = 7, // See RFC8032 -} - -#[serde_as] -#[derive(Serialize, Deserialize, Debug, PartialEq)] -#[serde(rename_all = "camelCase")] -/// LogId captures the identity of a transparency log. -pub(crate) struct LogId { - #[serde_as(as = "Base64")] - pub key_id: Vec, -} - -#[derive(Serialize, Deserialize, Debug, PartialEq)] -#[serde(rename_all = "camelCase")] -/// The time range is closed and includes both the start and end times, -/// (i.e., [start, end]). -/// End is optional to be able to capture a period that has started but -/// has no known end. 
-pub(crate) struct TimeRange { - pub start: DateTime, - pub end: Option>, -} - -#[serde_as] -#[derive(Serialize, Deserialize, Debug, PartialEq)] -#[serde(rename_all = "camelCase")] -pub(crate) struct PublicKey { - #[serde_as(as = "Option")] - pub raw_bytes: Option>, - pub key_details: PublicKeyDetails, - pub valid_for: Option, -} - -#[derive(Serialize, Deserialize, Debug, PartialEq)] -#[serde(rename_all = "camelCase")] -pub(crate) struct DistinguishedName { - pub organization: String, - pub common_name: String, -} - -#[serde_as] -#[derive(Serialize, Deserialize, Debug, PartialEq)] -#[serde(rename_all = "camelCase")] -pub(crate) struct X509Certificate { - #[serde_as(as = "Base64")] - pub raw_bytes: Vec, -} - -#[derive(Serialize, Deserialize, Debug, PartialEq)] -#[serde(rename_all = "camelCase")] -/// A chain of X.509 certificates. -pub(crate) struct X509CertificateChain { - pub certificates: Vec, -} - -#[derive(Serialize, Deserialize, Debug, PartialEq)] -#[serde(rename_all = "camelCase")] -/// TransparencyLogInstance describes the immutable parameters from a -/// transparency log. -/// See https://www.rfc-editor.org/rfc/rfc9162.html#name-log-parameters -/// for more details. -/// The included parameters are the minimal set required to identify a log, -/// and verify an inclusion proof/promise. -pub(crate) struct TransparencyLogInstance { - pub base_url: String, - pub hash_algorithm: HashAlgorithm, - pub public_key: PublicKey, - pub log_id: LogId, -} - -#[derive(Serialize, Deserialize, Debug, PartialEq)] -#[serde(rename_all = "camelCase")] -/// CertificateAuthority enlists the information required to identify which -/// CA to use and perform signature verification. -pub(crate) struct CertificateAuthority { - pub subject: DistinguishedName, - pub uri: Option, - pub cert_chain: X509CertificateChain, - pub valid_for: TimeRange, -} - -#[derive(Serialize, Deserialize, Debug, PartialEq)] -#[serde(rename_all = "camelCase")] -/// TrustedRoot describes the client's complete set of trusted entities. -/// How the TrustedRoot is populated is not specified, but can be a -/// combination of many sources such as TUF repositories, files on disk etc. -/// -/// The TrustedRoot is not meant to be used for any artifact verification, only -/// to capture the complete/global set of trusted verification materials. -/// When verifying an artifact, based on the artifact and policies, a selection -/// of keys/authorities are expected to be extracted and provided to the -/// verification function. This way the set of keys/authorities can be kept to -/// a minimal set by the policy to gain better control over what signatures -/// that are allowed. -/// -/// The embedded transparency logs, CT logs, CAs and TSAs MUST include any -/// previously used instance -- otherwise signatures made in the past cannot -/// be verified. -/// The currently used instances MUST NOT have their 'end' timestamp set in -/// their 'valid_for' attribute for easy identification. -/// All the listed instances SHOULD be sorted by the 'valid_for' in ascending -/// order, that is, the oldest instance first and the current instance last. 
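These hand-written mirror types go away because `trusted_root.json` is now deserialized straight into the generated `sigstore_protobuf_specs` definitions, as shown in `from_tough` above. A minimal sketch of that path (assumes `serde_json` and the `sigstore_protobuf_specs` dependency added by this patch; the loop over `tlogs` only illustrates the Option-heavy shape of the generated types):

```rust
use sigstore_protobuf_specs::dev::sigstore::trustroot::v1::TrustedRoot;

/// Deserialize a `trusted_root.json` into the generated protobuf-specs type
/// that replaces the structs being deleted here.
fn parse_trusted_root(json: &[u8]) -> Result<TrustedRoot, serde_json::Error> {
    let root: TrustedRoot = serde_json::from_slice(json)?;

    // The generated types use Option heavily, so absent fields become `None`
    // rather than deserialization errors; the new accessors filter on that.
    for tlog in &root.tlogs {
        let _raw_key = tlog.public_key.as_ref().and_then(|k| k.raw_bytes.as_ref());
    }

    Ok(root)
}
```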
-pub(crate) struct TrustedRoot { - pub media_type: String, - pub tlogs: Vec, - pub certificate_authorities: Vec, - pub ctlogs: Vec, - pub timestamp_authorities: Vec, -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn tuf_serde_as_base64() { - let data = X509Certificate { - raw_bytes: b"Hello World".to_vec(), // NOTE(jl): value not representative - }; - let json = serde_json::json!({"rawBytes": "SGVsbG8gV29ybGQ=",}); - - assert_eq!(json, serde_json::to_value(&data).unwrap()); - assert_eq!(data, serde_json::from_value(json).unwrap()); - } - - #[test] - fn tuf_serde_as_nested_structure_base64() { - let data = PublicKey { - raw_bytes: Some(b"Hello World".to_vec()), - key_details: PublicKeyDetails::PKIX_ED25519, - valid_for: Some(TimeRange { - start: DateTime::from_timestamp(1_500_000_000, 0).unwrap(), - end: None, - }), - }; - let json = serde_json::json!({ - "rawBytes": "SGVsbG8gV29ybGQ=", - "keyDetails": "PKIX_ED25519", - "validFor": { - "start": "2017-07-14T02:40:00Z", - "end": None::> - } - }); - - assert_eq!(json, serde_json::to_value(&data).unwrap()); - assert_eq!(data, serde_json::from_value(json).unwrap()); - } -} diff --git a/tests/conformance/Cargo.toml b/tests/conformance/Cargo.toml index 6c0c5d16a9..1f2cc3d92e 100644 --- a/tests/conformance/Cargo.toml +++ b/tests/conformance/Cargo.toml @@ -8,7 +8,10 @@ license = "Apache-2.0" [dependencies] clap = { version = "4.0.8", features = ["derive"] } -sigstore = { path = "../../" } +anyhow = "1.0.75" +serde_json = "1.0.107" +sigstore = { path = "../../", default-features = false, features = ["bundle", "sigstore-trust-root", "full-native-tls"] } +tracing-subscriber = "0.3" [[bin]] name = "sigstore" diff --git a/tests/conformance/conformance.rs b/tests/conformance/conformance.rs index c52c822fe1..cccc0fcf7c 100644 --- a/tests/conformance/conformance.rs +++ b/tests/conformance/conformance.rs @@ -16,7 +16,14 @@ // CLI implemented to specification: // https://github.com/sigstore/sigstore-conformance/blob/main/docs/cli_protocol.md +use std::{fs, process::exit}; + use clap::{Parser, Subcommand}; +use sigstore::{ + bundle::sign::SigningContext, + bundle::verify::{blocking::Verifier, policy}, + oauth::IdentityToken, +}; #[derive(Parser, Debug)] struct Cli { @@ -105,5 +112,63 @@ struct VerifyBundle { } fn main() { + tracing_subscriber::fmt::init(); let cli = Cli::parse(); + + let result = match cli.command { + Commands::SignBundle(args) => sign_bundle(args), + Commands::VerifyBundle(args) => verify_bundle(args), + _ => unimplemented!("sig/cert commands"), + }; + + if let Err(error) = result { + eprintln!("Operation failed:\n{error:?}"); + exit(-1); + } + + eprintln!("Operation succeeded!"); +} + +fn sign_bundle(args: SignBundle) -> anyhow::Result<()> { + let SignBundle { + identity_token, + bundle, + artifact, + } = args; + let identity_token = IdentityToken::try_from(identity_token.as_str())?; + let bundle = fs::File::create(bundle)?; + let mut artifact = fs::File::open(artifact)?; + + let context = SigningContext::production()?; + let signer = context.blocking_signer(identity_token); + + let signing_artifact = signer?.sign(&mut artifact)?; + let bundle_data = signing_artifact.to_bundle(); + + serde_json::to_writer(bundle, &bundle_data)?; + + Ok(()) +} + +fn verify_bundle(args: VerifyBundle) -> anyhow::Result<()> { + let VerifyBundle { + bundle, + certificate_identity, + certificate_oidc_issuer, + artifact, + } = args; + let bundle = fs::File::open(bundle)?; + let mut artifact = fs::File::open(artifact)?; + + let bundle: 
sigstore::bundle::Bundle = serde_json::from_reader(bundle)?; + let verifier = Verifier::production()?; + + verifier.verify( + &mut artifact, + bundle, + &policy::Identity::new(certificate_identity, certificate_oidc_issuer), + true, + )?; + + Ok(()) } diff --git a/tests/data/repository/1.root.json b/tests/data/repository/1.root.json deleted file mode 100644 index dcc71f963a..0000000000 --- a/tests/data/repository/1.root.json +++ /dev/null @@ -1,130 +0,0 @@ -{ - "signatures": [ - { - "keyid": "2f64fb5eac0cf94dd39bb45308b98920055e9a0d8e012a7220787834c60aef97", - "sig": "30450221008a35d51da0f845301a5eac98ad0df00a934f59b709c1eaf81c86be734d9356f80220742942325599749800f52675f6efe124345980a2a636c0dc76f9caf9fc3123b0" - }, - { - "keyid": "bdde902f5ec668179ff5ca0dabf7657109287d690bf97e230c21d65f99155c62", - "sig": "3045022100ef9157ece2a09baec1eab80adfc00b04da20b1f9a0d1b47c5dabc4506719ef2c022074f72acd57398e4ddc8c2a5040df902961e9615dca48f3fbe38cbb506e500066" - }, - { - "keyid": "eaf22372f417dd618a46f6c627dbc276e9fd30a004fc94f9be946e73f8bd090b", - "sig": "30450220420fdc9a09cd069b8b15fd8db9cedf7d0dee75871bd1cfee77c926d4120a770002210097553b5ad0d6b4a13902ed37509638bb63a9009f78230cd56c802909ffbfead7" - }, - { - "keyid": "f40f32044071a9365505da3d1e3be6561f6f22d0e60cf51df783999f6c3429cb", - "sig": "304502202aaf32e66f90752f658672b085ecfe45cc1ad31ee6cf5c9ad05f3267685f8d88022100b5df02acdaa371123db9d7a42219553fe079b230b168833e951be7ee56ded347" - }, - { - "keyid": "f505595165a177a41750a8e864ed1719b1edfccd5a426fd2c0ffda33ce7ff209", - "sig": "304402205d420c7d05c58980c1c9f7d221f53b5334aae27a447d2a91c2ceddd685269749022039ec83e51f8e1779d7f0142dfa4a5bbecfe327fc0b91b7416090fea2416fd53a" - } - ], - "signed": { - "_type": "root", - "consistent_snapshot": false, - "expires": "2021-12-18T13:28:12.99008-06:00", - "keys": { - "2f64fb5eac0cf94dd39bb45308b98920055e9a0d8e012a7220787834c60aef97": { - "keyid_hash_algorithms": [ - "sha256", - "sha512" - ], - "keytype": "ecdsa-sha2-nistp256", - "keyval": { - "public": "04cbc5cab2684160323c25cd06c3307178a6b1d1c9b949328453ae473c5ba7527e35b13f298b41633382241f3fd8526c262d43b45adee5c618fa0642c82b8a9803" - }, - "scheme": "ecdsa-sha2-nistp256" - }, - "bdde902f5ec668179ff5ca0dabf7657109287d690bf97e230c21d65f99155c62": { - "keyid_hash_algorithms": [ - "sha256", - "sha512" - ], - "keytype": "ecdsa-sha2-nistp256", - "keyval": { - "public": "04a71aacd835dc170ba6db3fa33a1a33dee751d4f8b0217b805b9bd3242921ee93672fdcfd840576c5bb0dc0ed815edf394c1ee48c2b5e02485e59bfc512f3adc7" - }, - "scheme": "ecdsa-sha2-nistp256" - }, - "eaf22372f417dd618a46f6c627dbc276e9fd30a004fc94f9be946e73f8bd090b": { - "keyid_hash_algorithms": [ - "sha256", - "sha512" - ], - "keytype": "ecdsa-sha2-nistp256", - "keyval": { - "public": "04117b33dd265715bf23315e368faa499728db8d1f0a377070a1c7b1aba2cc21be6ab1628e42f2cdd7a35479f2dce07b303a8ba646c55569a8d2a504ba7e86e447" - }, - "scheme": "ecdsa-sha2-nistp256" - }, - "f40f32044071a9365505da3d1e3be6561f6f22d0e60cf51df783999f6c3429cb": { - "keyid_hash_algorithms": [ - "sha256", - "sha512" - ], - "keytype": "ecdsa-sha2-nistp256", - "keyval": { - "public": "04cc1cd53a61c23e88cc54b488dfae168a257c34fac3e88811c55962b24cffbfecb724447999c54670e365883716302e49da57c79a33cd3e16f81fbc66f0bcdf48" - }, - "scheme": "ecdsa-sha2-nistp256" - }, - "f505595165a177a41750a8e864ed1719b1edfccd5a426fd2c0ffda33ce7ff209": { - "keyid_hash_algorithms": [ - "sha256", - "sha512" - ], - "keytype": "ecdsa-sha2-nistp256", - "keyval": { - "public": 
"048a78a44ac01099890d787e5e62afc29c8ccb69a70ec6549a6b04033b0a8acbfb42ab1ab9c713d225cdb52b858886cf46c8e90a7f3b9e6371882f370c259e1c5b" - }, - "scheme": "ecdsa-sha2-nistp256" - } - }, - "roles": { - "root": { - "keyids": [ - "2f64fb5eac0cf94dd39bb45308b98920055e9a0d8e012a7220787834c60aef97", - "bdde902f5ec668179ff5ca0dabf7657109287d690bf97e230c21d65f99155c62", - "eaf22372f417dd618a46f6c627dbc276e9fd30a004fc94f9be946e73f8bd090b", - "f40f32044071a9365505da3d1e3be6561f6f22d0e60cf51df783999f6c3429cb", - "f505595165a177a41750a8e864ed1719b1edfccd5a426fd2c0ffda33ce7ff209" - ], - "threshold": 3 - }, - "snapshot": { - "keyids": [ - "2f64fb5eac0cf94dd39bb45308b98920055e9a0d8e012a7220787834c60aef97", - "bdde902f5ec668179ff5ca0dabf7657109287d690bf97e230c21d65f99155c62", - "eaf22372f417dd618a46f6c627dbc276e9fd30a004fc94f9be946e73f8bd090b", - "f40f32044071a9365505da3d1e3be6561f6f22d0e60cf51df783999f6c3429cb", - "f505595165a177a41750a8e864ed1719b1edfccd5a426fd2c0ffda33ce7ff209" - ], - "threshold": 3 - }, - "targets": { - "keyids": [ - "2f64fb5eac0cf94dd39bb45308b98920055e9a0d8e012a7220787834c60aef97", - "bdde902f5ec668179ff5ca0dabf7657109287d690bf97e230c21d65f99155c62", - "eaf22372f417dd618a46f6c627dbc276e9fd30a004fc94f9be946e73f8bd090b", - "f40f32044071a9365505da3d1e3be6561f6f22d0e60cf51df783999f6c3429cb", - "f505595165a177a41750a8e864ed1719b1edfccd5a426fd2c0ffda33ce7ff209" - ], - "threshold": 3 - }, - "timestamp": { - "keyids": [ - "2f64fb5eac0cf94dd39bb45308b98920055e9a0d8e012a7220787834c60aef97", - "bdde902f5ec668179ff5ca0dabf7657109287d690bf97e230c21d65f99155c62", - "eaf22372f417dd618a46f6c627dbc276e9fd30a004fc94f9be946e73f8bd090b", - "f40f32044071a9365505da3d1e3be6561f6f22d0e60cf51df783999f6c3429cb", - "f505595165a177a41750a8e864ed1719b1edfccd5a426fd2c0ffda33ce7ff209" - ], - "threshold": 3 - } - }, - "spec_version": "1.0", - "version": 1 - } -} \ No newline at end of file diff --git a/tests/data/repository/2.root.json b/tests/data/repository/2.root.json deleted file mode 100644 index 386ebe62c1..0000000000 --- a/tests/data/repository/2.root.json +++ /dev/null @@ -1,144 +0,0 @@ -{ - "signatures": [ - { - "keyid": "2f64fb5eac0cf94dd39bb45308b98920055e9a0d8e012a7220787834c60aef97", - "sig": "3046022100d3ea59490b253beae0926c6fa63f54336dea1ed700555be9f27ff55cd347639c0221009157d1ba012cead81948a4ab777d355451d57f5c4a2d333fc68d2e3f358093c2" - }, - { - "keyid": "bdde902f5ec668179ff5ca0dabf7657109287d690bf97e230c21d65f99155c62", - "sig": "304502206eaef40564403ce572c6d062e0c9b0aab5e0223576133e081e1b495e8deb9efd02210080fd6f3464d759601b4afec596bbd5952f3a224cd06ed1cdfc3c399118752ba2" - }, - { - "keyid": "eaf22372f417dd618a46f6c627dbc276e9fd30a004fc94f9be946e73f8bd090b", - "sig": "304502207baace02f56d8e6069f10b6ff098a26e7f53a7f9324ad62cffa0557bdeb9036c022100fb3032baaa090d0040c3f2fd872571c84479309b773208601d65948df87a9720" - }, - { - "keyid": "f40f32044071a9365505da3d1e3be6561f6f22d0e60cf51df783999f6c3429cb", - "sig": "304402205180c01905505dd88acd7a2dad979dd75c979b3722513a7bdedac88c6ae8dbeb022056d1ddf7a192f0b1c2c90ff487de2fb3ec9f0c03f66ea937c78d3b6a493504ca" - }, - { - "keyid": "f505595165a177a41750a8e864ed1719b1edfccd5a426fd2c0ffda33ce7ff209", - "sig": "3046022100c8806d4647c514d80fd8f707d3369444c4fd1d0812a2d25f828e564c99790e3f022100bb51f12e862ef17a7d3da2ac103bebc5c7e792237006c4cafacd76267b249c2f" - } - ], - "signed": { - "_type": "root", - "consistent_snapshot": false, - "expires": "2022-05-11T19:09:02.663975009Z", - "keys": { - "2f64fb5eac0cf94dd39bb45308b98920055e9a0d8e012a7220787834c60aef97": { - 
"keyid_hash_algorithms": [ - "sha256", - "sha512" - ], - "keytype": "ecdsa-sha2-nistp256", - "keyval": { - "public": "04cbc5cab2684160323c25cd06c3307178a6b1d1c9b949328453ae473c5ba7527e35b13f298b41633382241f3fd8526c262d43b45adee5c618fa0642c82b8a9803" - }, - "scheme": "ecdsa-sha2-nistp256" - }, - "b6710623a30c010738e64c5209d367df1c0a18cf90e6ab5292fb01680f83453d": { - "keyid_hash_algorithms": [ - "sha256", - "sha512" - ], - "keytype": "ecdsa-sha2-nistp256", - "keyval": { - "public": "04fa1a3e42f2300cd3c5487a61509348feb1e936920fef2f83b7cd5dbe7ba045f538725ab8f18a666e6233edb7e0db8766c8dc336633449c5e1bbe0c182b02df0b" - }, - "scheme": "ecdsa-sha2-nistp256" - }, - "bdde902f5ec668179ff5ca0dabf7657109287d690bf97e230c21d65f99155c62": { - "keyid_hash_algorithms": [ - "sha256", - "sha512" - ], - "keytype": "ecdsa-sha2-nistp256", - "keyval": { - "public": "04a71aacd835dc170ba6db3fa33a1a33dee751d4f8b0217b805b9bd3242921ee93672fdcfd840576c5bb0dc0ed815edf394c1ee48c2b5e02485e59bfc512f3adc7" - }, - "scheme": "ecdsa-sha2-nistp256" - }, - "eaf22372f417dd618a46f6c627dbc276e9fd30a004fc94f9be946e73f8bd090b": { - "keyid_hash_algorithms": [ - "sha256", - "sha512" - ], - "keytype": "ecdsa-sha2-nistp256", - "keyval": { - "public": "04117b33dd265715bf23315e368faa499728db8d1f0a377070a1c7b1aba2cc21be6ab1628e42f2cdd7a35479f2dce07b303a8ba646c55569a8d2a504ba7e86e447" - }, - "scheme": "ecdsa-sha2-nistp256" - }, - "f40f32044071a9365505da3d1e3be6561f6f22d0e60cf51df783999f6c3429cb": { - "keyid_hash_algorithms": [ - "sha256", - "sha512" - ], - "keytype": "ecdsa-sha2-nistp256", - "keyval": { - "public": "04cc1cd53a61c23e88cc54b488dfae168a257c34fac3e88811c55962b24cffbfecb724447999c54670e365883716302e49da57c79a33cd3e16f81fbc66f0bcdf48" - }, - "scheme": "ecdsa-sha2-nistp256" - }, - "f505595165a177a41750a8e864ed1719b1edfccd5a426fd2c0ffda33ce7ff209": { - "keyid_hash_algorithms": [ - "sha256", - "sha512" - ], - "keytype": "ecdsa-sha2-nistp256", - "keyval": { - "public": "048a78a44ac01099890d787e5e62afc29c8ccb69a70ec6549a6b04033b0a8acbfb42ab1ab9c713d225cdb52b858886cf46c8e90a7f3b9e6371882f370c259e1c5b" - }, - "scheme": "ecdsa-sha2-nistp256" - }, - "fc61191ba8a516fe386c7d6c97d918e1d241e1589729add09b122725b8c32451": { - "keyid_hash_algorithms": [ - "sha256", - "sha512" - ], - "keytype": "ecdsa-sha2-nistp256", - "keyval": { - "public": "044c7793ab74b9ddd713054e587b8d9c75c5f6025633d0fef7ca855ed5b8d5a474b23598fe33eb4a63630d526f74d4bdaec8adcb51993ed65652d651d7c49203eb" - }, - "scheme": "ecdsa-sha2-nistp256" - } - }, - "roles": { - "root": { - "keyids": [ - "2f64fb5eac0cf94dd39bb45308b98920055e9a0d8e012a7220787834c60aef97", - "bdde902f5ec668179ff5ca0dabf7657109287d690bf97e230c21d65f99155c62", - "eaf22372f417dd618a46f6c627dbc276e9fd30a004fc94f9be946e73f8bd090b", - "f40f32044071a9365505da3d1e3be6561f6f22d0e60cf51df783999f6c3429cb", - "f505595165a177a41750a8e864ed1719b1edfccd5a426fd2c0ffda33ce7ff209" - ], - "threshold": 3 - }, - "snapshot": { - "keyids": [ - "fc61191ba8a516fe386c7d6c97d918e1d241e1589729add09b122725b8c32451" - ], - "threshold": 1 - }, - "targets": { - "keyids": [ - "2f64fb5eac0cf94dd39bb45308b98920055e9a0d8e012a7220787834c60aef97", - "bdde902f5ec668179ff5ca0dabf7657109287d690bf97e230c21d65f99155c62", - "eaf22372f417dd618a46f6c627dbc276e9fd30a004fc94f9be946e73f8bd090b", - "f40f32044071a9365505da3d1e3be6561f6f22d0e60cf51df783999f6c3429cb", - "f505595165a177a41750a8e864ed1719b1edfccd5a426fd2c0ffda33ce7ff209" - ], - "threshold": 3 - }, - "timestamp": { - "keyids": [ - "b6710623a30c010738e64c5209d367df1c0a18cf90e6ab5292fb01680f83453d" - 
], - "threshold": 1 - } - }, - "spec_version": "1.0", - "version": 2 - } -} \ No newline at end of file diff --git a/tests/data/repository/rekor.json b/tests/data/repository/rekor.json deleted file mode 100644 index f86930d537..0000000000 --- a/tests/data/repository/rekor.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "signatures": [ - { - "keyid": "ae0c689c6347ada7359df48934991f4e013193d6ddf3482a5ffb293f74f3b217", - "sig": "3045022076eadd73f6664bac5cc91f12d3a7ddcdd53f9bde661f147651196ff66e7235d1022100f7b3143792405f9e8a75331a05d4128bdf083de302801e99c3d027919a4b03da" - } - ], - "signed": { - "_type": "targets", - "expires": "2022-05-11T19:10:11Z", - "spec_version": "1.0", - "targets": { - "rekor.0.pub": { - "hashes": { - "sha256": "dce5ef715502ec9f3cdfd11f8cc384b31a6141023d3e7595e9908a81cb6241bd", - "sha512": "0ae7705e02db33e814329746a4a0e5603c5bdcd91c96d072158d71011a2695788866565a2fec0fe363eb72cbcaeda39e54c5fe8d416daf9f3101fdba4217ef35" - }, - "length": 178 - } - }, - "version": 1 - } -} \ No newline at end of file diff --git a/tests/data/repository/root.json b/tests/data/repository/root.json deleted file mode 100644 index 386ebe62c1..0000000000 --- a/tests/data/repository/root.json +++ /dev/null @@ -1,144 +0,0 @@ -{ - "signatures": [ - { - "keyid": "2f64fb5eac0cf94dd39bb45308b98920055e9a0d8e012a7220787834c60aef97", - "sig": "3046022100d3ea59490b253beae0926c6fa63f54336dea1ed700555be9f27ff55cd347639c0221009157d1ba012cead81948a4ab777d355451d57f5c4a2d333fc68d2e3f358093c2" - }, - { - "keyid": "bdde902f5ec668179ff5ca0dabf7657109287d690bf97e230c21d65f99155c62", - "sig": "304502206eaef40564403ce572c6d062e0c9b0aab5e0223576133e081e1b495e8deb9efd02210080fd6f3464d759601b4afec596bbd5952f3a224cd06ed1cdfc3c399118752ba2" - }, - { - "keyid": "eaf22372f417dd618a46f6c627dbc276e9fd30a004fc94f9be946e73f8bd090b", - "sig": "304502207baace02f56d8e6069f10b6ff098a26e7f53a7f9324ad62cffa0557bdeb9036c022100fb3032baaa090d0040c3f2fd872571c84479309b773208601d65948df87a9720" - }, - { - "keyid": "f40f32044071a9365505da3d1e3be6561f6f22d0e60cf51df783999f6c3429cb", - "sig": "304402205180c01905505dd88acd7a2dad979dd75c979b3722513a7bdedac88c6ae8dbeb022056d1ddf7a192f0b1c2c90ff487de2fb3ec9f0c03f66ea937c78d3b6a493504ca" - }, - { - "keyid": "f505595165a177a41750a8e864ed1719b1edfccd5a426fd2c0ffda33ce7ff209", - "sig": "3046022100c8806d4647c514d80fd8f707d3369444c4fd1d0812a2d25f828e564c99790e3f022100bb51f12e862ef17a7d3da2ac103bebc5c7e792237006c4cafacd76267b249c2f" - } - ], - "signed": { - "_type": "root", - "consistent_snapshot": false, - "expires": "2022-05-11T19:09:02.663975009Z", - "keys": { - "2f64fb5eac0cf94dd39bb45308b98920055e9a0d8e012a7220787834c60aef97": { - "keyid_hash_algorithms": [ - "sha256", - "sha512" - ], - "keytype": "ecdsa-sha2-nistp256", - "keyval": { - "public": "04cbc5cab2684160323c25cd06c3307178a6b1d1c9b949328453ae473c5ba7527e35b13f298b41633382241f3fd8526c262d43b45adee5c618fa0642c82b8a9803" - }, - "scheme": "ecdsa-sha2-nistp256" - }, - "b6710623a30c010738e64c5209d367df1c0a18cf90e6ab5292fb01680f83453d": { - "keyid_hash_algorithms": [ - "sha256", - "sha512" - ], - "keytype": "ecdsa-sha2-nistp256", - "keyval": { - "public": "04fa1a3e42f2300cd3c5487a61509348feb1e936920fef2f83b7cd5dbe7ba045f538725ab8f18a666e6233edb7e0db8766c8dc336633449c5e1bbe0c182b02df0b" - }, - "scheme": "ecdsa-sha2-nistp256" - }, - "bdde902f5ec668179ff5ca0dabf7657109287d690bf97e230c21d65f99155c62": { - "keyid_hash_algorithms": [ - "sha256", - "sha512" - ], - "keytype": "ecdsa-sha2-nistp256", - "keyval": { - "public": 
"04a71aacd835dc170ba6db3fa33a1a33dee751d4f8b0217b805b9bd3242921ee93672fdcfd840576c5bb0dc0ed815edf394c1ee48c2b5e02485e59bfc512f3adc7" - }, - "scheme": "ecdsa-sha2-nistp256" - }, - "eaf22372f417dd618a46f6c627dbc276e9fd30a004fc94f9be946e73f8bd090b": { - "keyid_hash_algorithms": [ - "sha256", - "sha512" - ], - "keytype": "ecdsa-sha2-nistp256", - "keyval": { - "public": "04117b33dd265715bf23315e368faa499728db8d1f0a377070a1c7b1aba2cc21be6ab1628e42f2cdd7a35479f2dce07b303a8ba646c55569a8d2a504ba7e86e447" - }, - "scheme": "ecdsa-sha2-nistp256" - }, - "f40f32044071a9365505da3d1e3be6561f6f22d0e60cf51df783999f6c3429cb": { - "keyid_hash_algorithms": [ - "sha256", - "sha512" - ], - "keytype": "ecdsa-sha2-nistp256", - "keyval": { - "public": "04cc1cd53a61c23e88cc54b488dfae168a257c34fac3e88811c55962b24cffbfecb724447999c54670e365883716302e49da57c79a33cd3e16f81fbc66f0bcdf48" - }, - "scheme": "ecdsa-sha2-nistp256" - }, - "f505595165a177a41750a8e864ed1719b1edfccd5a426fd2c0ffda33ce7ff209": { - "keyid_hash_algorithms": [ - "sha256", - "sha512" - ], - "keytype": "ecdsa-sha2-nistp256", - "keyval": { - "public": "048a78a44ac01099890d787e5e62afc29c8ccb69a70ec6549a6b04033b0a8acbfb42ab1ab9c713d225cdb52b858886cf46c8e90a7f3b9e6371882f370c259e1c5b" - }, - "scheme": "ecdsa-sha2-nistp256" - }, - "fc61191ba8a516fe386c7d6c97d918e1d241e1589729add09b122725b8c32451": { - "keyid_hash_algorithms": [ - "sha256", - "sha512" - ], - "keytype": "ecdsa-sha2-nistp256", - "keyval": { - "public": "044c7793ab74b9ddd713054e587b8d9c75c5f6025633d0fef7ca855ed5b8d5a474b23598fe33eb4a63630d526f74d4bdaec8adcb51993ed65652d651d7c49203eb" - }, - "scheme": "ecdsa-sha2-nistp256" - } - }, - "roles": { - "root": { - "keyids": [ - "2f64fb5eac0cf94dd39bb45308b98920055e9a0d8e012a7220787834c60aef97", - "bdde902f5ec668179ff5ca0dabf7657109287d690bf97e230c21d65f99155c62", - "eaf22372f417dd618a46f6c627dbc276e9fd30a004fc94f9be946e73f8bd090b", - "f40f32044071a9365505da3d1e3be6561f6f22d0e60cf51df783999f6c3429cb", - "f505595165a177a41750a8e864ed1719b1edfccd5a426fd2c0ffda33ce7ff209" - ], - "threshold": 3 - }, - "snapshot": { - "keyids": [ - "fc61191ba8a516fe386c7d6c97d918e1d241e1589729add09b122725b8c32451" - ], - "threshold": 1 - }, - "targets": { - "keyids": [ - "2f64fb5eac0cf94dd39bb45308b98920055e9a0d8e012a7220787834c60aef97", - "bdde902f5ec668179ff5ca0dabf7657109287d690bf97e230c21d65f99155c62", - "eaf22372f417dd618a46f6c627dbc276e9fd30a004fc94f9be946e73f8bd090b", - "f40f32044071a9365505da3d1e3be6561f6f22d0e60cf51df783999f6c3429cb", - "f505595165a177a41750a8e864ed1719b1edfccd5a426fd2c0ffda33ce7ff209" - ], - "threshold": 3 - }, - "timestamp": { - "keyids": [ - "b6710623a30c010738e64c5209d367df1c0a18cf90e6ab5292fb01680f83453d" - ], - "threshold": 1 - } - }, - "spec_version": "1.0", - "version": 2 - } -} \ No newline at end of file diff --git a/tests/data/repository/snapshot.json b/tests/data/repository/snapshot.json deleted file mode 100644 index 61636531c6..0000000000 --- a/tests/data/repository/snapshot.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "signatures": [ - { - "keyid": "fc61191ba8a516fe386c7d6c97d918e1d241e1589729add09b122725b8c32451", - "sig": "3046022100f59f6f92d8c61519afd0de0642ff45419ac960954cf412549874c247c6ae509902210085da85c9df818c3072c0b7744b75e92d2ee521402d4bac77c985b8fc6d138e41" - } - ], - "signed": { - "_type": "snapshot", - "expires": "2022-01-05T00:40:06Z", - "meta": { - "rekor.json": { - "hashes": { - "sha256": "a7412a87f8d7b330e0380b19a4a76c00357c39a1aa7f56fd87445d4e12faafe4", - "sha512": 
"720cb3c42bac50c5bc3cb7076e730301ef29f1893ea52e25f9393fc05851c7a531638c42d9fc992969805982a2bf51d676e33d28a7382ea589b5a9f87474c63f" - }, - "length": 697, - "version": 1 - }, - "root.json": { - "hashes": { - "sha256": "f5ad897c9414cca99629f400ac3585e41bd8ebb44c5af07fb08dd636a9eced9c", - "sha512": "7445ddfdd338ef786c324fc3d68f75be28cb95b7fb581d2a383e3e5dde18aa17029a5636ec0a22e9631931bbcb34057788311718ea41e21e7cdd3c0de13ede42" - }, - "length": 5297, - "version": 2 - }, - "staging.json": { - "hashes": { - "sha256": "c7f32379c2a76f0ec0af84e86794a8f4fe285e44fb62f336d598810dccdc7343", - "sha512": "5462cb15fe5248a12cc12387a732ad43caf42391361f36113ea3d4b7e5e193cdf39fbe91c309c0691134377cb83afeba50cf6d711537d8280ce16ce9cd8752ba" - }, - "length": 399, - "version": 1 - }, - "targets.json": { - "hashes": { - "sha256": "18d10c07c8d6bd7484772b02dcc988d0abf8a0fa379d5893a502410590c17fe6", - "sha512": "c2ba2a84820288997c8fae264776df7b262dde97c4f9e0320ad354879ce5afabd1d43494734fecffd23253442a14cfe217787de8b65cf7fd1f03130b72a0767c" - }, - "length": 4167, - "version": 2 - } - }, - "spec_version": "1.0", - "version": 6 - } -} \ No newline at end of file diff --git a/tests/data/repository/staging.json b/tests/data/repository/staging.json deleted file mode 100644 index 084010de75..0000000000 --- a/tests/data/repository/staging.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "signatures": [ - { - "keyid": "b811bd53f2d7adcf5d93e6bb4a8ed2e0ca0f83d454a3e51f105c8e8376bc80d4", - "sig": "304502204486f7b23eadb69df87776ac7a4938ac75a8a2b2e93c84c05d962373837ea91c022100aaeb0fa587430f49618711bb4bd0c1092637c22c223d03c0f1b5a09baea0ed9f" - } - ], - "signed": { - "_type": "targets", - "expires": "2022-02-11T20:10:16Z", - "spec_version": "1.0", - "targets": {}, - "version": 1 - } -} \ No newline at end of file diff --git a/tests/data/repository/targets.json b/tests/data/repository/targets.json deleted file mode 100644 index b26926a438..0000000000 --- a/tests/data/repository/targets.json +++ /dev/null @@ -1,117 +0,0 @@ -{ - "signatures": [ - { - "keyid": "2f64fb5eac0cf94dd39bb45308b98920055e9a0d8e012a7220787834c60aef97", - "sig": "3046022100cc1b2ed390e75a112c0fdd6bcbd8bb775300a410f5737ae39996b1858753c8e4022100b591f73370e9378914fb2fab837f700661abd1a74c680f139f6164ec12cb538f" - }, - { - "keyid": "bdde902f5ec668179ff5ca0dabf7657109287d690bf97e230c21d65f99155c62", - "sig": "3045022100bc6c45a125e45507339af96aa63983e847565c769f20d7d71bcd2deb7bd36ea902202bf6bd3b76d434c318287899e53f64b4dc178eb0ba403080f1c4fba88a2177ca" - }, - { - "keyid": "eaf22372f417dd618a46f6c627dbc276e9fd30a004fc94f9be946e73f8bd090b", - "sig": "304502210085d5bc8a158d31536b4e76cddceef25185c7abbe9091b84f5f2b0d615d9b4ee90220136a36fed2d5986c2519b7d165556f20dfe41fddececda48dffa8dec5258cb95" - }, - { - "keyid": "f40f32044071a9365505da3d1e3be6561f6f22d0e60cf51df783999f6c3429cb", - "sig": "304402202fe73a61dfe05b4202bc50f66e52bba3d3475134434dab9576735caed659b03c0220449755a87f4dab9961566f10477204637b2415f87e162b58a23b13327dec53e3" - }, - { - "keyid": "f505595165a177a41750a8e864ed1719b1edfccd5a426fd2c0ffda33ce7ff209", - "sig": "304602210091f453ef75c5178299175734355a65a2fc2d0ee137410f46ba8439d99037fc08022100fc800d15f0b751fa225a77542928f4264835c013054a5c409c674e2ea5a70384" - } - ], - "signed": { - "_type": "targets", - "delegations": { - "keys": { - "ae0c689c6347ada7359df48934991f4e013193d6ddf3482a5ffb293f74f3b217": { - "keyid_hash_algorithms": [ - "sha256", - "sha512" - ], - "keytype": "ecdsa-sha2-nistp256", - "keyval": { - "public": 
"043463588ae9df33a419d1099761245af52aaf7e638b2047bc0f739a62de9808c50a21ea8a1a273799f857f31a1bcb66e6661dd9d5ac7ac3ca260b0b8130c3fed8" - }, - "scheme": "ecdsa-sha2-nistp256" - }, - "b811bd53f2d7adcf5d93e6bb4a8ed2e0ca0f83d454a3e51f105c8e8376bc80d4": { - "keyid_hash_algorithms": [ - "sha256", - "sha512" - ], - "keytype": "ecdsa-sha2-nistp256", - "keyval": { - "public": "041b4b13a6e7110292d284c0dbfc3962a12d2a779a800c99aff59c6afe779296943c75d84aa5bad0be28e4061cf93e0cd3d372d9b2f75ea9f29b907cbccd82006f" - }, - "scheme": "ecdsa-sha2-nistp256" - } - }, - "roles": [ - { - "keyids": [ - "ae0c689c6347ada7359df48934991f4e013193d6ddf3482a5ffb293f74f3b217" - ], - "name": "rekor", - "paths": [ - "rekor.*.pub" - ], - "terminating": true, - "threshold": 1 - }, - { - "keyids": [ - "b811bd53f2d7adcf5d93e6bb4a8ed2e0ca0f83d454a3e51f105c8e8376bc80d4" - ], - "name": "staging", - "paths": [ - "*" - ], - "terminating": false, - "threshold": 1 - } - ] - }, - "expires": "2022-05-11T19:10:16Z", - "spec_version": "1.0", - "targets": { - "artifact.pub": { - "hashes": { - "sha256": "59ebf97a9850aecec4bc39c1f5c1dc46e6490a6b5fd2a6cacdcac0c3a6fc4cbf", - "sha512": "308fd1d1d95d7f80aa33b837795251cc3e886792982275e062409e13e4e236ffc34d676682aa96fdc751414de99c864bf132dde71581fa651c6343905e3bf988" - }, - "length": 177 - }, - "ctfe.pub": { - "hashes": { - "sha256": "7fcb94a5d0ed541260473b990b99a6c39864c1fb16f3f3e594a5a3cebbfe138a", - "sha512": "4b20747d1afe2544238ad38cc0cc3010921b177d60ac743767e0ef675b915489bd01a36606c0ff83c06448622d7160f0d866c83d20f0c0f44653dcc3f9aa0bd4" - }, - "length": 177 - }, - "fulcio.crt.pem": { - "hashes": { - "sha256": "f360c53b2e13495a628b9b8096455badcb6d375b185c4816d95a5d746ff29908", - "sha512": "0713252a7fd17f7f3ab12f88a64accf2eb14b8ad40ca711d7fe8b4ecba3b24db9e9dffadb997b196d3867b8f9ff217faf930d80e4dab4e235c7fc3f07be69224" - }, - "length": 744 - }, - "fulcio_v1.crt.pem": { - "hashes": { - "sha256": "f989aa23def87c549404eadba767768d2a3c8d6d30a8b793f9f518a8eafd2cf5", - "sha512": "f2e33a6dc208cee1f51d33bbea675ab0f0ced269617497985f9a0680689ee7073e4b6f8fef64c91bda590d30c129b3070dddce824c05bc165ac9802f0705cab6" - }, - "length": 740 - }, - "rekor.pub": { - "hashes": { - "sha256": "dce5ef715502ec9f3cdfd11f8cc384b31a6141023d3e7595e9908a81cb6241bd", - "sha512": "0ae7705e02db33e814329746a4a0e5603c5bdcd91c96d072158d71011a2695788866565a2fec0fe363eb72cbcaeda39e54c5fe8d416daf9f3101fdba4217ef35" - }, - "length": 178 - } - }, - "version": 2 - } -} \ No newline at end of file diff --git a/tests/data/repository/targets/artifact.pub b/tests/data/repository/targets/artifact.pub deleted file mode 100644 index d6e745bdd0..0000000000 --- a/tests/data/repository/targets/artifact.pub +++ /dev/null @@ -1,4 +0,0 @@ ------BEGIN PUBLIC KEY----- -MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEhyQCx0E9wQWSFI9ULGwy3BuRklnt -IqozONbbdbqz11hlRJy9c7SG+hdcFl9jE9uE/dwtuwU2MqU9T/cN0YkWww== ------END PUBLIC KEY----- \ No newline at end of file diff --git a/tests/data/repository/targets/ctfe.pub b/tests/data/repository/targets/ctfe.pub deleted file mode 100644 index 1bb1488c99..0000000000 --- a/tests/data/repository/targets/ctfe.pub +++ /dev/null @@ -1,4 +0,0 @@ ------BEGIN PUBLIC KEY----- -MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEbfwR+RJudXscgRBRpKX1XFDy3Pyu -dDxz/SfnRi1fT8ekpfBd2O1uoz7jr3Z8nKzxA69EUQ+eFCFI3zeubPWU7w== ------END PUBLIC KEY----- \ No newline at end of file diff --git a/tests/data/repository/targets/fulcio.crt.pem b/tests/data/repository/targets/fulcio.crt.pem deleted file mode 100644 index 6a06ff300b..0000000000 --- 
a/tests/data/repository/targets/fulcio.crt.pem +++ /dev/null @@ -1,13 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIB+DCCAX6gAwIBAgITNVkDZoCiofPDsy7dfm6geLbuhzAKBggqhkjOPQQDAzAq -MRUwEwYDVQQKEwxzaWdzdG9yZS5kZXYxETAPBgNVBAMTCHNpZ3N0b3JlMB4XDTIx -MDMwNzAzMjAyOVoXDTMxMDIyMzAzMjAyOVowKjEVMBMGA1UEChMMc2lnc3RvcmUu -ZGV2MREwDwYDVQQDEwhzaWdzdG9yZTB2MBAGByqGSM49AgEGBSuBBAAiA2IABLSy -A7Ii5k+pNO8ZEWY0ylemWDowOkNa3kL+GZE5Z5GWehL9/A9bRNA3RbrsZ5i0Jcas -taRL7Sp5fp/jD5dxqc/UdTVnlvS16an+2Yfswe/QuLolRUCrcOE2+2iA5+tzd6Nm -MGQwDgYDVR0PAQH/BAQDAgEGMBIGA1UdEwEB/wQIMAYBAf8CAQEwHQYDVR0OBBYE -FMjFHQBBmiQpMlEk6w2uSu1KBtPsMB8GA1UdIwQYMBaAFMjFHQBBmiQpMlEk6w2u -Su1KBtPsMAoGCCqGSM49BAMDA2gAMGUCMH8liWJfMui6vXXBhjDgY4MwslmN/TJx -Ve/83WrFomwmNf056y1X48F9c4m3a3ozXAIxAKjRay5/aj/jsKKGIkmQatjI8uup -Hr/+CxFvaJWmpYqNkLDGRU+9orzh5hI2RrcuaQ== ------END CERTIFICATE----- \ No newline at end of file diff --git a/tests/data/repository/targets/fulcio_v1.crt.pem b/tests/data/repository/targets/fulcio_v1.crt.pem deleted file mode 100644 index 3afc46bb6e..0000000000 --- a/tests/data/repository/targets/fulcio_v1.crt.pem +++ /dev/null @@ -1,13 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIB9zCCAXygAwIBAgIUALZNAPFdxHPwjeDloDwyYChAO/4wCgYIKoZIzj0EAwMw -KjEVMBMGA1UEChMMc2lnc3RvcmUuZGV2MREwDwYDVQQDEwhzaWdzdG9yZTAeFw0y -MTEwMDcxMzU2NTlaFw0zMTEwMDUxMzU2NThaMCoxFTATBgNVBAoTDHNpZ3N0b3Jl -LmRldjERMA8GA1UEAxMIc2lnc3RvcmUwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAT7 -XeFT4rb3PQGwS4IajtLk3/OlnpgangaBclYpsYBr5i+4ynB07ceb3LP0OIOZdxex -X69c5iVuyJRQ+Hz05yi+UF3uBWAlHpiS5sh0+H2GHE7SXrk1EC5m1Tr19L9gg92j -YzBhMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRY -wB5fkUWlZql6zJChkyLQKsXF+jAfBgNVHSMEGDAWgBRYwB5fkUWlZql6zJChkyLQ -KsXF+jAKBggqhkjOPQQDAwNpADBmAjEAj1nHeXZp+13NWBNa+EDsDP8G1WWg1tCM -WP/WHPqpaVo0jhsweNFZgSs0eE7wYI4qAjEA2WB9ot98sIkoF3vZYdd3/VtWB5b9 -TNMea7Ix/stJ5TfcLLeABLE4BNJOsQ4vnBHJ ------END CERTIFICATE----- \ No newline at end of file diff --git a/tests/data/repository/targets/rekor.0.pub b/tests/data/repository/targets/rekor.0.pub deleted file mode 100644 index 050ef60149..0000000000 --- a/tests/data/repository/targets/rekor.0.pub +++ /dev/null @@ -1,4 +0,0 @@ ------BEGIN PUBLIC KEY----- -MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE2G2Y+2tabdTV5BcGiBIx0a9fAFwr -kBbmLSGtks4L3qX6yYY0zufBnhC8Ur/iy55GhWP/9A/bY2LhC30M9+RYtw== ------END PUBLIC KEY----- diff --git a/tests/data/repository/targets/rekor.json b/tests/data/repository/targets/rekor.json deleted file mode 100644 index f86930d537..0000000000 --- a/tests/data/repository/targets/rekor.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "signatures": [ - { - "keyid": "ae0c689c6347ada7359df48934991f4e013193d6ddf3482a5ffb293f74f3b217", - "sig": "3045022076eadd73f6664bac5cc91f12d3a7ddcdd53f9bde661f147651196ff66e7235d1022100f7b3143792405f9e8a75331a05d4128bdf083de302801e99c3d027919a4b03da" - } - ], - "signed": { - "_type": "targets", - "expires": "2022-05-11T19:10:11Z", - "spec_version": "1.0", - "targets": { - "rekor.0.pub": { - "hashes": { - "sha256": "dce5ef715502ec9f3cdfd11f8cc384b31a6141023d3e7595e9908a81cb6241bd", - "sha512": "0ae7705e02db33e814329746a4a0e5603c5bdcd91c96d072158d71011a2695788866565a2fec0fe363eb72cbcaeda39e54c5fe8d416daf9f3101fdba4217ef35" - }, - "length": 178 - } - }, - "version": 1 - } -} \ No newline at end of file diff --git a/tests/data/repository/targets/rekor.pub b/tests/data/repository/targets/rekor.pub deleted file mode 100644 index 050ef60149..0000000000 --- a/tests/data/repository/targets/rekor.pub +++ /dev/null @@ -1,4 +0,0 @@ ------BEGIN PUBLIC KEY----- 
-MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE2G2Y+2tabdTV5BcGiBIx0a9fAFwr -kBbmLSGtks4L3qX6yYY0zufBnhC8Ur/iy55GhWP/9A/bY2LhC30M9+RYtw== ------END PUBLIC KEY----- diff --git a/tests/data/repository/timestamp.json b/tests/data/repository/timestamp.json deleted file mode 100644 index 8cb4f094b7..0000000000 --- a/tests/data/repository/timestamp.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "signatures": [ - { - "keyid": "b6710623a30c010738e64c5209d367df1c0a18cf90e6ab5292fb01680f83453d", - "sig": "30440220590dc4d9eb4e3b2745315348c1ea5481f29f981dfd6c2d72bde13256a25e0caf02205704352c828451bf1e41bba154db9ecb4e901b4bc47d721a91fabfb84a48c61f" - } - ], - "signed": { - "_type": "timestamp", - "expires": "2022-01-05T00:40:07Z", - "meta": { - "snapshot.json": { - "hashes": { - "sha256": "e202c20580ac4edc7a52ad2bcbe97c5af557c04463f10f2d9a28e2624e0c8edf", - "sha512": "f0b9f17797fe6d89a745f8fc9a39a073823bc04400307711eebe3b00dfe418e4d1d4419697eee29445c9cd5e03c3e24532d4fb03824d7555ecc0de54bd73ffd1" - }, - "length": 1658, - "version": 6 - } - }, - "spec_version": "1.0", - "version": 6 - } -} \ No newline at end of file
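Note: the conformance CLI above is effectively the usage example for the new bundle feature. The sketch below condenses it into a single sign-then-verify round trip; it only reuses calls that already appear in this diff (SigningContext::production, blocking_signer, to_bundle, blocking::Verifier::production, policy::Identity::new). The token, identity, and issuer values are placeholders, and the anyhow error handling mirrors the CLI rather than anything the library prescribes.

use std::fs;

use sigstore::{
    bundle::sign::SigningContext,
    bundle::verify::{blocking::Verifier, policy},
    oauth::IdentityToken,
};

// Condensed sketch of the sign/verify flow exercised by the conformance CLI above.
fn sign_then_verify(token: &str, artifact_path: &str) -> anyhow::Result<()> {
    // Sign: produce a Sigstore bundle for the artifact.
    let identity_token = IdentityToken::try_from(token)?;
    let mut artifact = fs::File::open(artifact_path)?;
    let context = SigningContext::production()?;
    let bundle = context
        .blocking_signer(identity_token)?
        .sign(&mut artifact)?
        .to_bundle();

    // Verify: re-read the artifact and check the bundle against an identity policy.
    // The identity and issuer strings are placeholders; the trailing boolean is
    // passed through unchanged from the conformance CLI above.
    let mut artifact = fs::File::open(artifact_path)?;
    let verifier = Verifier::production()?;
    verifier.verify(
        &mut artifact,
        bundle,
        &policy::Identity::new("signer@example.com", "https://accounts.example.com"),
        true,
    )?;

    Ok(())
}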