diff --git a/Cargo.lock b/Cargo.lock index b876173fb9..e3f78a8309 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1462,6 +1462,7 @@ dependencies = [ "async-trait", "c-kzg", "hashbrown", + "kona-plasma", "kona-primitives", "lru", "miniz_oxide", @@ -1497,6 +1498,21 @@ dependencies = [ "tracing-subscriber", ] +[[package]] +name = "kona-plasma" +version = "0.0.1" +dependencies = [ + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=e3f2f07)", + "alloy-primitives", + "anyhow", + "async-trait", + "kona-primitives", + "serde", + "serde_json", + "tracing", + "tracing-subscriber", +] + [[package]] name = "kona-preimage" version = "0.0.1" diff --git a/crates/derive/Cargo.toml b/crates/derive/Cargo.toml index d69829bf5a..34cee60b9d 100644 --- a/crates/derive/Cargo.toml +++ b/crates/derive/Cargo.toml @@ -17,6 +17,7 @@ alloy-rlp = { workspace = true, features = ["derive"] } # Local kona-primitives = { path = "../primitives", version = "0.0.1" } +kona-plasma = { path = "../plasma", version = "0.0.1" } # External alloy-sol-types = { version = "0.7.1", default-features = false } @@ -42,6 +43,7 @@ alloy-transport-http = { git = "https://github.com/alloy-rs/alloy", rev = "e3f2f reqwest = { version = "0.12", default-features = false, optional = true } [dev-dependencies] +kona-plasma = { path = "../plasma", version = "0.0.1", features = ["default", "test-utils"] } tokio = { version = "1.37", features = ["full"] } proptest = "1.4.0" tracing-subscriber = "0.3.18" @@ -51,7 +53,7 @@ serde_json = { version = "1.0.116", default-features = false } [features] default = ["serde", "k256"] -serde = ["dep:serde", "alloy-primitives/serde", "alloy-consensus/serde", "op-alloy-consensus/serde"] +serde = ["dep:serde", "kona-plasma/serde", "alloy-primitives/serde", "alloy-consensus/serde", "op-alloy-consensus/serde"] k256 = ["alloy-primitives/k256", "alloy-consensus/k256", "op-alloy-consensus/k256"] online = [ "dep:revm-primitives", diff --git a/crates/derive/src/params.rs 
b/crates/derive/src/params.rs index e2c8b60cff..697d87f0ae 100644 --- a/crates/derive/src/params.rs +++ b/crates/derive/src/params.rs @@ -30,13 +30,6 @@ pub const CHANNEL_ID_LENGTH: usize = 16; /// [ChannelID] is an opaque identifier for a channel. pub type ChannelID = [u8; CHANNEL_ID_LENGTH]; -/// `keccak256("ConfigUpdate(uint256,uint8,bytes)")` -pub const CONFIG_UPDATE_TOPIC: B256 = - b256!("1d2b0bda21d56b8bd12d4f94ebacffdfb35f5e226f84b461103bb8beab6353be"); - -/// The initial version of the system config event log. -pub const CONFIG_UPDATE_EVENT_VERSION_0: B256 = B256::ZERO; - /// Frames cannot be larger than 1MB. /// Data transactions that carry frames are generally not larger than 128 KB due to L1 network /// conditions, but we leave space to grow larger anyway (gas limit allows for more data). @@ -45,6 +38,13 @@ pub const MAX_FRAME_LEN: usize = 1000; /// Deposit log event abi signature. pub const DEPOSIT_EVENT_ABI: &str = "TransactionDeposited(address,address,uint256,bytes)"; +/// `keccak256("ConfigUpdate(uint256,uint8,bytes)")` +pub const CONFIG_UPDATE_TOPIC: B256 = + b256!("1d2b0bda21d56b8bd12d4f94ebacffdfb35f5e226f84b461103bb8beab6353be"); + +/// The initial version of the system config event log. +pub const CONFIG_UPDATE_EVENT_VERSION_0: B256 = B256::ZERO; + /// Deposit event abi hash. /// /// This is the keccak256 hash of the deposit event ABI signature. 
diff --git a/crates/derive/src/sources/factory.rs b/crates/derive/src/sources/factory.rs index 1d39d4b275..5e5647e940 100644 --- a/crates/derive/src/sources/factory.rs +++ b/crates/derive/src/sources/factory.rs @@ -3,24 +3,34 @@ use crate::{ sources::{BlobSource, CalldataSource, DataSource, PlasmaSource}, traits::{BlobProvider, ChainProvider, DataAvailabilityProvider}, - types::{BlockInfo, RollupConfig}, + types::{BlockID, BlockInfo, RollupConfig}, }; use alloc::{boxed::Box, fmt::Debug}; use alloy_primitives::{Address, Bytes}; use anyhow::{anyhow, Result}; use async_trait::async_trait; +use kona_plasma::traits::{ChainProvider as PlasmaChainProvider, PlasmaInputFetcher}; /// A factory for creating a calldata and blob provider. #[derive(Debug, Clone, Copy)] -pub struct DataSourceFactory +pub struct DataSourceFactory where C: ChainProvider + Clone, B: BlobProvider + Clone, + PCP: PlasmaChainProvider + Send + Clone, + PIF: PlasmaInputFetcher + Clone, + I: Iterator + Send + Clone, { /// The chain provider to use for the factory. pub chain_provider: C, + /// The plasma chain provider. + pub plasma_chain_provider: PCP, + /// The plasma iterator. + pub plasma_source: I, /// The blob provider pub blob_provider: B, + /// The plasma input fetcher. + pub plasma_input_fetcher: PIF, /// The ecotone timestamp. pub ecotone_timestamp: Option, /// Whether or not plasma is enabled. @@ -29,16 +39,22 @@ where pub signer: Address, } -impl DataSourceFactory +impl DataSourceFactory where C: ChainProvider + Clone + Debug, B: BlobProvider + Clone + Debug, + PCP: PlasmaChainProvider + Send + Clone + Debug, + PIF: PlasmaInputFetcher + Clone + Debug, + I: Iterator + Send + Clone, { /// Creates a new factory. 
- pub fn new(provider: C, blobs: B, cfg: &RollupConfig) -> Self { + pub fn new(provider: C, blobs: B, pcp: PCP, pif: PIF, s: I, cfg: &RollupConfig) -> Self { Self { chain_provider: provider, + plasma_chain_provider: pcp, + plasma_source: s, blob_provider: blobs, + plasma_input_fetcher: pif, ecotone_timestamp: cfg.ecotone_time, plasma_enabled: cfg.is_plasma_enabled(), signer: cfg.genesis.system_config.batcher_addr, @@ -47,13 +63,16 @@ where } #[async_trait] -impl DataAvailabilityProvider for DataSourceFactory +impl DataAvailabilityProvider for DataSourceFactory where C: ChainProvider + Send + Sync + Clone + Debug, B: BlobProvider + Send + Sync + Clone + Debug, + PCP: PlasmaChainProvider + Send + Sync + Clone + Debug, + PIF: PlasmaInputFetcher + Send + Sync + Clone + Debug, + I: Iterator + Send + Sync + Clone + Debug, { type Item = Bytes; - type DataIter = DataSource; + type DataIter = DataSource; async fn open_data( &self, @@ -81,7 +100,13 @@ where }); Ok(source) } else if self.plasma_enabled { - Ok(DataSource::Plasma(PlasmaSource::new(self.chain_provider.clone()))) + let id = BlockID { hash: block_ref.hash, number: block_ref.number }; + Ok(DataSource::Plasma(PlasmaSource::new( + self.plasma_chain_provider.clone(), + self.plasma_input_fetcher.clone(), + self.plasma_source.clone(), + id, + ))) } else { Err(anyhow!("No data source available")) } diff --git a/crates/derive/src/sources/plasma.rs b/crates/derive/src/sources/plasma.rs index d3cac8b6c8..7c3f572ed7 100644 --- a/crates/derive/src/sources/plasma.rs +++ b/crates/derive/src/sources/plasma.rs @@ -1,42 +1,284 @@ //! 
Plasma Data Source use crate::{ - traits::{AsyncIterator, ChainProvider}, - types::StageResult, + traits::AsyncIterator, + types::{ResetError, StageError, StageResult}, }; use alloc::boxed::Box; use alloy_primitives::Bytes; use async_trait::async_trait; +use kona_plasma::{ + traits::{ChainProvider, PlasmaInputFetcher}, + types::{ + decode_keccak256, Keccak256Commitment, PlasmaError, MAX_INPUT_SIZE, TX_DATA_VERSION_1, + }, +}; +use kona_primitives::block::BlockID; /// A plasma data iterator. #[derive(Debug, Clone)] -#[allow(dead_code)] -pub struct PlasmaSource +pub struct PlasmaSource where CP: ChainProvider + Send, + PIF: PlasmaInputFetcher + Send, + I: Iterator, { + /// The plasma input fetcher. + input_fetcher: PIF, /// The chain provider to use for the plasma source. chain_provider: CP, - /// The plasma commitment. - commitment: Bytes, - /// The block number. - block_number: u64, - /// Whether the plasma source is open. - open: bool, + /// A source data iterator. + source: I, + /// Keeps track of a pending commitment so we can keep trying to fetch the input. + commitment: Option, + /// The block Id. + id: BlockID, } -impl PlasmaSource { +impl PlasmaSource +where + CP: ChainProvider + Send, + PIF: PlasmaInputFetcher + Send, + I: Iterator, +{ /// Instantiates a new plasma data source. 
- pub fn new(chain_provider: CP) -> Self { - Self { chain_provider, commitment: Bytes::default(), block_number: 0, open: false } + pub fn new(chain_provider: CP, input_fetcher: PIF, source: I, id: BlockID) -> Self { + Self { chain_provider, input_fetcher, source, id, commitment: None } + } } #[async_trait] -impl AsyncIterator for PlasmaSource { +impl AsyncIterator for PlasmaSource +where + CP: ChainProvider + Send, + PIF: PlasmaInputFetcher + Send, + I: Iterator + Send, +{ type Item = Bytes; async fn next(&mut self) -> Option> { - unimplemented!("Plasma will not be supported until further notice."); + // Process origin syncs the challenge contract events and updates the local challenge states + // before we can proceed to fetch the input data. This function can be called multiple times + // for the same origin and is a no-op if the origin was already processed. It is also called if + // there is no commitment in the current origin. + match self.input_fetcher.advance_l1_origin(&self.chain_provider, self.id).await { + Some(Ok(_)) => { + tracing::debug!("plasma input fetcher - l1 origin advanced"); + } + Some(Err(PlasmaError::ReorgRequired)) => { + tracing::error!("new expired challenge"); + return Some(StageResult::Err(StageError::Reset(ResetError::NewExpiredChallenge))); + } + Some(Err(e)) => { + tracing::error!("failed to advance plasma L1 origin: {:?}", e); + return Some(StageResult::Err(StageError::Temporary(anyhow::anyhow!( + "failed to advance plasma L1 origin: {:?}", + e + )))); + } + None => { + tracing::warn!("l1 origin advance returned None"); + } + } + + // Set the commitment if it isn't available. + if self.commitment.is_none() { + // The l1 source returns the input commitment for the batch.
+ let data = match self.source.next().ok_or(PlasmaError::NotEnoughData) { + Ok(d) => d, + Err(e) => { + tracing::warn!("failed to pull next data from the plasma source iterator"); + return Some(Err(StageError::Plasma(e))); + } + }; + + // If the data is empty, + if data.is_empty() { + tracing::warn!("empty data from plasma source"); + return Some(Err(StageError::Plasma(PlasmaError::NotEnoughData))); + } + + // If the tx data type is not plasma, we forward it downstream to let the next + // steps validate and potentially parse it as L1 DA inputs. + if data[0] != TX_DATA_VERSION_1 { + tracing::info!("non-plasma tx data, forwarding downstream"); + return Some(Ok(data)); + } + + // Validate that the batcher inbox data is a commitment. + self.commitment = match decode_keccak256(&data[1..]) { + Ok(c) => Some(c), + Err(e) => { + tracing::warn!("invalid commitment: {}, err: {}", data, e); + return self.next().await; + } + }; + } + + // Use the commitment to fetch the input from the plasma DA provider. + let commitment = self.commitment.as_ref().expect("the commitment must be set"); + + // Fetch the input data from the plasma DA provider. + let data = match self + .input_fetcher + .get_input(&self.chain_provider, commitment.clone(), self.id) + .await + { + Some(Ok(data)) => data, + Some(Err(PlasmaError::ReorgRequired)) => { + // The plasma fetcher may call for a reorg if the pipeline is stalled and the plasma + // DA manager continued syncing origins detached from the pipeline + // origin. + tracing::warn!("challenge for a new previously derived commitment expired"); + return Some(Err(StageError::Reset(ResetError::ReorgRequired))); + } + Some(Err(PlasmaError::ChallengeExpired)) => { + // This commitment was challenged and the challenge expired. + tracing::warn!("challenge expired, skipping batch"); + self.commitment = None; + // Skip the input. 
+ return self.next().await + } + Some(Err(PlasmaError::MissingPastWindow)) => { + tracing::warn!("missing past window, skipping batch"); + return Some(Err(StageError::Critical(anyhow::anyhow!( + "data for commitment {:?} not available", + commitment + )))); + } + Some(Err(PlasmaError::ChallengePending)) => { + // Continue stepping without slowing down. + tracing::debug!("plasma challenge pending, proceeding"); + return Some(Err(StageError::NotEnoughData)); + } + Some(Err(e)) => { + // Return temporary error so we can keep retrying. + return Some(Err(StageError::Temporary(anyhow::anyhow!( + "failed to fetch input data with comm {:?} from da service: {:?}", + commitment, + e + )))); + } + None => { + // Return temporary error so we can keep retrying. + return Some(Err(StageError::Temporary(anyhow::anyhow!( + "failed to fetch input data with comm {:?} from da service", + commitment + )))); + } + }; + + // The data length is limited to a max size to ensure they can be challenged in the DA + // contract. + if data.len() > MAX_INPUT_SIZE { + tracing::warn!("input data (len {}) exceeds max size {MAX_INPUT_SIZE}", data.len()); + self.commitment = None; + return self.next().await; + } + + // Reset the commitment so we can fetch the next one from the source at the next iteration. 
+ self.commitment = None; + + return Some(Ok(data)); + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::stages::test_utils::{CollectingLayer, TraceStorage}; + use alloc::vec; + use kona_plasma::test_utils::{TestChainProvider, TestPlasmaInputFetcher}; + use tracing::Level; + use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt}; + + #[tokio::test] + async fn test_next_plasma_advance_origin_reorg_error() { + let chain_provider = TestChainProvider::default(); + let input_fetcher = TestPlasmaInputFetcher { + advances: vec![Err(PlasmaError::ReorgRequired)], + ..Default::default() + }; + let source = vec![Bytes::from("hello"), Bytes::from("world")].into_iter(); + let id = BlockID { number: 1, ..Default::default() }; + + let mut plasma_source = PlasmaSource::new(chain_provider, input_fetcher, source, id); + + let err = plasma_source.next().await.unwrap().unwrap_err(); + assert_eq!(err, StageError::Reset(ResetError::NewExpiredChallenge)); } + + #[tokio::test] + async fn test_next_plasma_advance_origin_other_error() { + let chain_provider = TestChainProvider::default(); + let input_fetcher = TestPlasmaInputFetcher { + advances: vec![Err(PlasmaError::NotEnoughData)], + ..Default::default() + }; + let source = vec![Bytes::from("hello"), Bytes::from("world")].into_iter(); + let id = BlockID { number: 1, ..Default::default() }; + + let mut plasma_source = PlasmaSource::new(chain_provider, input_fetcher, source, id); + + let err = plasma_source.next().await.unwrap().unwrap_err(); + matches!(err, StageError::Temporary(_)); + } + + #[tokio::test] + async fn test_next_plasma_not_enough_source_data() { + let chain_provider = TestChainProvider::default(); + let input_fetcher = TestPlasmaInputFetcher { advances: vec![Ok(())], ..Default::default() }; + let source = vec![].into_iter(); + let id = BlockID { number: 1, ..Default::default() }; + + let mut plasma_source = PlasmaSource::new(chain_provider, input_fetcher, source, id); + + let err = 
plasma_source.next().await.unwrap().unwrap_err(); + assert_eq!(err, StageError::Plasma(PlasmaError::NotEnoughData)); + } + + #[tokio::test] + async fn test_next_plasma_empty_source_data() { + let trace_store: TraceStorage = Default::default(); + let layer = CollectingLayer::new(trace_store.clone()); + tracing_subscriber::Registry::default().with(layer).init(); + + let chain_provider = TestChainProvider::default(); + let input_fetcher = TestPlasmaInputFetcher { advances: vec![Ok(())], ..Default::default() }; + let source = vec![Bytes::from("")].into_iter(); + let id = BlockID { number: 1, ..Default::default() }; + + let mut plasma_source = PlasmaSource::new(chain_provider, input_fetcher, source, id); + + let err = plasma_source.next().await.unwrap().unwrap_err(); + assert_eq!(err, StageError::Plasma(PlasmaError::NotEnoughData)); + + let logs = trace_store.get_by_level(Level::WARN); + assert_eq!(logs.len(), 1); + assert!(logs[0].contains("empty data from plasma source")); + } + + #[tokio::test] + async fn test_next_plasma_non_plasma_tx_data_forwards() { + let trace_store: TraceStorage = Default::default(); + let layer = CollectingLayer::new(trace_store.clone()); + tracing_subscriber::Registry::default().with(layer).init(); + + let chain_provider = TestChainProvider::default(); + let input_fetcher = TestPlasmaInputFetcher { advances: vec![Ok(())], ..Default::default() }; + let first = Bytes::copy_from_slice(&[2u8]); + let source = vec![first.clone()].into_iter(); + let id = BlockID { number: 1, ..Default::default() }; + + let mut plasma_source = PlasmaSource::new(chain_provider, input_fetcher, source, id); + + let data = plasma_source.next().await.unwrap().unwrap(); + assert_eq!(data, first); + + let logs = trace_store.get_by_level(Level::INFO); + assert_eq!(logs.len(), 1); + assert!(logs[0].contains("non-plasma tx data, forwarding downstream")); + } + + // TODO: more tests } diff --git a/crates/derive/src/sources/source.rs b/crates/derive/src/sources/source.rs index 
95129e20b1..8dd392a850 100644 --- a/crates/derive/src/sources/source.rs +++ b/crates/derive/src/sources/source.rs @@ -8,27 +8,34 @@ use crate::{ use alloc::boxed::Box; use alloy_primitives::Bytes; use async_trait::async_trait; +use kona_plasma::traits::{ChainProvider as PlasmaChainProvider, PlasmaInputFetcher}; /// An enum over the various data sources. #[derive(Debug, Clone)] -pub enum DataSource +pub enum DataSource where CP: ChainProvider + Send, B: BlobProvider + Send, + PCP: PlasmaChainProvider + Send, + PIF: PlasmaInputFetcher + Send, + I: Iterator + Send, { /// A calldata source. Calldata(CalldataSource), /// A blob source. Blob(BlobSource), /// A plasma source. - Plasma(PlasmaSource), + Plasma(PlasmaSource), } #[async_trait] -impl AsyncIterator for DataSource +impl AsyncIterator for DataSource where CP: ChainProvider + Send, B: BlobProvider + Send, + PCP: PlasmaChainProvider + Send, + PIF: PlasmaInputFetcher + Send, + I: Iterator + Send, { type Item = Bytes; diff --git a/crates/derive/src/types/errors.rs b/crates/derive/src/types/errors.rs index d677cdba72..bedd54b36e 100644 --- a/crates/derive/src/types/errors.rs +++ b/crates/derive/src/types/errors.rs @@ -5,6 +5,7 @@ use crate::types::{BlockID, Frame}; use alloc::vec::Vec; use alloy_primitives::{Bytes, B256}; use core::fmt::Display; +use kona_plasma::types::PlasmaError; /// A result type for the derivation pipeline stages. pub type StageResult = Result; @@ -14,6 +15,12 @@ pub type StageResult = Result; pub enum StageError { /// There is no data to read from the channel bank. Eof, + /// A temporary error that allows the operation to be retried. + Temporary(anyhow::Error), + /// A critical error. + Critical(anyhow::Error), + /// Plasma data source error. + Plasma(PlasmaError), /// There is not enough data progress, but if we wait, the stage will eventually return data /// or produce an EOF error. 
NotEnoughData, @@ -61,6 +68,9 @@ impl PartialEq for StageError { matches!( (self, other), (StageError::Eof, StageError::Eof) | + (StageError::Temporary(_), StageError::Temporary(_)) | + (StageError::Critical(_), StageError::Critical(_)) | + (StageError::Plasma(_), StageError::Plasma(_)) | (StageError::NotEnoughData, StageError::NotEnoughData) | (StageError::NoChannelsAvailable, StageError::NoChannelsAvailable) | (StageError::NoChannel, StageError::NoChannel) | @@ -93,6 +103,9 @@ impl Display for StageError { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { match self { StageError::Eof => write!(f, "End of file"), + StageError::Temporary(e) => write!(f, "Temporary error: {}", e), + StageError::Critical(e) => write!(f, "Critical error: {}", e), + StageError::Plasma(e) => write!(f, "Plasma error: {:?}", e), StageError::NotEnoughData => write!(f, "Not enough data"), StageError::BlockFetch(hash) => { write!(f, "Failed to fetch block info and transactions by hash: {}", hash) @@ -126,6 +139,10 @@ pub enum ResetError { /// The first argument is the expected timestamp, and the second argument is the actual /// timestamp. BadTimestamp(u64, u64), + /// A reorg is required. + ReorgRequired, + /// A new expired challenge. 
+ NewExpiredChallenge, } impl PartialEq for ResetError { @@ -137,6 +154,8 @@ impl PartialEq for ResetError { (ResetError::BadTimestamp(e1, a1), ResetError::BadTimestamp(e2, a2)) => { e1 == e2 && a1 == a2 } + (ResetError::ReorgRequired, ResetError::ReorgRequired) => true, + (ResetError::NewExpiredChallenge, ResetError::NewExpiredChallenge) => true, _ => false, } } @@ -151,6 +170,8 @@ impl Display for ResetError { ResetError::BadTimestamp(expected, actual) => { write!(f, "Bad timestamp: expected {}, got {}", expected, actual) } + ResetError::ReorgRequired => write!(f, "Reorg required"), + ResetError::NewExpiredChallenge => write!(f, "New expired challenge"), } } } diff --git a/crates/plasma/Cargo.toml b/crates/plasma/Cargo.toml new file mode 100644 index 0000000000..4bacdf9378 --- /dev/null +++ b/crates/plasma/Cargo.toml @@ -0,0 +1,34 @@ +[package] +name = "kona-plasma" +description = "Plasma Data Availability Adapter" +version = "0.0.1" +edition.workspace = true +authors.workspace = true +license.workspace = true +repository.workspace = true +homepage.workspace = true + +[dependencies] +# Workspace +anyhow.workspace = true +tracing.workspace = true + +# Local +kona-primitives = { path = "../primitives" } + +# External +alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "e3f2f07", default-features = false } +alloy-primitives = { workspace = true, features = ["rlp"] } +async-trait = "0.1.77" + +# `serde` feature dependencies +serde = { version = "1.0.197", default-features = false, features = ["derive"], optional = true } + +[dev-dependencies] +tracing-subscriber = "0.3.18" +serde_json = { version = "1.0.68", default-features = false } + +[features] +default = ["serde"] +serde = ["dep:serde"] +test-utils = [] diff --git a/crates/plasma/README.md b/crates/plasma/README.md new file mode 100644 index 0000000000..4eadf2ea96 --- /dev/null +++ b/crates/plasma/README.md @@ -0,0 +1,27 @@ +# `kona-plasma` + +Plasma Data Availability Adapter for `kona-derive`. 
+ +[plasma]: https://specs.optimism.io/experimental/plasma.html + +`kona-plasma` is an implementation of the [Plasma][plasma] OP Stack Specification in rust. + +## Usage + +Add `kona-plasma` to your `Cargo.toml`. + +```ignore +[dependencies] +kona-plasma = "0.0.1" + +# Serde is enabled by default and can be disabled by toggling default-features off +# kona-plasma = { version = "0.0.1", default-features = false } +``` + +## Features + +### Serde + +[`serde`] serialization and deserialization support for `kona-plasma` types. + +By default, the `serde` feature is enabled on `kona-plasma`. diff --git a/crates/plasma/src/lib.rs b/crates/plasma/src/lib.rs new file mode 100644 index 0000000000..4fee90dbfc --- /dev/null +++ b/crates/plasma/src/lib.rs @@ -0,0 +1,24 @@ +#![doc = include_str!("../README.md")] +#![warn(missing_debug_implementations, missing_docs, unreachable_pub, rustdoc::all)] +#![deny(unused_must_use, rust_2018_idioms)] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +#![no_std] + +extern crate alloc; + +pub mod traits; +pub mod types; + +// Re-export kona primitives. +pub use kona_primitives::prelude::*; + +/// The prelude exports common types and traits. +pub mod prelude { + pub use crate::{ + traits::{ChainProvider, PlasmaInputFetcher}, + types::{FinalizedHeadSignal, Keccak256Commitment, PlasmaError, SystemConfig}, + }; +} + +#[cfg(any(test, feature = "test-utils"))] +pub mod test_utils; diff --git a/crates/plasma/src/test_utils.rs b/crates/plasma/src/test_utils.rs new file mode 100644 index 0000000000..aaa7cab938 --- /dev/null +++ b/crates/plasma/src/test_utils.rs @@ -0,0 +1,150 @@ +//! Test utilities for the Plasma crate. 
+ +use crate::{ + traits::{ChainProvider, PlasmaInputFetcher}, + types::{FinalizedHeadSignal, PlasmaError}, +}; +use alloc::{boxed::Box, vec::Vec}; +use alloy_consensus::{Header, Receipt, TxEnvelope}; +use alloy_primitives::{Bytes, B256}; +use anyhow::Result; +use async_trait::async_trait; +use kona_primitives::{ + block::{BlockID, BlockInfo}, + system_config::SystemConfig, +}; + +/// A mock plasma input fetcher for testing. +#[derive(Debug, Clone, Default)] +pub struct TestPlasmaInputFetcher { + /// Inputs to return. + pub inputs: Vec>, + /// Advance L1 origin results. + pub advances: Vec>, + /// Reset results. + pub resets: Vec>, +} + +#[async_trait] +impl PlasmaInputFetcher for TestPlasmaInputFetcher { + async fn get_input( + &mut self, + _fetcher: &TestChainProvider, + _commitment: Bytes, + _block: BlockID, + ) -> Option> { + self.inputs.pop() + } + + async fn advance_l1_origin( + &mut self, + _fetcher: &TestChainProvider, + _block: BlockID, + ) -> Option> { + self.advances.pop() + } + + async fn reset( + &mut self, + _block_number: BlockInfo, + _cfg: SystemConfig, + ) -> Option> { + self.resets.pop() + } + + async fn finalize(&mut self, _block_number: BlockInfo) -> Option> { + None + } + + fn on_finalized_head_signal(&mut self, _block_number: FinalizedHeadSignal) {} +} + +/// A mock chain provider for testing. +#[derive(Debug, Clone, Default)] +pub struct TestChainProvider { + /// Maps block numbers to block information using a tuple list. + pub blocks: Vec<(u64, BlockInfo)>, + /// Maps block hashes to header information using a tuple list. + pub headers: Vec<(B256, Header)>, + /// Maps block hashes to receipts using a tuple list. + pub receipts: Vec<(B256, Vec)>, +} + +impl TestChainProvider { + /// Insert a block into the mock chain provider. + pub fn insert_block(&mut self, number: u64, block: BlockInfo) { + self.blocks.push((number, block)); + } + + /// Insert receipts into the mock chain provider. 
+ pub fn insert_receipts(&mut self, hash: B256, receipts: Vec) { + self.receipts.push((hash, receipts)); + } + + /// Insert a header into the mock chain provider. + pub fn insert_header(&mut self, hash: B256, header: Header) { + self.headers.push((hash, header)); + } + + /// Clears headers from the mock chain provider. + pub fn clear_headers(&mut self) { + self.headers.clear(); + } + + /// Clears blocks from the mock chain provider. + pub fn clear_blocks(&mut self) { + self.blocks.clear(); + } + + /// Clears receipts from the mock chain provider. + pub fn clear_receipts(&mut self) { + self.receipts.clear(); + } + + /// Clears all blocks and receipts from the mock chain provider. + pub fn clear(&mut self) { + self.clear_blocks(); + self.clear_receipts(); + self.clear_headers(); + } +} + +#[async_trait] +impl ChainProvider for TestChainProvider { + async fn header_by_hash(&mut self, hash: B256) -> Result
{ + if let Some((_, header)) = self.headers.iter().find(|(_, b)| b.hash_slow() == hash) { + Ok(header.clone()) + } else { + Err(anyhow::anyhow!("Header not found")) + } + } + + async fn block_info_by_number(&mut self, _number: u64) -> Result { + if let Some((_, block)) = self.blocks.iter().find(|(n, _)| *n == _number) { + Ok(*block) + } else { + Err(anyhow::anyhow!("Block not found")) + } + } + + async fn receipts_by_hash(&mut self, _hash: B256) -> Result> { + if let Some((_, receipts)) = self.receipts.iter().find(|(h, _)| *h == _hash) { + Ok(receipts.clone()) + } else { + Err(anyhow::anyhow!("Receipts not found")) + } + } + + async fn block_info_and_transactions_by_hash( + &mut self, + hash: B256, + ) -> Result<(BlockInfo, Vec)> { + let block = self + .blocks + .iter() + .find(|(_, b)| b.hash == hash) + .map(|(_, b)| *b) + .ok_or_else(|| anyhow::anyhow!("Block not found"))?; + Ok((block, Vec::new())) + } +} diff --git a/crates/plasma/src/traits.rs b/crates/plasma/src/traits.rs new file mode 100644 index 0000000000..a86769b81d --- /dev/null +++ b/crates/plasma/src/traits.rs @@ -0,0 +1,65 @@ +//! Traits for plasma sources and internal components. + +use crate::types::{FinalizedHeadSignal, PlasmaError}; +use alloc::{boxed::Box, vec::Vec}; +use alloy_consensus::{Header, Receipt, TxEnvelope}; +use alloy_primitives::{Bytes, B256}; +use async_trait::async_trait; +use kona_primitives::{ + block::{BlockID, BlockInfo}, + system_config::SystemConfig, +}; + +/// Describes the functionality of a data source that can provide information from the blockchain. +#[async_trait] +pub trait ChainProvider { + /// Fetch the L1 [Header] for the given [B256] hash. + async fn header_by_hash(&mut self, hash: B256) -> anyhow::Result
; + + /// Returns the block at the given number, or an error if the block does not exist in the data + /// source. + async fn block_info_by_number(&mut self, number: u64) -> anyhow::Result; + + /// Returns all receipts in the block with the given hash, or an error if the block does not + /// exist in the data source. + async fn receipts_by_hash(&mut self, hash: B256) -> anyhow::Result>; + + /// Returns the [BlockInfo] and list of [TxEnvelope]s from the given block hash. + async fn block_info_and_transactions_by_hash( + &mut self, + hash: B256, + ) -> anyhow::Result<(BlockInfo, Vec)>; +} + +/// A plasma input fetcher. +#[async_trait] +pub trait PlasmaInputFetcher { + /// Get the input for the given commitment at the given block number from the DA storage + /// service. + async fn get_input( + &mut self, + fetcher: &CP, + commitment: Bytes, + block: BlockID, + ) -> Option>; + + /// Advance the L1 origin to the given block number, syncing the DA challenge events. + async fn advance_l1_origin( + &mut self, + fetcher: &CP, + block: BlockID, + ) -> Option>; + + /// Reset the challenge origin in case of L1 reorg. + async fn reset( + &mut self, + block_number: BlockInfo, + cfg: SystemConfig, + ) -> Option>; + + /// Notify L1 finalized head so plasma finality is always behind L1. + async fn finalize(&mut self, block_number: BlockInfo) -> Option>; + + /// Set the engine finalization signal callback. + fn on_finalized_head_signal(&mut self, callback: FinalizedHeadSignal); +} diff --git a/crates/plasma/src/types.rs b/crates/plasma/src/types.rs new file mode 100644 index 0000000000..fe449d0c10 --- /dev/null +++ b/crates/plasma/src/types.rs @@ -0,0 +1,84 @@ +//! Types for the Kona Plasma crate. + +use alloc::boxed::Box; +use alloy_primitives::{Address, Bytes, U256}; +use core::fmt::Display; +use kona_primitives::block::BlockInfo; + +/// A plasma error. +#[derive(Debug, Clone, Copy, Eq, PartialEq)] +pub enum PlasmaError { + /// A reorg is required. 
+ ReorgRequired, + /// Not enough data. + NotEnoughData, + /// The commitment was challenged, but the challenge period expired. + ChallengeExpired, + /// Missing data past the challenge period. + MissingPastWindow, + /// A challenge is pending for the given commitment. + ChallengePending, +} + +impl Display for PlasmaError { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + match self { + Self::ReorgRequired => write!(f, "reorg required"), + Self::NotEnoughData => write!(f, "not enough data"), + Self::ChallengeExpired => write!(f, "challenge expired"), + Self::MissingPastWindow => write!(f, "missing past window"), + Self::ChallengePending => write!(f, "challenge pending"), + } + } +} + +/// Max input size ensures the canonical chain cannot include input batches too large to +/// challenge in the Data Availability Challenge contract. Value in number of bytes. +/// This value can only be changed in a hard fork. +pub const MAX_INPUT_SIZE: usize = 130672; + +/// TxDataVersion1 is the version number for batcher transactions containing +/// plasma commitments. It should not collide with DerivationVersion which is still +/// used downstream when parsing the frames. +pub const TX_DATA_VERSION_1: u8 = 1; + +/// The default commitment type. +pub type Keccak256Commitment = Bytes; + +/// The default commitment type for the DA storage. +pub const KECCAK_256_COMMITMENT_TYPE: u8 = 0; + +/// DecodeKeccak256 validates and casts the commitment into a Keccak256Commitment. +pub fn decode_keccak256(commitment: &[u8]) -> Result { + if commitment.is_empty() { + return Err(PlasmaError::NotEnoughData); + } + if commitment[0] != KECCAK_256_COMMITMENT_TYPE { + return Err(PlasmaError::NotEnoughData); + } + let c = &commitment[1..]; + if c.len() != 32 { + return Err(PlasmaError::NotEnoughData); + } + Ok(Bytes::copy_from_slice(c)) +} + +/// A callback method for the finalized head signal.
+pub type FinalizedHeadSignal = Box; + +/// Optimism system config contract values +#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "serde", serde(rename_all = "camelCase"))] +pub struct SystemConfig { + /// Batch sender address + pub batcher_addr: Address, + /// L2 gas limit + pub gas_limit: U256, + /// Fee overhead + #[cfg_attr(feature = "serde", serde(rename = "overhead"))] + pub l1_fee_overhead: U256, + /// Fee scalar + #[cfg_attr(feature = "serde", serde(rename = "scalar"))] + pub l1_fee_scalar: U256, +}