diff --git a/.github/workflows/arbitrator-ci.yml b/.github/workflows/arbitrator-ci.yml index 7b05bbf238..e19bb73038 100644 --- a/.github/workflows/arbitrator-ci.yml +++ b/.github/workflows/arbitrator-ci.yml @@ -138,7 +138,7 @@ jobs: uses: actions-rs/cargo@v1 with: command: clippy - args: --all --manifest-path arbitrator/Cargo.toml + args: --all --manifest-path arbitrator/Cargo.toml -- -D warnings - name: Run rust tests uses: actions-rs/cargo@v1 diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 0f4154d082..0773ea8ce7 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -136,7 +136,7 @@ jobs: skip-pkg-cache: true - name: run tests with race detection - run: gotestsum --format short-verbose --jsonfile test-output-withrace.json -- -race ./... + run: gotestsum --format short-verbose --jsonfile test-output-withrace.json -- ./... -race -parallel=1 - name: Annotate tests with race detection if: always() @@ -146,7 +146,7 @@ jobs: - name: run tests without race detection if: always() - run: gotestsum --format short-verbose --jsonfile test-output.json -- ./... -coverprofile=coverage.txt -covermode=atomic -coverpkg=./...,./go-ethereum/... + run: gotestsum --format short-verbose --jsonfile test-output.json -- ./... -coverprofile=coverage.txt -covermode=atomic -coverpkg=./...,./go-ethereum/... 
-parallel=1 - name: Annotate tests without race detection if: always() diff --git a/.gitignore b/.gitignore index 5f79cf7daa..56042c284f 100644 --- a/.gitignore +++ b/.gitignore @@ -19,6 +19,7 @@ contracts/cache/ solgen/go/ contracts/deployments/ contracts/test/prover/proofs/*.json +contracts/test/prover/spec-proofs/*.json .make/ /cmd/statetransfer/statetransfer /reproducible-wasm/*.wasm diff --git a/.gitmodules b/.gitmodules index de04ba435c..ae5be5e866 100644 --- a/.gitmodules +++ b/.gitmodules @@ -13,3 +13,6 @@ [submodule "blockscout"] path = blockscout url = https://github.com/OffchainLabs/blockscout.git +[submodule "arbitrator/wasm-testsuite/testsuite"] + path = arbitrator/wasm-testsuite/testsuite + url = https://github.com/WebAssembly/testsuite.git diff --git a/Dockerfile b/Dockerfile index 897fd0dc67..0ffaa8e7fe 100644 --- a/Dockerfile +++ b/Dockerfile @@ -76,6 +76,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ apt-get install -y make && \ cargo install --force cbindgen COPY arbitrator/Cargo.* arbitrator/cbindgen.toml arbitrator/ +COPY arbitrator/prover/Cargo.toml arbitrator/prover/ COPY ./Makefile ./ COPY arbitrator/prover arbitrator/prover RUN NITRO_BUILD_IGNORE_TIMESTAMPS=1 make build-prover-header diff --git a/Makefile b/Makefile index 6e8cd22aab..3d1b15301b 100644 --- a/Makefile +++ b/Makefile @@ -118,8 +118,9 @@ clean: rm -rf arbitrator/prover/test-cases/rust/target rm -f arbitrator/prover/test-cases/*.wasm rm -f arbitrator/prover/test-cases/go/main + rm -rf arbitrator/wasm-testsuite/tests rm -rf $(output_root) - rm -f contracts/test/prover/proofs/*.json + rm -f contracts/test/prover/proofs/*.json contracts/test/prover/spec-proofs/*.json rm -rf arbitrator/target rm -rf arbitrator/wasm-libraries/target rm -f arbitrator/wasm-libraries/soft-float/soft-float.wasm @@ -214,62 +215,16 @@ $(output_root)/machines/latest/soft-float.wasm: $(DEP_PREDICATE) \ arbitrator/wasm-libraries/soft-float/bindings64.o \ 
arbitrator/wasm-libraries/soft-float/SoftFloat/build/Wasm-Clang/*.o \ --no-entry -o $@ \ - --export wavm__f32_abs \ - --export wavm__f32_neg \ - --export wavm__f32_ceil \ - --export wavm__f32_floor \ - --export wavm__f32_trunc \ - --export wavm__f32_nearest \ - --export wavm__f32_sqrt \ - --export wavm__f32_add \ - --export wavm__f32_sub \ - --export wavm__f32_mul \ - --export wavm__f32_div \ - --export wavm__f32_min \ - --export wavm__f32_max \ - --export wavm__f32_copysign \ - --export wavm__f32_eq \ - --export wavm__f32_ne \ - --export wavm__f32_lt \ - --export wavm__f32_le \ - --export wavm__f32_gt \ - --export wavm__f32_ge \ - --export wavm__i32_trunc_f32_s \ - --export wavm__i32_trunc_f32_u \ - --export wavm__i64_trunc_f32_s \ - --export wavm__i64_trunc_f32_u \ - --export wavm__f32_convert_i32_s \ - --export wavm__f32_convert_i32_u \ - --export wavm__f32_convert_i64_s \ - --export wavm__f32_convert_i64_u \ - --export wavm__f64_abs \ - --export wavm__f64_neg \ - --export wavm__f64_ceil \ - --export wavm__f64_floor \ - --export wavm__f64_trunc \ - --export wavm__f64_nearest \ - --export wavm__f64_sqrt \ - --export wavm__f64_add \ - --export wavm__f64_sub \ - --export wavm__f64_mul \ - --export wavm__f64_div \ - --export wavm__f64_min \ - --export wavm__f64_max \ - --export wavm__f64_copysign \ - --export wavm__f64_eq \ - --export wavm__f64_ne \ - --export wavm__f64_lt \ - --export wavm__f64_le \ - --export wavm__f64_gt \ - --export wavm__f64_ge \ - --export wavm__i32_trunc_f64_s \ - --export wavm__i32_trunc_f64_u \ - --export wavm__i64_trunc_f64_s \ - --export wavm__i64_trunc_f64_u \ - --export wavm__f64_convert_i32_s \ - --export wavm__f64_convert_i32_u \ - --export wavm__f64_convert_i64_s \ - --export wavm__f64_convert_i64_u \ + $(patsubst %,--export wavm__f32_%, abs neg ceil floor trunc nearest sqrt add sub mul div min max) \ + $(patsubst %,--export wavm__f32_%, copysign eq ne lt le gt ge) \ + $(patsubst %,--export wavm__f64_%, abs neg ceil floor trunc 
nearest sqrt add sub mul div min max) \ + $(patsubst %,--export wavm__f64_%, copysign eq ne lt le gt ge) \ + $(patsubst %,--export wavm__i32_trunc_%, f32_s f32_u f64_s f64_u) \ + $(patsubst %,--export wavm__i32_trunc_sat_%, f32_s f32_u f64_s f64_u) \ + $(patsubst %,--export wavm__i64_trunc_%, f32_s f32_u f64_s f64_u) \ + $(patsubst %,--export wavm__i64_trunc_sat_%, f32_s f32_u f64_s f64_u) \ + $(patsubst %,--export wavm__f32_convert_%, i32_s i32_u i64_s i64_u) \ + $(patsubst %,--export wavm__f64_convert_%, i32_s i32_u i64_s i64_u) \ --export wavm__f32_demote_f64 \ --export wavm__f64_promote_f32 @@ -323,6 +278,7 @@ contracts/test/prover/proofs/%.json: arbitrator/prover/test-cases/%.wasm $(arbit .make/fmt: $(DEP_PREDICATE) build-node-deps .make/yarndeps $(ORDER_ONLY_PREDICATE) .make golangci-lint run --disable-all -E gofmt --fix cargo fmt --all --manifest-path arbitrator/Cargo.toml -- --check + cargo fmt --all --manifest-path arbitrator/wasm-testsuite/Cargo.toml -- --check yarn --cwd contracts prettier:solidity @touch $@ diff --git a/arbitrator/Cargo.toml b/arbitrator/Cargo.toml index 7cb14d8b11..415602a5a3 100644 --- a/arbitrator/Cargo.toml +++ b/arbitrator/Cargo.toml @@ -1,6 +1,6 @@ [workspace] members = [ - "prover", + "prover", ] [profile.release] diff --git a/arbitrator/prover/fuzz/fuzz_targets/osp.rs b/arbitrator/prover/fuzz/fuzz_targets/osp.rs index 10f7174b99..1ceef8355c 100644 --- a/arbitrator/prover/fuzz/fuzz_targets/osp.rs +++ b/arbitrator/prover/fuzz/fuzz_targets/osp.rs @@ -187,6 +187,7 @@ fn fuzz_impl(data: &[u8]) -> Result<()> { &[], wavm_binary, true, + true, false, GlobalState::default(), Default::default(), diff --git a/arbitrator/prover/src/binary.rs b/arbitrator/prover/src/binary.rs index 12c45f8235..c4e64f9991 100644 --- a/arbitrator/prover/src/binary.rs +++ b/arbitrator/prover/src/binary.rs @@ -66,7 +66,8 @@ pub enum FloatInstruction { UnOp(FloatType, FloatUnOp), BinOp(FloatType, FloatBinOp), RelOp(FloatType, FloatRelOp), - 
TruncIntOp(IntegerValType, FloatType, bool), + /// The bools represent (saturating, signed) + TruncIntOp(IntegerValType, FloatType, bool, bool), ConvertIntOp(FloatType, IntegerValType, bool), F32DemoteF64, F64PromoteF32, @@ -80,7 +81,7 @@ impl FloatInstruction { FloatInstruction::RelOp(t, _) => { FunctionType::new(vec![t.into(); 2], vec![ArbValueType::I32]) } - FloatInstruction::TruncIntOp(i, f, _) => { + FloatInstruction::TruncIntOp(i, f, ..) => { FunctionType::new(vec![f.into()], vec![i.into()]) } FloatInstruction::ConvertIntOp(f, i, _) => { @@ -171,12 +172,15 @@ impl FromStr for FloatInstruction { map( all_consuming(tuple(( parse_int_type, - tag("_trunc_"), + alt(( + value(true, tag("_trunc_sat_")), + value(false, tag("_trunc_")), + )), parse_fp_type, tag("_"), parse_signedness, ))), - |(i, _, f, _, s)| FloatInstruction::TruncIntOp(i, f, s), + |(i, sat, f, _, s)| FloatInstruction::TruncIntOp(i, f, sat, s), ), map( all_consuming(tuple(( @@ -230,9 +234,6 @@ pub struct Local { pub struct NameCustomSection { pub module: String, pub functions: HashMap, - // TODO: remove this when re-initializing the rollup - // this is kept around to deserialize old binaries - pub _locals_removed: HashMap>, } #[derive(Clone, Default)] diff --git a/arbitrator/prover/src/console.rs b/arbitrator/prover/src/console.rs new file mode 100644 index 0000000000..1abb509065 --- /dev/null +++ b/arbitrator/prover/src/console.rs @@ -0,0 +1,85 @@ +// Copyright 2021-2022, Offchain Labs, Inc. 
+// For license information, see https://github.com/nitro/blob/master/LICENSE + +#![allow(dead_code)] + +use std::fmt; + +pub struct Color; + +impl Color { + pub const RED: &'static str = "\x1b[31;1m"; + pub const BLUE: &'static str = "\x1b[34;1m"; + pub const YELLOW: &'static str = "\x1b[33;1m"; + pub const PINK: &'static str = "\x1b[38;5;161;1m"; + pub const MINT: &'static str = "\x1b[38;5;48;1m"; + pub const GREY: &'static str = "\x1b[90m"; + pub const RESET: &'static str = "\x1b[0;0m"; + + pub const LIME: &'static str = "\x1b[38;5;119;1m"; + pub const LAVENDER: &'static str = "\x1b[38;5;183;1m"; + pub const MAROON: &'static str = "\x1b[38;5;124;1m"; + pub const ORANGE: &'static str = "\x1b[38;5;202;1m"; + + pub fn color(color: &str, text: S) -> String { + format!("{}{}{}", color, text, Color::RESET) + } + + /// Colors text red. + pub fn red(text: S) -> String { + Color::color(Color::RED, text) + } + + /// Colors text blue. + pub fn blue(text: S) -> String { + Color::color(Color::BLUE, text) + } + + /// Colors text yellow. + pub fn yellow(text: S) -> String { + Color::color(Color::YELLOW, text) + } + + /// Colors text pink. + pub fn pink(text: S) -> String { + Color::color(Color::PINK, text) + } + + /// Colors text grey. + pub fn grey(text: S) -> String { + Color::color(Color::GREY, text) + } + + /// Colors text lavender. + pub fn lavender(text: S) -> String { + Color::color(Color::LAVENDER, text) + } + + /// Colors text mint. + pub fn mint(text: S) -> String { + Color::color(Color::MINT, text) + } + + /// Colors text lime. + pub fn lime(text: S) -> String { + Color::color(Color::LIME, text) + } + + /// Colors text orange. + pub fn orange(text: S) -> String { + Color::color(Color::ORANGE, text) + } + + /// Colors text maroon. + pub fn maroon(text: S) -> String { + Color::color(Color::MAROON, text) + } + + /// Color a bool one of two colors depending on its value. 
+ pub fn color_if(cond: bool, true_color: &str, false_color: &str) -> String { + match cond { + true => Color::color(true_color, &format!("{}", cond)), + false => Color::color(false_color, &format!("{}", cond)), + } + } +} diff --git a/arbitrator/prover/src/lib.rs b/arbitrator/prover/src/lib.rs index dcbdfac9b3..be6f117b5c 100644 --- a/arbitrator/prover/src/lib.rs +++ b/arbitrator/prover/src/lib.rs @@ -1,9 +1,11 @@ // Copyright 2021-2022, Offchain Labs, Inc. // For license information, see https://github.com/nitro/blob/master/LICENSE -#![allow(clippy::missing_safety_doc)] // We have a lot of unsafe ABI +#![allow(clippy::missing_safety_doc, clippy::too_many_arguments)] pub mod binary; +/// cbindgen:ignore +pub mod console; mod host; pub mod machine; /// cbindgen:ignore @@ -11,7 +13,7 @@ mod memory; mod merkle; mod reinterpret; pub mod utils; -mod value; +pub mod value; pub mod wavm; use crate::machine::{argument_data_to_inbox, Machine}; @@ -77,6 +79,7 @@ unsafe fn arbitrator_load_machine_impl( let mach = Machine::from_paths( &libraries, binary_path, + true, false, false, Default::default(), diff --git a/arbitrator/prover/src/machine.rs b/arbitrator/prover/src/machine.rs index 9a8e6e992d..63261e88e1 100644 --- a/arbitrator/prover/src/machine.rs +++ b/arbitrator/prover/src/machine.rs @@ -3,11 +3,12 @@ use crate::{ binary::{parse, FloatInstruction, Local, NameCustomSection, WasmBinary}, + console::Color, host::get_host_impl, memory::Memory, merkle::{Merkle, MerkleType}, reinterpret::{ReinterpretAsSigned, ReinterpretAsUnsigned}, - utils::{file_bytes, Bytes32, CBytes, DeprecatedTableType}, + utils::{file_bytes, Bytes32, CBytes, RemoteTableType}, value::{ArbValueType, FunctionType, IntegerValType, ProgramCounter, Value}, wavm::{ pack_cross_module_call, unpack_cross_module_call, wasm_to_wavm, FloatingPointImpls, @@ -20,11 +21,12 @@ use fnv::FnvHashMap as HashMap; use num::{traits::PrimInt, Zero}; use rayon::prelude::*; use serde::{Deserialize, Serialize}; -use 
serde_with::{serde_as, FromInto}; +use serde_with::serde_as; use sha3::Keccak256; use std::{ borrow::Cow, convert::TryFrom, + fmt, fs::File, io::{BufReader, BufWriter, Write}, num::Wrapping, @@ -209,7 +211,7 @@ impl TableElement { #[serde_as] #[derive(Clone, Debug, Serialize, Deserialize)] struct Table { - #[serde_as(as = "FromInto")] + #[serde(with = "RemoteTableType")] ty: TableType, elems: Vec, #[serde(skip)] @@ -345,7 +347,7 @@ impl Module { ) }, func_ty.clone(), - &types, + types, )?); host_call_hooks.push(None); } @@ -354,12 +356,24 @@ impl Module { "Multiple memories are not supported" ); if let Some(limits) = bin.memories.get(0) { - // We ignore the maximum size - let size = usize::try_from(limits.initial) - .ok() - .and_then(|x| x.checked_mul(Memory::PAGE_SIZE)) - .ok_or_else(|| eyre!("Memory size is too large"))?; - memory = Memory::new(size); + let page_size = Memory::PAGE_SIZE; + let initial = limits.initial; // validate() checks this is less than max::u32 + let allowed = u32::MAX as u64 / Memory::PAGE_SIZE - 1; // we require the size remain *below* 2^32 + + let max_size = match limits.maximum { + Some(pages) => u64::min(allowed, pages), + _ => allowed, + }; + if initial > max_size { + bail!( + "Memory inits to a size larger than its max: {} vs {}", + limits.initial, + max_size + ); + } + let size = initial * page_size; + + memory = Memory::new(size as usize, max_size); } let mut globals = vec![]; @@ -398,7 +412,7 @@ impl Module { }; if !matches!( offset.checked_add(data.data.len()), - Some(x) if (x as u64) < memory.size() as u64, + Some(x) if (x as u64) <= memory.size() as u64, ) { bail!( "Out-of-bounds data memory init with offset {} and size {}", @@ -568,6 +582,7 @@ impl Module { ); data.extend(self.memory.size().to_be_bytes()); + data.extend(self.memory.max_size.to_be_bytes()); data.extend(mem_merkle.root()); data.extend(self.tables_merkle.root()); @@ -619,6 +634,23 @@ impl GlobalState { } } +#[derive(Serialize)] +pub struct ProofInfo { + pub 
before: String, + pub proof: String, + pub after: String, +} + +impl ProofInfo { + pub fn new(before: String, proof: String, after: String) -> Self { + Self { + before, + proof, + after, + } + } +} + /// cbindgen:ignore #[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)] #[repr(u8)] @@ -641,7 +673,6 @@ pub struct MachineState<'a> { status: MachineStatus, value_stack: Cow<'a, Vec>, internal_stack: Cow<'a, Vec>, - block_stack: Cow<'a, Vec>, frame_stack: Cow<'a, Vec>, modules: Vec>, global_state: GlobalState, @@ -660,6 +691,12 @@ struct PreimageResolverWrapper { last_resolved: Option<(Bytes32, CBytes)>, } +impl fmt::Debug for PreimageResolverWrapper { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "resolver...") + } +} + impl PreimageResolverWrapper { pub fn new(resolver: PreimageResolver) -> PreimageResolverWrapper { PreimageResolverWrapper { @@ -693,13 +730,12 @@ impl PreimageResolverWrapper { } } -#[derive(Clone)] +#[derive(Clone, Debug)] pub struct Machine { steps: u64, // Not part of machine hash status: MachineStatus, value_stack: Vec, internal_stack: Vec, - block_stack: Vec, frame_stack: Vec, modules: Vec, modules_merkle: Option, @@ -732,13 +768,6 @@ fn hash_value_stack(stack: &[Value]) -> Bytes32 { hash_stack(stack.iter().map(|v| v.hash()), "Value stack:") } -fn hash_pc_stack(pcs: &[usize]) -> Bytes32 { - hash_stack( - pcs.iter().map(|pc| (*pc as u32).to_be_bytes()), - "Program counter stack:", - ) -} - fn hash_stack_frame_stack(frames: &[StackFrame]) -> Bytes32 { hash_stack(frames.iter().map(|f| f.hash()), "Stack frame stack:") } @@ -786,7 +815,7 @@ where } #[must_use] -fn exec_ibin_op(a: T, b: T, op: IBinOpType) -> T +fn exec_ibin_op(a: T, b: T, op: IBinOpType) -> Option where Wrapping: ReinterpretAsSigned, T: Zero, @@ -798,7 +827,7 @@ where IBinOpType::DivS | IBinOpType::DivU | IBinOpType::RemS | IBinOpType::RemU, ) && b.is_zero() { - return T::zero(); + return None; } let res = match op { IBinOpType::Add => a + b, 
@@ -817,7 +846,7 @@ where IBinOpType::Rotl => a.rotl(b.cast_usize()), IBinOpType::Rotr => a.rotr(b.cast_usize()), }; - res.0 + Some(res.0) } #[must_use] @@ -857,6 +886,7 @@ impl Machine { pub fn from_paths( library_paths: &[PathBuf], binary_path: &Path, + language_support: bool, always_merkleize: bool, allow_hostapi_from_main: bool, global_state: GlobalState, @@ -866,7 +896,6 @@ impl Machine { let bin_source = file_bytes(binary_path)?; let bin = parse(&bin_source) .wrap_err_with(|| format!("failed to validate WASM binary at {:?}", binary_path))?; - let mut libraries = vec![]; let mut lib_sources = vec![]; for path in library_paths { @@ -880,6 +909,7 @@ impl Machine { Self::from_binaries( &libraries, bin, + language_support, always_merkleize, allow_hostapi_from_main, global_state, @@ -891,6 +921,7 @@ impl Machine { pub fn from_binaries( libraries: &[WasmBinary<'_>], bin: WasmBinary<'_>, + runtime_support: bool, always_merkleize: bool, allow_hostapi_from_main: bool, global_state: GlobalState, @@ -997,8 +1028,9 @@ impl Machine { } let main_module_idx = modules.len() - 1; let main_module = &modules[main_module_idx]; + // Rust support - if let Some(&f) = main_module.exports.get("main") { + if let Some(&f) = main_module.exports.get("main").filter(|_| runtime_support) { let mut expected_type = FunctionType::default(); expected_type.inputs.push(ArbValueType::I32); // argc expected_type.inputs.push(ArbValueType::I32); // argv @@ -1013,8 +1045,9 @@ impl Machine { entry!(Drop); entry!(HaltAndSetFinished); } + // Go support - if let Some(&f) = main_module.exports.get("run") { + if let Some(&f) = main_module.exports.get("run").filter(|_| runtime_support) { let mut expected_type = FunctionType::default(); expected_type.inputs.push(ArbValueType::I32); // argc expected_type.inputs.push(ArbValueType::I32); // argv @@ -1067,11 +1100,11 @@ impl Machine { entry!(@cross, i.module, i.func); } } + let entrypoint_types = vec![FunctionType::default()]; let mut entrypoint_names = 
NameCustomSection { module: "entry".into(), functions: HashMap::default(), - _locals_removed: HashMap::default(), }; entrypoint_names .functions @@ -1139,7 +1172,6 @@ impl Machine { steps: 0, value_stack: vec![Value::RefNull, Value::I32(0), Value::I32(0)], internal_stack: Vec::new(), - block_stack: Vec::new(), frame_stack: Vec::new(), modules, modules_merkle, @@ -1187,7 +1219,6 @@ impl Machine { steps: 0, value_stack: vec![Value::RefNull, Value::I32(0), Value::I32(0)], internal_stack: Vec::new(), - block_stack: Vec::new(), frame_stack: Vec::new(), modules, modules_merkle: None, @@ -1233,7 +1264,6 @@ impl Machine { status: self.status, value_stack: Cow::Borrowed(&self.value_stack), internal_stack: Cow::Borrowed(&self.internal_stack), - block_stack: Cow::Borrowed(&self.block_stack), frame_stack: Cow::Borrowed(&self.frame_stack), modules, global_state: self.global_state.clone(), @@ -1270,7 +1300,6 @@ impl Machine { self.status = new_state.status; self.value_stack = new_state.value_stack.into_owned(); self.internal_stack = new_state.internal_stack.into_owned(); - self.block_stack = new_state.block_stack.into_owned(); self.frame_stack = new_state.frame_stack.into_owned(); self.global_state = new_state.global_state; self.pc = new_state.pc; @@ -1278,6 +1307,56 @@ impl Machine { Ok(()) } + pub fn start_merkle_caching(&mut self) { + for module in &mut self.modules { + module.memory.cache_merkle_tree(); + } + self.modules_merkle = Some(Merkle::new( + MerkleType::Module, + self.modules.iter().map(Module::hash).collect(), + )); + } + + pub fn stop_merkle_caching(&mut self) { + self.modules_merkle = None; + for module in &mut self.modules { + module.memory.merkle = None; + } + } + + pub fn jump_into_function(&mut self, func: &str, mut args: Vec) { + let frame_args = [Value::RefNull, Value::I32(0), Value::I32(0)]; + args.extend(frame_args); + self.value_stack = args; + + let module = self.modules.last().expect("no module"); + let export = module.exports.iter().find(|x| x.0 == 
func); + let export = export + .unwrap_or_else(|| panic!("func {} not found", func)) + .1; + + self.frame_stack.clear(); + self.internal_stack.clear(); + + self.pc = ProgramCounter { + module: self.modules.len() - 1, + func: *export as usize, + inst: 0, + }; + self.status = MachineStatus::Running; + self.steps = 0; + } + + pub fn get_final_result(&self) -> Result> { + if !self.frame_stack.is_empty() { + bail!( + "machine has not successfully computed a final result {:?}", + self.status + ) + } + Ok(self.value_stack.clone()) + } + pub fn get_next_instruction(&self) -> Option { if self.is_halted() { return None; @@ -1317,12 +1396,17 @@ impl Machine { } }; } + macro_rules! error { + () => {{ + self.status = MachineStatus::Errored; + break; + }}; + } for _ in 0..n { self.steps += 1; if self.steps == Self::MAX_STEPS { - self.status = MachineStatus::Errored; - break; + error!(); } let inst = func.code[self.pc.inst]; if self.pc.inst == 1 { @@ -1348,25 +1432,8 @@ impl Machine { } self.pc.inst += 1; match inst.opcode { - Opcode::Unreachable => { - self.status = MachineStatus::Errored; - break; - } + Opcode::Unreachable => error!(), Opcode::Nop => {} - Opcode::Block => { - let idx = inst.argument_data as usize; - self.block_stack.push(idx); - debug_assert!(func.code.len() > idx); - } - Opcode::EndBlock => { - self.block_stack.pop(); - } - Opcode::EndBlockIf => { - let x = self.value_stack.last().unwrap(); - if !x.is_i32_zero() { - self.block_stack.pop().unwrap(); - } - } Opcode::InitFrame => { let caller_module_internals = self.value_stack.pop().unwrap().assume_u32(); let caller_module = self.value_stack.pop().unwrap().assume_u32(); @@ -1394,24 +1461,10 @@ impl Machine { Machine::test_next_instruction(func, &self.pc); } } - Opcode::Branch => { - self.pc.inst = self.block_stack.pop().unwrap(); - Machine::test_next_instruction(func, &self.pc); - } - Opcode::BranchIf => { - let x = self.value_stack.pop().unwrap(); - if !x.is_i32_zero() { - self.pc.inst = 
self.block_stack.pop().unwrap(); - Machine::test_next_instruction(func, &self.pc); - } - } Opcode::Return => { let frame = self.frame_stack.pop().unwrap(); match frame.return_ref { - Value::RefNull => { - self.status = MachineStatus::Errored; - break; - } + Value::RefNull => error!(), Value::InternalRef(pc) => { let changing_module = pc.module != self.pc.module; if changing_module { @@ -1469,8 +1522,7 @@ impl Machine { func = &module.funcs[self.pc.func]; } else { // The caller module has no internals - self.status = MachineStatus::Errored; - break; + error!(); } } Opcode::CallIndirect => { @@ -1497,15 +1549,11 @@ impl Machine { self.pc.inst = 0; func = &module.funcs[self.pc.func]; } - Value::RefNull => { - self.status = MachineStatus::Errored; - break; - } + Value::RefNull => error!(), v => bail!("invalid table element value {:?}", v), } } else { - self.status = MachineStatus::Errored; - break; + error!(); } } Opcode::LocalGet => { @@ -1537,12 +1585,10 @@ impl Machine { if let Some(val) = val { self.value_stack.push(val); } else { - self.status = MachineStatus::Errored; - break; + error!(); } } else { - self.status = MachineStatus::Errored; - break; + error!(); } } Opcode::MemoryStore { ty: _, bytes } => { @@ -1565,12 +1611,10 @@ impl Machine { }; if let Some(idx) = inst.argument_data.checked_add(base.into()) { if !module.memory.store_value(idx, val, bytes) { - self.status = MachineStatus::Errored; - break; + error!(); } } else { - self.status = MachineStatus::Errored; - break; + error!(); } } Opcode::I32Const => { @@ -1647,12 +1691,13 @@ impl Machine { Some(Value::I32(x)) => x, v => bail!("WASM validation failed: bad value for memory.grow {:?}", v), }; + let page_size = Memory::PAGE_SIZE; + let max_size = module.memory.max_size * page_size; + let new_size = (|| { - let adding_size = - u64::from(adding_pages).checked_mul(Memory::PAGE_SIZE as u64)?; + let adding_size = u64::from(adding_pages).checked_mul(page_size)?; let new_size = 
old_size.checked_add(adding_size)?; - // Note: we require the size remain *below* 2^32, meaning the actual limit is 2^32-PAGE_SIZE - if new_size < (1 << 32) { + if new_size <= max_size { Some(new_size) } else { None @@ -1661,7 +1706,7 @@ impl Machine { if let Some(new_size) = new_size { module.memory.resize(usize::try_from(new_size).unwrap()); // Push the old number of pages - let old_pages = u32::try_from(old_size / Memory::PAGE_SIZE as u64).unwrap(); + let old_pages = u32::try_from(old_size / page_size).unwrap(); self.value_stack.push(Value::I32(old_pages)); } else { // Push -1 @@ -1693,14 +1738,34 @@ impl Machine { match w { IntegerValType::I32 => { if let (Some(Value::I32(a)), Some(Value::I32(b))) = (va, vb) { - self.value_stack.push(Value::I32(exec_ibin_op(a, b, op))); + if op == IBinOpType::DivS + && (a as i32) == i32::MIN + && (b as i32) == -1 + { + error!(); + } + let value = match exec_ibin_op(a, b, op) { + Some(value) => value, + None => error!(), + }; + self.value_stack.push(Value::I32(value)) } else { bail!("WASM validation failed: wrong types for i32binop"); } } IntegerValType::I64 => { if let (Some(Value::I64(a)), Some(Value::I64(b))) = (va, vb) { - self.value_stack.push(Value::I64(exec_ibin_op(a, b, op))); + if op == IBinOpType::DivS + && (a as i64) == i64::MIN + && (b as i64) == -1 + { + error!(); + } + let value = match exec_ibin_op(a, b, op) { + Some(value) => value, + None => error!(), + }; + self.value_stack.push(Value::I64(value)) } else { bail!("WASM validation failed: wrong types for i64binop"); } @@ -1765,20 +1830,12 @@ impl Machine { } self.value_stack.push(Value::I64(x)); } - Opcode::PushStackBoundary => { - self.value_stack.push(Value::StackBoundary); - } Opcode::MoveFromStackToInternal => { self.internal_stack.push(self.value_stack.pop().unwrap()); } Opcode::MoveFromInternalToStack => { self.value_stack.push(self.internal_stack.pop().unwrap()); } - Opcode::IsStackBoundary => { - let val = self.value_stack.pop().unwrap(); - 
self.value_stack - .push(Value::I32((val == Value::StackBoundary) as u32)); - } Opcode::Dup => { let val = self.value_stack.last().cloned().unwrap(); self.value_stack.push(val); @@ -1791,28 +1848,24 @@ impl Machine { .memory .store_slice_aligned(ptr.into(), &*self.global_state.bytes32_vals[idx]) { - self.status = MachineStatus::Errored; - break; + error!(); } } Opcode::SetGlobalStateBytes32 => { let ptr = self.value_stack.pop().unwrap().assume_u32(); let idx = self.value_stack.pop().unwrap().assume_u32() as usize; if idx >= self.global_state.bytes32_vals.len() { - self.status = MachineStatus::Errored; - break; + error!(); } else if let Some(hash) = module.memory.load_32_byte_aligned(ptr.into()) { self.global_state.bytes32_vals[idx] = hash; } else { - self.status = MachineStatus::Errored; - break; + error!(); } } Opcode::GetGlobalStateU64 => { let idx = self.value_stack.pop().unwrap().assume_u32() as usize; if idx >= self.global_state.u64_vals.len() { - self.status = MachineStatus::Errored; - break; + error!(); } else { self.value_stack .push(Value::I64(self.global_state.u64_vals[idx])); @@ -1822,8 +1875,7 @@ impl Machine { let val = self.value_stack.pop().unwrap().assume_u64(); let idx = self.value_stack.pop().unwrap().assume_u32() as usize; if idx >= self.global_state.u64_vals.len() { - self.status = MachineStatus::Errored; - break; + error!(); } else { self.global_state.u64_vals[idx] = val } @@ -1855,8 +1907,7 @@ impl Machine { bail!("missing requested preimage for hash {}", hash); } } else { - self.status = MachineStatus::Errored; - break; + error!(); } } Opcode::ReadInboxMessage => { @@ -1867,8 +1918,7 @@ impl Machine { argument_data_to_inbox(inst.argument_data).expect("Bad inbox indentifier"); if let Some(message) = self.inbox_contents.get(&(inbox_identifier, msg_num)) { if ptr as u64 + 32 > module.memory.size() { - self.status = MachineStatus::Errored; - break; + error!(); } else { let offset = usize::try_from(offset).unwrap(); let len = std::cmp::min(32, 
message.len().saturating_sub(offset)); @@ -1876,8 +1926,7 @@ impl Machine { if module.memory.store_slice_aligned(ptr.into(), read) { self.value_stack.push(Value::I32(len as u32)); } else { - self.status = MachineStatus::Errored; - break; + error!(); } } } else { @@ -1895,7 +1944,8 @@ impl Machine { if self.is_halted() && !self.stdio_output.is_empty() { // If we halted, print out any trailing output that didn't have a newline. println!( - "\x1b[33mWASM says:\x1b[0m {}", + "{} {}", + Color::yellow("WASM says:"), String::from_utf8_lossy(&self.stdio_output), ); self.stdio_output.clear(); @@ -2011,7 +2061,6 @@ impl Machine { h.update(b"Machine running:"); h.update(&hash_value_stack(&self.value_stack)); h.update(&hash_value_stack(&self.internal_stack)); - h.update(&hash_pc_stack(&self.block_stack)); h.update(hash_stack_frame_stack(&self.frame_stack)); h.update(self.global_state.hash()); h.update(&u32::try_from(self.pc.module).unwrap().to_be_bytes()); @@ -2053,10 +2102,6 @@ impl Machine { |v| v.serialize_for_proof(), )); - data.extend(prove_stack(&self.block_stack, 1, hash_pc_stack, |pc| { - (*pc as u32).to_be_bytes() - })); - data.extend(prove_window( &self.frame_stack, hash_stack_frame_stack, diff --git a/arbitrator/prover/src/main.rs b/arbitrator/prover/src/main.rs index b65fcc1529..9a2b8eb62b 100644 --- a/arbitrator/prover/src/main.rs +++ b/arbitrator/prover/src/main.rs @@ -4,11 +4,10 @@ use eyre::{Context, Result}; use fnv::{FnvHashMap as HashMap, FnvHashSet as HashSet}; use prover::{ - machine::{GlobalState, InboxIdentifier, Machine, MachineStatus, PreimageResolver}, + machine::{GlobalState, InboxIdentifier, Machine, MachineStatus, PreimageResolver, ProofInfo}, utils::{Bytes32, CBytes}, wavm::Opcode, }; -use serde::Serialize; use sha3::{Digest, Keccak256}; use std::io::BufWriter; use std::sync::Arc; @@ -75,13 +74,6 @@ struct Opts { generate_binaries: Option, } -#[derive(Serialize)] -struct ProofInfo { - before: String, - proof: String, - after: String, -} - fn 
parse_size_delim(path: &Path) -> Result>> { let mut file = BufReader::new(File::open(path)?); let mut contents = Vec::new(); @@ -188,6 +180,7 @@ fn main() -> Result<()> { let mut mach = Machine::from_paths( &opts.libraries, &opts.binary, + true, opts.always_merkleize, opts.allow_hostapi, global_state, diff --git a/arbitrator/prover/src/memory.rs b/arbitrator/prover/src/memory.rs index ec691463c2..3276c280dc 100644 --- a/arbitrator/prover/src/memory.rs +++ b/arbitrator/prover/src/memory.rs @@ -16,7 +16,8 @@ use std::{borrow::Cow, convert::TryFrom}; pub struct Memory { buffer: Vec, #[serde(skip)] - merkle: Option, + pub merkle: Option, + pub max_size: u64, } fn hash_leaf(bytes: [u8; Memory::LEAF_SIZE]) -> Bytes32 { @@ -48,15 +49,16 @@ fn div_round_up(num: usize, denom: usize) -> usize { impl Memory { pub const LEAF_SIZE: usize = 32; /// Only used when initializing a memory to determine its size - pub const PAGE_SIZE: usize = 65536; + pub const PAGE_SIZE: u64 = 65536; /// The number of layers in the memory merkle tree /// 1 + log2(2^32 / LEAF_SIZE) = 1 + log2(2^(32 - log2(LEAF_SIZE))) = 1 + 32 - 5 const MEMORY_LAYERS: usize = 1 + 32 - 5; - pub fn new(size: usize) -> Memory { + pub fn new(size: usize, max_size: u64) -> Memory { Memory { buffer: vec![0u8; size], merkle: None, + max_size, } } @@ -106,6 +108,7 @@ impl Memory { let mut h = Keccak256::new(); h.update("Memory:"); h.update((self.buffer.len() as u64).to_be_bytes()); + h.update(self.max_size.to_be_bytes()); h.update(self.merkelize().root()); h.finalize().into() } diff --git a/arbitrator/prover/src/utils.rs b/arbitrator/prover/src/utils.rs index f0b3b4bf73..4579536eda 100644 --- a/arbitrator/prover/src/utils.rs +++ b/arbitrator/prover/src/utils.rs @@ -102,19 +102,6 @@ impl fmt::Debug for Bytes32 { } } -impl From for TableType { - fn from(table: DeprecatedTableType) -> Self { - Self { - element_type: match table.ty { - DeprecatedRefType::FuncRef => Type::FuncRef, - DeprecatedRefType::ExternRef => Type::ExternRef, 
- }, - initial: table.limits.minimum_size, - maximum: table.limits.maximum_size, - } - } -} - /// A Vec allocated with libc::malloc pub struct CBytes { ptr: *mut u8, @@ -144,43 +131,9 @@ impl Default for CBytes { } } -// TODO: remove this when re-initializing the rollup -// this is kept around to deserialize old binaries -#[derive(Serialize, Deserialize)] -pub enum DeprecatedRefType { - FuncRef, - ExternRef, -} - -// TODO: remove this when re-initializing the rollup -// this is kept around to deserialize old binaries -#[derive(Serialize, Deserialize)] -pub struct DeprecatedLimits { - pub minimum_size: u32, - pub maximum_size: Option, -} - -// TODO: remove this when re-initializing the rollup -// this is kept around to deserialize old binaries -#[derive(Serialize, Deserialize)] -pub struct DeprecatedTableType { - pub ty: DeprecatedRefType, - pub limits: DeprecatedLimits, -} - -impl From for DeprecatedTableType { - fn from(table: TableType) -> Self { - Self { - ty: match table.element_type { - Type::FuncRef => DeprecatedRefType::FuncRef, - Type::ExternRef => DeprecatedRefType::ExternRef, - x => panic!("impossible table type {:?}", x), - }, - limits: DeprecatedLimits { - minimum_size: table.initial, - maximum_size: table.maximum, - }, - } +impl fmt::Debug for CBytes { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:?}", self.as_slice()) } } @@ -203,6 +156,27 @@ impl From<&[u8]> for CBytes { } } +#[derive(Serialize, Deserialize)] +#[serde(remote = "Type")] +enum RemoteType { + I32, + I64, + F32, + F64, + V128, + FuncRef, + ExternRef, +} + +#[derive(Serialize, Deserialize)] +#[serde(remote = "TableType")] +pub struct RemoteTableType { + #[serde(with = "RemoteType")] + pub element_type: Type, + pub initial: u32, + pub maximum: Option, +} + impl Drop for CBytes { fn drop(&mut self) { unsafe { libc::free(self.ptr as _) } diff --git a/arbitrator/prover/src/value.rs b/arbitrator/prover/src/value.rs index 9ad0c83440..9267a21782 100644 --- 
a/arbitrator/prover/src/value.rs +++ b/arbitrator/prover/src/value.rs @@ -3,7 +3,7 @@ use std::convert::TryFrom; -use crate::{binary::FloatType, utils::Bytes32}; +use crate::{binary::FloatType, console::Color, utils::Bytes32}; use digest::Digest; use eyre::{bail, Result}; use serde::{Deserialize, Serialize}; @@ -20,7 +20,6 @@ pub enum ArbValueType { RefNull, FuncRef, InternalRef, - StackBoundary, } impl ArbValueType { @@ -96,7 +95,6 @@ pub enum Value { RefNull, FuncRef(u32), InternalRef(ProgramCounter), - StackBoundary, } impl Value { @@ -109,7 +107,6 @@ impl Value { Value::RefNull => ArbValueType::RefNull, Value::FuncRef(_) => ArbValueType::FuncRef, Value::InternalRef(_) => ArbValueType::InternalRef, - Value::StackBoundary => ArbValueType::StackBoundary, } } @@ -122,7 +119,6 @@ impl Value { Value::RefNull => Bytes32::default(), Value::FuncRef(x) => x.into(), Value::InternalRef(pc) => pc.serialize(), - Value::StackBoundary => Bytes32::default(), } } @@ -186,9 +182,52 @@ impl Value { ArbValueType::RefNull | ArbValueType::FuncRef | ArbValueType::InternalRef => { Value::RefNull } - ArbValueType::StackBoundary => { - panic!("Attempted to make default of StackBoundary type") + } + } + + pub fn pretty_print(&self) -> String { + let lparem = Color::grey("("); + let rparem = Color::grey(")"); + + macro_rules! single { + ($ty:expr, $value:expr) => {{ + format!("{}{}{}{}", Color::grey($ty), lparem, $value, rparem) + }}; + } + macro_rules! 
pair { + ($ty:expr, $left:expr, $right:expr) => {{ + let eq = Color::grey("="); + format!( + "{}{}{} {} {}{}", + Color::grey($ty), + lparem, + $left, + eq, + $right, + rparem + ) + }}; + } + match self { + Value::I32(value) => { + if (*value as i32) < 0 { + pair!("i32", *value as i32, value) + } else { + single!("i32", *value) + } + } + Value::I64(value) => { + if (*value as i64) < 0 { + pair!("i64", *value as i64, value) + } else { + single!("i64", *value) + } } + Value::F32(value) => single!("f32", *value), + Value::F64(value) => single!("f64", *value), + Value::RefNull => "null".into(), + Value::FuncRef(func) => format!("func {}", func), + Value::InternalRef(pc) => format!("inst {} in {}-{}", pc.inst, pc.module, pc.func), } } } diff --git a/arbitrator/prover/src/wavm.rs b/arbitrator/prover/src/wavm.rs index 58a1499ad6..2a44fc0cd6 100644 --- a/arbitrator/prover/src/wavm.rs +++ b/arbitrator/prover/src/wavm.rs @@ -71,10 +71,6 @@ pub enum IBinOpType { pub enum Opcode { Unreachable, Nop, - Block, - // Loop and If are wrapped into Block - Branch, - BranchIf, Return, Call, @@ -130,24 +126,16 @@ pub enum Opcode { IBinOp(IntegerValType, IBinOpType), // Custom opcodes not in WASM. Documented more in "Custom opcodes.md". - /// Branch is partially split up into these. - EndBlock, - /// Custom opcode not in wasm. - /// Like "EndBlock" but conditional. - /// Keeps its condition on the stack. - EndBlockIf, /// Custom opcode not in wasm. InitFrame, + /// Unconditional jump to an arbitrary point in code. + ArbitraryJump, /// Conditional jump to an arbitrary point in code. ArbitraryJumpIf, - /// Push a Value::StackBoundary to the stack - PushStackBoundary, /// Pop a value from the value stack and push it to the internal stack MoveFromStackToInternal, /// Pop a value from the internal stack and push it to the value stack MoveFromInternalToStack, - /// Pop a value from the value stack, then push an I32 1 if it's a stack boundary, I32 0 otherwise. 
- IsStackBoundary, /// Duplicate the top value on the stack Dup, /// Call a function in a different module @@ -168,8 +156,6 @@ pub enum Opcode { ReadInboxMessage, /// Stop exexcuting the machine and move to the finished status HaltAndSetFinished, - /// Unconditional jump to an arbitrary point in code. - ArbitraryJump, } impl Opcode { @@ -177,9 +163,6 @@ impl Opcode { match self { Opcode::Unreachable => 0x00, Opcode::Nop => 0x01, - Opcode::Block => 0x02, - Opcode::Branch => 0x0C, - Opcode::BranchIf => 0x0D, Opcode::Return => 0x0F, Opcode::Call => 0x10, Opcode::CallIndirect => 0x11, @@ -268,14 +251,11 @@ impl Opcode { _ => panic!("Unsupported {:?}", self), }, // Internal instructions: - Opcode::EndBlock => 0x8000, - Opcode::EndBlockIf => 0x8001, Opcode::InitFrame => 0x8002, - Opcode::ArbitraryJumpIf => 0x8003, - Opcode::PushStackBoundary => 0x8004, + Opcode::ArbitraryJump => 0x8003, + Opcode::ArbitraryJumpIf => 0x8004, Opcode::MoveFromStackToInternal => 0x8005, Opcode::MoveFromInternalToStack => 0x8006, - Opcode::IsStackBoundary => 0x8007, Opcode::Dup => 0x8008, Opcode::CrossModuleCall => 0x8009, Opcode::CallerModuleInternalCall => 0x800A, @@ -286,7 +266,6 @@ impl Opcode { Opcode::ReadPreImage => 0x8020, Opcode::ReadInboxMessage => 0x8021, Opcode::HaltAndSetFinished => 0x8022, - Opcode::ArbitraryJump => 0x8023, } } @@ -960,16 +939,16 @@ pub fn wasm_to_wavm<'a>( F64Max => float!(BinOp, F64, Max), F64Copysign => float!(BinOp, F64, CopySign), I32WrapI64 => opcode!(I32WrapI64), - I32TruncF32S => float!(TruncIntOp, I32, F32, true), - I32TruncF32U => float!(TruncIntOp, I32, F32, false), - I32TruncF64S => float!(TruncIntOp, I32, F64, true), - I32TruncF64U => float!(TruncIntOp, I32, F64, false), + I32TruncF32S => float!(TruncIntOp, I32, F32, false, true), + I32TruncF32U => float!(TruncIntOp, I32, F32, false, false), + I32TruncF64S => float!(TruncIntOp, I32, F64, false, true), + I32TruncF64U => float!(TruncIntOp, I32, F64, false, false), I64ExtendI32S => 
opcode!(I64ExtendI32(true)), I64ExtendI32U => opcode!(I64ExtendI32(false)), - I64TruncF32S => float!(TruncIntOp, I64, F32, true), - I64TruncF32U => float!(TruncIntOp, I64, F32, false), - I64TruncF64S => float!(TruncIntOp, I64, F64, true), - I64TruncF64U => float!(TruncIntOp, I64, F64, false), + I64TruncF32S => float!(TruncIntOp, I64, F32, false, true), + I64TruncF32U => float!(TruncIntOp, I64, F32, false, false), + I64TruncF64S => float!(TruncIntOp, I64, F64, false, true), + I64TruncF64U => float!(TruncIntOp, I64, F64, false, false), F32ConvertI32S => float!(ConvertIntOp, F32, I32, true), F32ConvertI32U => float!(ConvertIntOp, F32, I32, false), F32ConvertI64S => float!(ConvertIntOp, F32, I64, true), @@ -989,14 +968,14 @@ pub fn wasm_to_wavm<'a>( I64Extend8S => opcode!(I64ExtendS(8)), I64Extend16S => opcode!(I64ExtendS(16)), I64Extend32S => opcode!(I64ExtendS(32)), - I32TruncSatF32S => float!(TruncIntOp, I32, F32, true), - I32TruncSatF32U => float!(TruncIntOp, I32, F32, false), - I32TruncSatF64S => float!(TruncIntOp, I32, F64, true), - I32TruncSatF64U => float!(TruncIntOp, I32, F64, false), - I64TruncSatF32S => float!(TruncIntOp, I64, F32, true), - I64TruncSatF32U => float!(TruncIntOp, I64, F32, false), - I64TruncSatF64S => float!(TruncIntOp, I64, F64, true), - I64TruncSatF64U => float!(TruncIntOp, I64, F64, false), + I32TruncSatF32S => float!(TruncIntOp, I32, F32, true, true), + I32TruncSatF32U => float!(TruncIntOp, I32, F32, true, false), + I32TruncSatF64S => float!(TruncIntOp, I32, F64, true, true), + I32TruncSatF64U => float!(TruncIntOp, I32, F64, true, false), + I64TruncSatF32S => float!(TruncIntOp, I64, F32, true, true), + I64TruncSatF32U => float!(TruncIntOp, I64, F32, true, false), + I64TruncSatF64S => float!(TruncIntOp, I64, F64, true, true), + I64TruncSatF64U => float!(TruncIntOp, I64, F64, true, false), unsupported @ ( dot!( diff --git a/arbitrator/prover/test-cases/float32.wat b/arbitrator/prover/test-cases/float32.wat index a136bf6760..b49b7e769b 100644 
--- a/arbitrator/prover/test-cases/float32.wat +++ b/arbitrator/prover/test-cases/float32.wat @@ -170,63 +170,63 @@ ;; f32 -> i32 truncation (f32.const -2.5) - (i32.trunc_f32_s) + (i32.trunc_sat_f32_s) (call $assert_i32 (i32.const -2)) (f32.const -2.5) - (i32.trunc_f32_u) + (i32.trunc_sat_f32_u) (call $assert_i32 (i32.const 0)) (f32.const 1000000000000) - (i32.trunc_f32_u) + (i32.trunc_sat_f32_u) (i32.const -1) (call $assert_i32) (f32.const 1000000000000) - (i32.trunc_f32_s) + (i32.trunc_sat_f32_s) (i32.const 1) (i32.shl (i32.const 31)) (i32.sub (i32.const 1)) (call $assert_i32) (f32.const 1000000000000) - (i32.trunc_f32_s) + (i32.trunc_sat_f32_s) (i32.gt_s (i32.const 0)) (call $assert_true) (f32.const -1000000000000) - (i32.trunc_f32_s) + (i32.trunc_sat_f32_s) (i32.const 1) (i32.shl (i32.const 31)) (call $assert_i32) (f32.const -1000000000000) - (i32.trunc_f32_s) + (i32.trunc_sat_f32_s) (i32.lt_s (i32.const 0)) (call $assert_true) ;; f32 -> i64 truncation (f32.const -2.5) - (i64.trunc_f32_s) + (i64.trunc_sat_f32_s) (call $assert_i64 (i64.const -2)) (f32.const -2.5) - (i64.trunc_f32_u) + (i64.trunc_sat_f32_u) (call $assert_i64 (i64.const 0)) (f32.const 1000000000000000000000) - (i64.trunc_f32_u) + (i64.trunc_sat_f32_u) (i64.const -1) (call $assert_i64) (f32.const 1000000000000000000000) - (i64.trunc_f32_s) + (i64.trunc_sat_f32_s) (i64.const 1) (i64.shl (i64.const 63)) (i64.sub (i64.const 1)) (call $assert_i64) (f32.const 1000000000000000000000) - (i64.trunc_f32_s) + (i64.trunc_sat_f32_s) (i64.gt_s (i64.const 0)) (call $assert_true) (f32.const -1000000000000000000000) - (i64.trunc_f32_s) + (i64.trunc_sat_f32_s) (i64.const 1) (i64.shl (i64.const 63)) (call $assert_i64) (f32.const -1000000000000000000000) - (i64.trunc_f32_s) + (i64.trunc_sat_f32_s) (i64.lt_s (i64.const 0)) (call $assert_true) diff --git a/arbitrator/prover/test-cases/float64.wat b/arbitrator/prover/test-cases/float64.wat index 7c22e10d8a..791ba1de8f 100644 --- 
a/arbitrator/prover/test-cases/float64.wat +++ b/arbitrator/prover/test-cases/float64.wat @@ -170,63 +170,63 @@ ;; f64 -> i32 truncation (f64.const -2.5) - (i32.trunc_f64_s) + (i32.trunc_sat_f64_s) (call $assert_i32 (i32.const -2)) (f64.const -2.5) - (i32.trunc_f64_u) + (i32.trunc_sat_f64_u) (call $assert_i32 (i32.const 0)) (f64.const 1000000000000) - (i32.trunc_f64_u) + (i32.trunc_sat_f64_u) (i32.const -1) (call $assert_i32) (f64.const 1000000000000) - (i32.trunc_f64_s) + (i32.trunc_sat_f64_s) (i32.const 1) (i32.shl (i32.const 63)) (i32.sub (i32.const 1)) (call $assert_i32) (f64.const 1000000000000) - (i32.trunc_f64_s) + (i32.trunc_sat_f64_s) (i32.gt_s (i32.const 0)) (call $assert_true) (f64.const -1000000000000) - (i32.trunc_f64_s) + (i32.trunc_sat_f64_s) (i32.const 1) (i32.shl (i32.const 63)) (call $assert_i32) (f64.const -1000000000000) - (i32.trunc_f64_s) + (i32.trunc_sat_f64_s) (i32.lt_s (i32.const 0)) (call $assert_true) ;; f64 -> i64 truncation (f64.const -2.5) - (i64.trunc_f64_s) + (i64.trunc_sat_f64_s) (call $assert_i64 (i64.const -2)) (f64.const -2.5) - (i64.trunc_f64_u) + (i64.trunc_sat_f64_u) (call $assert_i64 (i64.const 0)) (f64.const 1000000000000000000000) - (i64.trunc_f64_u) + (i64.trunc_sat_f64_u) (i64.const -1) (call $assert_i64) (f64.const 1000000000000000000000) - (i64.trunc_f64_s) + (i64.trunc_sat_f64_s) (i64.const 1) (i64.shl (i64.const 63)) (i64.sub (i64.const 1)) (call $assert_i64) (f64.const 1000000000000000000000) - (i64.trunc_f64_s) + (i64.trunc_sat_f64_s) (i64.gt_s (i64.const 0)) (call $assert_true) (f64.const -1000000000000000000000) - (i64.trunc_f64_s) + (i64.trunc_sat_f64_s) (i64.const 1) (i64.shl (i64.const 63)) (call $assert_i64) (f64.const -1000000000000000000000) - (i64.trunc_f64_s) + (i64.trunc_sat_f64_s) (i64.lt_s (i64.const 0)) (call $assert_true) diff --git a/arbitrator/wasm-libraries/soft-float/bindings32.c b/arbitrator/wasm-libraries/soft-float/bindings32.c index be550b03d1..f0e54ba8f2 100644 --- 
a/arbitrator/wasm-libraries/soft-float/bindings32.c +++ b/arbitrator/wasm-libraries/soft-float/bindings32.c @@ -108,8 +108,10 @@ uint32_t wavm__f32_div(uint32_t va, uint32_t vb) { uint32_t wavm__f32_min(uint32_t va, uint32_t vb) { float32_t a = {va}; float32_t b = {vb}; - if (f32_isNaN(a) || f32_isNaN(b)) { + if (f32_isNaN(a)) { return a.v; + } else if (f32_isNaN(b)) { + return b.v; } else if (f32_isInfinity(a) && f32_isNegative(a)) { return a.v; } else if (f32_isInfinity(b) && f32_isNegative(b)) { @@ -132,8 +134,10 @@ uint32_t wavm__f32_min(uint32_t va, uint32_t vb) { uint32_t wavm__f32_max(uint32_t va, uint32_t vb) { float32_t a = {va}; float32_t b = {vb}; - if (f32_isNaN(a) || f32_isNaN(b)) { + if (f32_isNaN(a)) { return a.v; + } else if (f32_isNaN(b)) { + return b.v; } else if (f32_isInfinity(a) && !f32_isNegative(a)) { return a.v; } else if (f32_isInfinity(b) && !f32_isNegative(b)) { @@ -198,39 +202,113 @@ uint8_t wavm__f32_ge(uint32_t va, uint32_t vb) { } int32_t wavm__i32_trunc_f32_s(uint32_t v) { - float32_t f = {v}; - // A rounded up floating point version of 1 << 32 - float32_t max = {0x4f800000}; - if (f32_le(max, f)) { - return (1u << 31) - 1; + // signed truncation is defined over (i32::min - 1, i32::max + 1) + float32_t max = {0x4f000000}; // i32::max + 1 = 0x4F000000 + float32_t min = {0xcf000001}; // i32::min - 1 = 0xCF000000 (adjusted due to rounding) + float32_t val = {v}; + if (f32_isNaN(val) || f32_le(max, val) || f32_le(val, min)) { + __builtin_trap(); } - return f32_to_i32(f, softfloat_round_minMag, true); + return f32_to_i32(val, softfloat_round_minMag, true); +} + +int32_t wavm__i32_trunc_sat_f32_s(uint32_t v) { + // signed truncation is defined over (i32::min - 1, i32::max + 1) + float32_t max = {0x4f000000}; // i32::max + 1 = 0x4F000000 + float32_t min = {0xcf000001}; // i32::min - 1 = 0xCF000000 (adjusted due to rounding) + float32_t val = {v}; + if (f32_isNaN(val)) { + return 0; + } + if (f32_le(max, val)) { + return 2147483647; + } + if 
(f32_le(val, min)) { + return -2147483648; + } + return f32_to_i32(val, softfloat_round_minMag, true); } uint32_t wavm__i32_trunc_f32_u(uint32_t v) { - float32_t f = {v}; - if (f32_isNegative(f)) { + // unsigned truncation is defined over (-1, u32::max + 1) + float32_t max = {0x4f800000}; // u32::max + 1 = 0x4f800000 + float32_t min = {0xbf800000}; // -1 = 0xbf800000 + float32_t val = {v}; + if (f32_isNaN(val) || f32_le(max, val) || f32_le(val, min)) { + __builtin_trap(); + } + if (f32_isNegative(val)) { + return 0; + } + return f32_to_ui32(val, softfloat_round_minMag, true); +} + +uint32_t wavm__i32_trunc_sat_f32_u(uint32_t v) { + // unsigned truncation is defined over (-1, u32::max + 1) + float32_t max = {0x4f800000}; // u32::max + 1 = 0x4f800000 + float32_t val = {v}; + if (f32_isNaN(val) || f32_isNegative(val)) { return 0; } - return f32_to_ui32(f, softfloat_round_minMag, true); + if (f32_le(max, val)) { + return ~0u; + } + return f32_to_ui32(val, softfloat_round_minMag, true); } int64_t wavm__i64_trunc_f32_s(uint32_t v) { - float32_t f = {v}; - // A rounded down floating point version of 1 << 64 - float32_t max = {0x5f800000}; - if (f32_lt(max, f)) { - return (1ull << 63) - 1; + // unsigned truncation is defined over (i64::min - 1, i64::max + 1) + float32_t max = {0x5f000000}; // i64::max + 1 = 0x5f000000 + float32_t min = {0xdf000001}; // i64::min - 1 = 0xdf000000 (adjusted due to rounding) + float32_t val = {v}; + if (f32_isNaN(val) || f32_le(max, val) || f32_le(val, min)) { + __builtin_trap(); + } + return f32_to_i64(val, softfloat_round_minMag, true); +} + +int64_t wavm__i64_trunc_sat_f32_s(uint32_t v) { + // unsigned truncation is defined over (i64::min - 1, i64::max + 1) + float32_t max = {0x5f000000}; // i64::max + 1 = 0x5f000000 + float32_t min = {0xdf000001}; // i64::min - 1 = 0xdf000000 (adjusted due to rounding) + float32_t val = {v}; + if (f32_isNaN(val)) { + return 0; + } + if (f32_le(max, val)) { + return 9223372036854775807ll; + } + if 
(f32_le(val, min)) { + return -(((int64_t) 1) << 63); } - return f32_to_i64(f, softfloat_round_minMag, true); + return f32_to_i64(val, softfloat_round_minMag, true); } uint64_t wavm__i64_trunc_f32_u(uint32_t v) { - float32_t f = {v}; - if (f32_isNegative(f)) { + // unsigned truncation is defined over (-1, i64::max + 1) + float32_t max = {0x5f800000}; // i64::max + 1 = 0x5f800000 + float32_t min = {0xbf800000}; // -1 = 0xbf800000 + float32_t val = {v}; + if (f32_isNaN(val) || f32_le(max, val) || f32_le(val, min)) { + __builtin_trap(); + } + if (f32_isNegative(val)) { return 0; } - return f32_to_ui64(f, softfloat_round_minMag, true); + return f32_to_ui64(val, softfloat_round_minMag, true); +} + +uint64_t wavm__i64_trunc_sat_f32_u(uint32_t v) { + // unsigned truncation is defined over (-1, i64::max + 1) + float32_t max = {0x5f800000}; // i64::max + 1 = 0x5f800000 + float32_t val = {v}; + if (f32_isNaN(val) || f32_isNegative(val)) { + return 0; + } + if (f32_le(max, val)) { + return ~0ull; + } + return f32_to_ui64(val, softfloat_round_minMag, true); } uint32_t wavm__f32_convert_i32_s(int32_t x) { diff --git a/arbitrator/wasm-libraries/soft-float/bindings64.c b/arbitrator/wasm-libraries/soft-float/bindings64.c index 9d56b8c3e5..968b76fdbc 100644 --- a/arbitrator/wasm-libraries/soft-float/bindings64.c +++ b/arbitrator/wasm-libraries/soft-float/bindings64.c @@ -108,8 +108,10 @@ uint64_t wavm__f64_div(uint64_t va, uint64_t vb) { uint64_t wavm__f64_min(uint64_t va, uint64_t vb) { float64_t a = {va}; float64_t b = {vb}; - if (f64_isNaN(a) || f64_isNaN(b)) { + if (f64_isNaN(a)) { return a.v; + } else if (f64_isNaN(b)) { + return b.v; } else if (f64_isInfinity(a) && f64_isNegative(a)) { return a.v; } else if (f64_isInfinity(b) && f64_isNegative(b)) { @@ -132,10 +134,10 @@ uint64_t wavm__f64_min(uint64_t va, uint64_t vb) { uint64_t wavm__f64_max(uint64_t va, uint64_t vb) { float64_t a = {va}; float64_t b = {vb}; - if (f64_isNaN(a) || f64_isNaN(b)) { - return a.v; - } else if 
(f64_isInfinity(a) && !f64_isNegative(a)) { + if (f64_isNaN(a)) { return a.v; + } else if (f64_isNaN(b)) { + return b.v; } else if (f64_isInfinity(b) && !f64_isNegative(b)) { return b.v; } else if (f64_isInfinity(a) && f64_isNegative(a)) { @@ -198,41 +200,115 @@ uint8_t wavm__f64_ge(uint64_t va, uint64_t vb) { } int32_t wavm__i32_trunc_f64_s(uint64_t v) { - float64_t f = {v}; - // An exact floating point version of 1 << 32 - float64_t max = {0x41f0000000000000}; - if (f64_le(max, f)) { - return (1u << 31) - 1; + // signed truncation is defined over (i32::min - 1, i32::max + 1) + float64_t max = {0x41e0000000000000}; // i32::max + 1 = 0x41e0000000000000 + float64_t min = {0xc1e0000000200000}; // i32::min - 1 = 0xc1e0000000200000 + float64_t val = {v}; + if (f64_isNaN(val) || f64_le(max, val) || f64_le(val, min)) { + __builtin_trap(); + } + return f64_to_i32(val, softfloat_round_minMag, true); +} + +int32_t wavm__i32_trunc_sat_f64_s(uint64_t v) { + // signed truncation is defined over (i32::min - 1, i32::max + 1) + float64_t max = {0x41e0000000000000}; // i32::max + 1 = 0x41e0000000000000 + float64_t min = {0xc1e0000000200000}; // i32::min - 1 = 0xc1e0000000200000 + float64_t val = {v}; + if (f64_isNaN(val)) { + return 0; + } + if (f64_le(max, val)) { + return 2147483647; } - return f64_to_i32(f, softfloat_round_minMag, true); + if (f64_le(val, min)) { + return -2147483648; + } + return f64_to_i32(val, softfloat_round_minMag, true); } uint32_t wavm__i32_trunc_f64_u(uint64_t v) { - float64_t f = {v}; - if (f64_isNegative(f)) { + // unsigned truncation is defined over (-1, u32::max + 1) + float64_t max = {0x41f0000000000000}; // u32::max + 1 = 0x41f0000000000000 + float64_t min = {0xbff0000000000000}; // -1 = 0xbff0000000000000 + float64_t val = {v}; + if (f64_isNaN(val) || f64_le(max, val) || f64_le(val, min)) { + __builtin_trap(); + } + if (f64_isNegative(val)) { + return 0; + } + return f64_to_ui32(val, softfloat_round_minMag, true); +} + +uint32_t 
wavm__i32_trunc_sat_f64_u(uint64_t v) { + // unsigned truncation is defined over (-1, u32::max + 1) + float64_t max = {0x41f0000000000000}; // u32::max + 1 = 0x41f0000000000000 + float64_t val = {v}; + if (f64_isNaN(val) || f64_isNegative(val)) { return 0; } - return f64_to_ui32(f, softfloat_round_minMag, true); + if (f64_le(max, val)) { + return ~0u; + } + return f64_to_ui32(val, softfloat_round_minMag, true); } int64_t wavm__i64_trunc_f64_s(uint64_t v) { - float64_t f = {v}; - // A rounded up floating point version of 1 << 32 - float64_t max = {0x43f0000000000000}; - if (f64_le(max, f)) { - return (1ull << 63) - 1; + // signed truncation is defined over (i64::min - 1, u64::max + 1) + float64_t max = {0x43e0000000000000}; // i64::max + 1 = 0x43e0000000000000 + float64_t min = {0xc3e0000000000001}; // i64::min - 1 = 0xc3e0000000000000 (adjusted due to rounding) + float64_t val = {v}; + if (f64_isNaN(val) || f64_le(max, val) || f64_le(val, min)) { + __builtin_trap(); + } + return f64_to_i64(val, softfloat_round_minMag, true); +} + +int64_t wavm__i64_trunc_sat_f64_s(uint64_t v) { + // signed truncation is defined over (i64::min - 1, u64::max + 1) + float64_t max = {0x43e0000000000000}; // i64::max + 1 = 0x43e0000000000000 + float64_t min = {0xc3e0000000000001}; // i64::min - 1 = 0xc3e0000000000000 (adjusted due to rounding) + float64_t val = {v}; + if (f64_isNaN(val)) { + return 0; } - return f64_to_i64(f, softfloat_round_minMag, true); + if (f64_le(max, val)) { + return 9223372036854775807ll; + } + if (f64_le(val, min)) { + return -(((int64_t) 1) << 63); + } + return f64_to_i64(val, softfloat_round_minMag, true); } uint64_t wavm__i64_trunc_f64_u(uint64_t v) { + // unsigned truncation is defined over (-1, u64::max + 1) + float64_t max = {0x43f0000000000000}; // u64::max + 1 = 0x43f0000000000000 + float64_t min = {0xbff0000000000000}; // -1 = 0xbff0000000000000 float64_t f = {v}; + if (f64_isNaN(f) || f64_le(max, f) || f64_le(f, min)) { + __builtin_trap(); + } if 
(f64_isNegative(f)) { return 0; } return f64_to_ui64(f, softfloat_round_minMag, true); } +uint64_t wavm__i64_trunc_sat_f64_u(uint64_t v) { + // unsigned truncation is defined over (-1, u64::max + 1) + float64_t max = {0x43f0000000000000}; // u64::max + 1 = 0x43f0000000000000 + float64_t val = {v}; + if (f64_isNaN(val) || f64_isNegative(val)) { + return 0; + } + if (f64_le(max, val)) { + return 18446744073709551615ull; + } + return f64_to_ui64(val, softfloat_round_minMag, true); +} + uint64_t wavm__f64_convert_i32_s(int32_t x) { return i32_to_f64(x).v; } diff --git a/arbitrator/wasm-testsuite/.gitignore b/arbitrator/wasm-testsuite/.gitignore new file mode 100644 index 0000000000..e7e1fb04f4 --- /dev/null +++ b/arbitrator/wasm-testsuite/.gitignore @@ -0,0 +1 @@ +tests/* diff --git a/arbitrator/wasm-testsuite/Cargo.lock b/arbitrator/wasm-testsuite/Cargo.lock new file mode 100644 index 0000000000..60e48adf1f --- /dev/null +++ b/arbitrator/wasm-testsuite/Cargo.lock @@ -0,0 +1,772 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 3 + +[[package]] +name = "ansi_term" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" +dependencies = [ + "winapi", +] + +[[package]] +name = "arrayvec" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" + +[[package]] +name = "atty" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" +dependencies = [ + "hermit-abi", + "libc", + "winapi", +] + +[[package]] +name = "autocfg" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" + +[[package]] +name = "bincode" +version = "1.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" +dependencies = [ + "serde", +] + +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + +[[package]] +name = "block-buffer" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" +dependencies = [ + "block-padding", + "generic-array", +] + +[[package]] +name = "block-padding" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" + +[[package]] +name = "brotli-sys" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4445dea95f4c2b41cde57cc9fee236ae4dbae88d8fcbdb4750fc1bb5d86aaecd" +dependencies 
= [ + "cc", + "libc", +] + +[[package]] +name = "brotli2" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0cb036c3eade309815c15ddbacec5b22c4d1f3983a774ab2eac2e3e9ea85568e" +dependencies = [ + "brotli-sys", + "libc", +] + +[[package]] +name = "cc" +version = "1.0.73" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2fff2a6927b3bb87f9595d67196a70493f627687a71d87a0d692242c33f58c11" + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "clap" +version = "2.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c" +dependencies = [ + "ansi_term", + "atty", + "bitflags", + "strsim 0.8.0", + "textwrap", + "unicode-width", + "vec_map", +] + +[[package]] +name = "crossbeam-channel" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5aaa7bd5fb665c6864b5f963dd9097905c54125909c7aa94c9e18507cdbe6c53" +dependencies = [ + "cfg-if", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-deque" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e" +dependencies = [ + "cfg-if", + "crossbeam-epoch", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1145cf131a2c6ba0615079ab6a638f7e1973ac9c2634fcbeaaad6114246efe8c" +dependencies = [ + "autocfg", + "cfg-if", + "crossbeam-utils", + "lazy_static", + "memoffset", + "scopeguard", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"0bf124c720b7686e3c2663cf54062ab0f68a88af2fb6a030e87e30bf721fcb38" +dependencies = [ + "cfg-if", + "lazy_static", +] + +[[package]] +name = "darling" +version = "0.13.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a01d95850c592940db9b8194bc39f4bc0e89dee5c4265e4b1807c34a9aba453c" +dependencies = [ + "darling_core", + "darling_macro", +] + +[[package]] +name = "darling_core" +version = "0.13.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "859d65a907b6852c9361e3185c862aae7fafd2887876799fa55f5f99dc40d610" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2", + "quote", + "strsim 0.10.0", + "syn", +] + +[[package]] +name = "darling_macro" +version = "0.13.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" +dependencies = [ + "darling_core", + "quote", + "syn", +] + +[[package]] +name = "digest" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" +dependencies = [ + "generic-array", +] + +[[package]] +name = "either" +version = "1.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" + +[[package]] +name = "eyre" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c2b6b5a29c02cdc822728b7d7b8ae1bab3e3b05d44522770ddd49722eeac7eb" +dependencies = [ + "indenter", + "once_cell", +] + +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "generic-array" +version = "0.14.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"fd48d33ec7f05fbfa152300fdad764757cbded343c1aa1cff2fbaf4134851803" +dependencies = [ + "typenum", + "version_check", +] + +[[package]] +name = "hashbrown" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" + +[[package]] +name = "heck" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d621efb26863f0e9924c6ac577e8275e5e6b77455db64ffa6c65c904e9e132c" +dependencies = [ + "unicode-segmentation", +] + +[[package]] +name = "hermit-abi" +version = "0.1.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" +dependencies = [ + "libc", +] + +[[package]] +name = "hex" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" + +[[package]] +name = "ident_case" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" + +[[package]] +name = "indenter" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce23b50ad8242c51a442f3ff322d56b02f08852c77e4c0b4d3fd684abc89c683" + +[[package]] +name = "indexmap" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f647032dfaa1f8b6dc29bd3edb7bbef4861b8b8007ebb118d6db284fd59f6ee" +dependencies = [ + "autocfg", + "hashbrown", +] + +[[package]] +name = "itoa" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1aab8fc367588b89dcee83ab0fd66b72b50b72fa1904d7095045ace2b0c81c35" + +[[package]] +name = "keccak" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"67c21572b4949434e4fc1e1978b99c5f77064153c59d998bf13ecd96fb5ecba7" + +[[package]] +name = "lazy_static" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" + +[[package]] +name = "libc" +version = "0.2.125" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5916d2ae698f6de9bfb891ad7a8d65c09d232dc58cc4ac433c7da3b2fd84bc2b" + +[[package]] +name = "memchr" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" + +[[package]] +name = "memoffset" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" +dependencies = [ + "autocfg", +] + +[[package]] +name = "minimal-lexical" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" + +[[package]] +name = "nom" +version = "7.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8903e5a29a317527874d0402f867152a3d21c908bb0b933e416c65e301d4c36" +dependencies = [ + "memchr", + "minimal-lexical", +] + +[[package]] +name = "nom-leb128" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52a73b6c3a9ecfff12ad50dedba051ef838d2f478d938bb3e6b3842431028e62" +dependencies = [ + "arrayvec", + "nom", + "num-traits", +] + +[[package]] +name = "num" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43db66d1170d347f9a065114077f7dccb00c1b9478c89384490a3425279a4606" +dependencies = [ + "num-bigint", + "num-complex", + "num-integer", + "num-iter", + "num-rational", + "num-traits", +] + +[[package]] +name = "num-bigint" +version = "0.4.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "f93ab6289c7b344a8a9f60f88d80aa20032336fe78da341afc91c8a2341fc75f" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-complex" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97fbc387afefefd5e9e39493299f3069e14a140dd34dc19b4c1c1a8fddb6a790" +dependencies = [ + "num-traits", +] + +[[package]] +name = "num-integer" +version = "0.1.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" +dependencies = [ + "autocfg", + "num-traits", +] + +[[package]] +name = "num-iter" +version = "0.1.43" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d03e6c028c5dc5cac6e2dec0efda81fc887605bb3d884578bb6d6bf7514e252" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-rational" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d41702bd167c2df5520b384281bc111a4b5efcf7fbc4c9c222c815b07e0a6a6a" +dependencies = [ + "autocfg", + "num-bigint", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-traits" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" +dependencies = [ + "autocfg", +] + +[[package]] +name = "num_cpus" +version = "1.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19e64526ebdee182341572e50e9ad03965aa510cd94427a4549448f285e957a1" +dependencies = [ + "hermit-abi", + "libc", +] + +[[package]] +name = "once_cell" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87f3e037eac156d1775da914196f0f37741a274155e34a0b7e427c35d2a2ecb9" + +[[package]] +name = "opaque-debug" +version = "0.3.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" + +[[package]] +name = "proc-macro-error" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" +dependencies = [ + "proc-macro-error-attr", + "proc-macro2", + "quote", + "syn", + "version_check", +] + +[[package]] +name = "proc-macro-error-attr" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" +dependencies = [ + "proc-macro2", + "quote", + "version_check", +] + +[[package]] +name = "proc-macro2" +version = "1.0.38" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9027b48e9d4c9175fa2218adf3557f91c1137021739951d4932f5f8268ac48aa" +dependencies = [ + "unicode-xid", +] + +[[package]] +name = "prover" +version = "0.1.0" +dependencies = [ + "bincode", + "brotli2", + "digest", + "eyre", + "fnv", + "hex", + "libc", + "nom", + "nom-leb128", + "num", + "rayon", + "rustc-demangle", + "serde", + "serde_json", + "serde_with", + "sha3", + "static_assertions", + "structopt", + "wasmparser", +] + +[[package]] +name = "quote" +version = "1.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1feb54ed693b93a84e14094943b84b7c4eae204c512b7ccb95ab0c66d278ad1" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "rayon" +version = "1.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd249e82c21598a9a426a4e00dd7adc1d640b22445ec8545feef801d1a74c221" +dependencies = [ + "autocfg", + "crossbeam-deque", + "either", + "rayon-core", +] + +[[package]] +name = "rayon-core" +version = "1.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "258bcdb5ac6dad48491bb2992db6b7cf74878b0384908af124823d118c99683f" 
+dependencies = [ + "crossbeam-channel", + "crossbeam-deque", + "crossbeam-utils", + "num_cpus", +] + +[[package]] +name = "rustc-demangle" +version = "0.1.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342" + +[[package]] +name = "rustversion" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2cc38e8fa666e2de3c4aba7edeb5ffc5246c1c2ed0e3d17e560aeeba736b23f" + +[[package]] +name = "ryu" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73b4b750c782965c211b42f022f59af1fbceabdd026623714f104152f1ec149f" + +[[package]] +name = "scopeguard" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" + +[[package]] +name = "serde" +version = "1.0.137" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61ea8d54c77f8315140a05f4c7237403bf38b72704d031543aa1d16abbf517d1" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.137" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f26faba0c3959972377d3b2d306ee9f71faee9714294e41bb777f83f88578be" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_json" +version = "1.0.81" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b7ce2b32a1aed03c558dc61a5cd328f15aff2dbc17daad8fb8af04d2100e15c" +dependencies = [ + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "serde_with" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b827f2113224f3f19a665136f006709194bdfdcb1fdc1e4b2b5cbac8e0cced54" +dependencies = [ + "rustversion", + "serde", + "serde_with_macros", +] + +[[package]] +name = "serde_with_macros" +version = "1.5.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" +dependencies = [ + "darling", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "sha3" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f81199417d4e5de3f04b1e871023acea7389672c4135918f05aa9cbf2f2fa809" +dependencies = [ + "block-buffer", + "digest", + "keccak", + "opaque-debug", +] + +[[package]] +name = "static_assertions" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" + +[[package]] +name = "strsim" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" + +[[package]] +name = "strsim" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" + +[[package]] +name = "structopt" +version = "0.3.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c6b5c64445ba8094a6ab0c3cd2ad323e07171012d9c98b0b15651daf1787a10" +dependencies = [ + "clap", + "lazy_static", + "structopt-derive", +] + +[[package]] +name = "structopt-derive" +version = "0.4.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dcb5ae327f9cc13b68763b5749770cb9e048a99bd9dfdfa58d0cf05d5f64afe0" +dependencies = [ + "heck", + "proc-macro-error", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "syn" +version = "1.0.94" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a07e33e919ebcd69113d5be0e4d70c5707004ff45188910106854f38b960df4a" +dependencies = [ + "proc-macro2", + "quote", + "unicode-xid", +] + +[[package]] +name = "textwrap" +version = "0.11.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" +dependencies = [ + "unicode-width", +] + +[[package]] +name = "typenum" +version = "1.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987" + +[[package]] +name = "unicode-segmentation" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e8820f5d777f6224dc4be3632222971ac30164d4a258d595640799554ebfd99" + +[[package]] +name = "unicode-width" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973" + +[[package]] +name = "unicode-xid" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "957e51f3646910546462e67d5f7599b9e4fb8acdd304b087a6494730f9eebf04" + +[[package]] +name = "vec_map" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" + +[[package]] +name = "version_check" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" + +[[package]] +name = "wasm-testsuite" +version = "0.1.0" +dependencies = [ + "eyre", + "hex", + "prover", + "serde", + "serde_json", + "structopt", +] + +[[package]] +name = "wasmparser" +version = "0.84.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77dc97c22bb5ce49a47b745bed8812d30206eff5ef3af31424f2c1820c0974b2" +dependencies = [ + "indexmap", +] + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + 
"winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" diff --git a/arbitrator/wasm-testsuite/Cargo.toml b/arbitrator/wasm-testsuite/Cargo.toml new file mode 100644 index 0000000000..5ace2ca584 --- /dev/null +++ b/arbitrator/wasm-testsuite/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "wasm-testsuite" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +prover = { path = "../prover" } +structopt = "0.3.23" +serde = { version = "1.0.130", features = ["derive", "rc"] } +serde_json = "1.0.67" +eyre = "0.6.5" +hex = "0.4.3" + +[workspace] +members = [] diff --git a/arbitrator/wasm-testsuite/check.sh b/arbitrator/wasm-testsuite/check.sh new file mode 100755 index 0000000000..9c67557dc8 --- /dev/null +++ b/arbitrator/wasm-testsuite/check.sh @@ -0,0 +1,24 @@ +#!/usr/bin/bash + +# Copyright 2022, Offchain Labs, Inc. 
+# For license information, see https://github.com/nitro/blob/master/LICENSE + +rm -rf tests ../../contracts/test/prover/spec-proofs +mkdir -p tests/ +mkdir -p ../../contracts/test/prover/spec-proofs/ + +for file in testsuite/*wast; do + wast="${file##testsuite/}" + json="tests/${wast%.wast}.json" + wast2json $file -o $json 2>/dev/null +done + +cargo build --release + +for file in tests/*.json; do + base="${file#tests/}" + name="${base%.wasm}" + target/release/wasm-testsuite $name & +done + +wait diff --git a/arbitrator/wasm-testsuite/src/main.rs b/arbitrator/wasm-testsuite/src/main.rs new file mode 100644 index 0000000000..4ff511d9de --- /dev/null +++ b/arbitrator/wasm-testsuite/src/main.rs @@ -0,0 +1,409 @@ +// Copyright 2022, Offchain Labs, Inc. +// For license information, see https://github.com/nitro/blob/master/LICENSE + +use eyre::bail; +use prover::{ + console::Color, + machine, + machine::{GlobalState, Machine, MachineStatus, ProofInfo}, + value::Value, +}; +use serde::{Deserialize, Serialize}; +use std::{ + collections::{HashMap, HashSet}, + fs::File, + io::BufReader, + path::PathBuf, + time::Instant, +}; +use structopt::StructOpt; + +#[derive(StructOpt)] +#[structopt(name = "wasm-testsuite")] +struct Opts { + json: PathBuf, +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +struct Case { + source_filename: String, + commands: Vec, +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(tag = "type", rename_all = "snake_case")] +enum Command { + Module { + filename: String, + }, + AssertReturn { + action: Action, + expected: Vec, + }, + AssertExhaustion { + action: Action, + }, + AssertTrap { + action: Action, + }, + Action { + action: Action, + }, + AssertMalformed { + filename: String, + }, + AssertInvalid {}, + AssertUninstantiable {}, +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(tag = "type", rename_all = "snake_case")] +enum Action { + Invoke { field: String, args: Vec }, + Get { field: String }, +} + +#[derive(Clone, 
Debug, Serialize, Deserialize)] +struct TextValue { + #[serde(rename = "type")] + ty: TextValueType, + value: String, +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(rename_all = "snake_case")] +enum TextValueType { + I32, + I64, + F32, + F64, +} + +impl Into for TextValue { + fn into(self) -> Value { + match self.ty { + TextValueType::I32 => { + let value = self.value.parse().expect("not an i32"); + Value::I32(value) + } + TextValueType::I64 => { + let value = self.value.parse().expect("not an i64"); + Value::I64(value) + } + TextValueType::F32 => { + if self.value.contains("nan") { + return Value::F32(f32::NAN); + } + let message = format!("{} not the bit representation of an f32", self.value); + let bits: u32 = self.value.parse().expect(&message); + Value::F32(f32::from_bits(bits)) + } + TextValueType::F64 => { + if self.value.contains("nan") { + return Value::F64(f64::NAN); + } + let message = format!("{} not the bit representation of an f64", self.value); + let bits: u64 = self.value.parse().expect(&message); + Value::F64(f64::from_bits(bits)) + } + } + } +} + +impl PartialEq for TextValue { + fn eq(&self, other: &Value) -> bool { + if &Into::::into(self.clone()) == other { + return true; + } + + match self.ty { + TextValueType::F32 => match other { + Value::F32(value) => value.is_nan() && self.value.contains("nan"), + _ => false, + }, + TextValueType::F64 => match other { + Value::F64(value) => value.is_nan() && self.value.contains("nan"), + _ => false, + }, + _ => false, + } + } +} + +fn pretty_print_values(prefix: &str, values: Vec) { + let mut result = format!(" {} ", prefix); + for value in values { + result += &format!("{}, ", value.pretty_print()); + } + if result.len() > 2 { + result.pop(); + result.pop(); + } + println!("{}", result) +} + +fn main() -> eyre::Result<()> { + let opts = Opts::from_args(); + println!("test {:?}", opts.json); + + let mut path = PathBuf::from("tests/"); + path.push(&opts.json); + + let reader = 
BufReader::new(File::open(path)?); + let case: Case = serde_json::from_reader(reader)?; + let start_time = Instant::now(); + + let soft_float = PathBuf::from("../../target/machines/latest/soft-float.wasm"); + + // The modules listed below will be tested for compliance with the spec, but won't produce proofs for the OSP test. + // We list the soft-float modules because, while compliance is necessary, the funcs are comprised of opcodes + // better tested elsewhere and aren't worth 10x the test time. + let mut do_not_prove = HashSet::new(); + do_not_prove.insert(PathBuf::from("f32.json")); + do_not_prove.insert(PathBuf::from("f64.json")); + do_not_prove.insert(PathBuf::from("f32_cmp.json")); + do_not_prove.insert(PathBuf::from("f64_cmp.json")); + do_not_prove.insert(PathBuf::from("float_exprs.json")); + let export_proofs = !do_not_prove.contains(&opts.json); + if !export_proofs { + println!("{}", Color::grey("skipping OSP proof generation")); + } + + let mut wasmfile = String::new(); + let mut machine = None; + let mut subtest = 0; + let mut skip = false; + + macro_rules! run { + ($machine:expr, $bound:expr, $path:expr, $prove:expr) => {{ + let mut proofs = vec![]; + let mut count = 0; + let mut leap = 1; + let prove = $prove && export_proofs; + + if !prove { + $machine.step_n($bound)?; + } + + while count + leap < $bound && prove { + count += 1; + + let prior = $machine.hash().to_string(); + let proof = hex::encode($machine.serialize_proof()); + $machine.step_n(1)?; + let after = $machine.hash().to_string(); + proofs.push(ProofInfo::new(prior, proof, after)); + $machine.step_n(leap - 1)?; + + if count % 100 == 0 { + leap *= leap + 1; + if leap > 6 { + let message = format!("backing off {} {} {}", leap, count, $bound); + println!("{}", Color::grey(message)); + $machine.stop_merkle_caching(); + } + } + if $machine.is_halted() { + break; + } + } + if prove { + let out = File::create($path)?; + serde_json::to_writer_pretty(out, &proofs)?; + } + }}; + } + macro_rules! 
action { + ($action:expr) => { + match $action { + Action::Invoke { field, args } => (field, args), + Action::Get { .. } => { + // get() is only used in the export test, which we don't support + println!("skipping unsupported action {}", Color::red("get")); + continue; + } + } + }; + } + macro_rules! outname { + () => { + format!( + "../../contracts/test/prover/spec-proofs/{}-{:04}.json", + wasmfile, subtest + ) + }; + } + + for (index, command) in case.commands.into_iter().enumerate() { + macro_rules! test_success { + ($func:expr, $args:expr, $expected:expr) => { + let args: Vec<_> = $args.into_iter().map(Into::into).collect(); + if skip { + println!("skipping {}", Color::red($func)); + subtest += 1; + continue; + } + + let machine = machine.as_mut().expect("no machine"); + machine.jump_into_function(&$func, args.clone()); + machine.start_merkle_caching(); + run!(machine, 10_000_000, outname!(), true); + + let output = match machine.get_final_result() { + Ok(output) => output, + Err(error) => { + let expected: Vec = $expected.into_iter().map(Into::into).collect(); + println!( + "Divergence in func {} of test {}", + Color::red($func), + Color::red(index), + ); + pretty_print_values("Args ", args); + pretty_print_values("Expected", expected); + println!(); + bail!("{}", error) + } + }; + + if $expected != output { + let expected: Vec = $expected.into_iter().map(Into::into).collect(); + println!( + "Divergence in func {} of test {}", + Color::red($func), + Color::red(index), + ); + pretty_print_values("Args ", args); + pretty_print_values("Expected", expected); + pretty_print_values("Observed", output); + println!(); + bail!( + "Failure in test {}", + Color::red(format!("{} #{}", wasmfile, subtest)) + ) + } + subtest += 1; + }; + } + + match command { + Command::Module { filename } => { + wasmfile = filename; + machine = None; + subtest = 1; + + let mech = Machine::from_paths( + &[soft_float.clone()], + &PathBuf::from("tests").join(&wasmfile), + false, + false, + 
false, + GlobalState::default(), + HashMap::default(), + machine::get_empty_preimage_resolver(), + ); + + if let Err(error) = &mech { + let error = error.root_cause().to_string(); + skip = true; + + if error.contains("Module has no code") { + // We don't support metadata-only modules that have no code + continue; + } + if error.contains("Unsupported import") { + // We don't support the import test's functions + continue; + } + if error.contains("multiple tables") { + // We don't support the reference-type extension + continue; + } + if error.contains("bulk memory") { + // We don't support the bulk-memory extension + continue; + } + bail!("Unexpected error parsing module {}: {}", wasmfile, error) + } + + machine = mech.ok(); + skip = false; + + if let Some(machine) = &mut machine { + machine.step_n(1000)?; // run init + machine.start_merkle_caching(); + } + } + Command::AssertReturn { action, expected } => { + let (func, args) = action!(action); + test_success!(func, args, expected); + } + Command::Action { action } => { + let (func, args) = action!(action); + let expected: Vec = vec![]; + test_success!(func, args, expected); + } + Command::AssertTrap { action } => { + let (func, args) = action!(action); + let args: Vec<_> = args.into_iter().map(Into::into).collect(); + let test = Color::red(format!("{} #{}", wasmfile, subtest)); + + let machine = machine.as_mut().unwrap(); + machine.jump_into_function(&func, args.clone()); + run!(machine, 1000, outname!(), true); + + if machine.get_status() == MachineStatus::Running { + bail!("machine failed to trap in test {}", test) + } + if let Ok(output) = machine.get_final_result() { + println!( + "Divergence in func {} of test {}", + Color::red(func), + Color::red(index), + ); + pretty_print_values("Args ", args); + pretty_print_values("Output", output); + println!(); + bail!("Unexpected success in test {}", test) + } + subtest += 1; + } + Command::AssertExhaustion { action } => { + let (func, args) = action!(action); + let 
args: Vec<_> = args.into_iter().map(Into::into).collect(); + let test = Color::red(format!("{} #{}", wasmfile, subtest)); + + let machine = machine.as_mut().unwrap(); + machine.jump_into_function(&func, args.clone()); + run!(machine, 100_000, outname!(), false); // this is proportional to the amount of RAM + + if machine.get_status() != MachineStatus::Running { + bail!("machine should spin {}", test) + } + subtest += 1; + } + Command::AssertMalformed { filename } => { + let wasmpath = PathBuf::from("tests").join(&filename); + + let _ = Machine::from_paths( + &[soft_float.clone()], + &wasmpath, + false, + false, + false, + GlobalState::default(), + HashMap::default(), + machine::get_empty_preimage_resolver(), + ) + .expect_err(&format!("failed to reject invalid module {}", filename)); + } + _ => {} + } + } + + println!( + "{} {}", + Color::grey("done in"), + Color::pink(format!("{}ms", start_time.elapsed().as_millis())) + ); + Ok(()) +} diff --git a/arbitrator/wasm-testsuite/testsuite b/arbitrator/wasm-testsuite/testsuite new file mode 160000 index 0000000000..e25ae15935 --- /dev/null +++ b/arbitrator/wasm-testsuite/testsuite @@ -0,0 +1 @@ +Subproject commit e25ae159357c055b3a6fac99043644e208d26d2a diff --git a/arbnode/api.go b/arbnode/api.go index d9bffc8063..105fcb9f65 100644 --- a/arbnode/api.go +++ b/arbnode/api.go @@ -14,7 +14,6 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/rpc" "github.com/offchainlabs/nitro/arbos/arbosState" - "github.com/offchainlabs/nitro/arbos/l2pricing" "github.com/offchainlabs/nitro/arbos/retryables" "github.com/offchainlabs/nitro/validator" "github.com/pkg/errors" @@ -57,106 +56,6 @@ type ArbDebugAPI struct { blockchain *core.BlockChain } -type PricingModelHistoryPreExp struct { - First uint64 `json:"first"` - Timestamp []uint64 `json:"timestamp"` - BaseFee []*big.Int `json:"baseFee"` - RateEstimate []uint64 `json:"rateEstimate"` - GasPool []int64 `json:"gasPool"` - GasUsed []uint64 
`json:"gasUsed"` - L1BaseFeeEstimate []*big.Int `json:"l1BaseFeeEstimate"` - L1BaseFeeUpdateTime []uint64 `json:"l1BaseFeeUpdateTime"` - GasPoolMax int64 `json:"gasPoolMax"` - GasPoolTarget uint64 `json:"gasPoolTarget"` - GasPoolWeight uint64 `json:"gasPoolWeight"` - SpeedLimit uint64 `json:"speedLimit"` - MaxPerBlockGasLimit uint64 `json:"maxPerBlockGasLimit"` - L1BaseFeeEstimateInertia uint64 `json:"l1BaseFeeEstimateInertia"` -} - -func (api *ArbDebugAPI) PricingModelPreExp(ctx context.Context, start, end rpc.BlockNumber) (PricingModelHistoryPreExp, error) { - start, _ = arbitrum.ClipToPostNitroGenesis(api.blockchain, start) - end, _ = arbitrum.ClipToPostNitroGenesis(api.blockchain, end) - - blocks := end.Int64() - start.Int64() - if blocks <= 0 { - return PricingModelHistoryPreExp{}, fmt.Errorf("invalid block range: %v to %v", start.Int64(), end.Int64()) - } - - history := PricingModelHistoryPreExp{ - First: uint64(start), - Timestamp: make([]uint64, blocks), - BaseFee: make([]*big.Int, blocks), - RateEstimate: make([]uint64, blocks), - GasPool: make([]int64, blocks), - GasUsed: make([]uint64, blocks), - L1BaseFeeEstimate: make([]*big.Int, blocks), - L1BaseFeeUpdateTime: make([]uint64, blocks+1), - } - - if start > core.NitroGenesisBlock { - state, _, err := stateAndHeader(api.blockchain, uint64(start)-1) - if err != nil { - return history, err - } - l1BaseFeeUpdateTime, err := state.L1PricingState().LastL1BaseFeeUpdateTime() - if err != nil { - return history, err - } - history.L1BaseFeeUpdateTime[0] = l1BaseFeeUpdateTime - } - - for i := uint64(0); i < uint64(blocks); i++ { - state, header, err := stateAndHeader(api.blockchain, i+uint64(start)) - if err != nil { - return history, err - } - l1Pricing := state.L1PricingState() - l2Pricing := state.L2PricingState() - - if state.FormatVersion() >= l2pricing.FirstExponentialPricingVersion { - // blocks from here on use the new model so we'll zero-fill the remaining values - break - } - - rateEstimate, _ := 
l2Pricing.RateEstimate() - gasPool, _ := l2Pricing.GasPool_preExp() - l1BaseFeeEstimate, _ := l1Pricing.L1BaseFeeEstimateWei() - l1BaseFeeUpdateTime, err := l1Pricing.LastL1BaseFeeUpdateTime() - if err != nil { - return history, err - } - - history.Timestamp[i] = header.Time - history.BaseFee[i] = header.BaseFee - history.RateEstimate[i] = rateEstimate - history.GasPool[i] = gasPool - history.GasUsed[i] = header.GasUsed - history.L1BaseFeeEstimate[i] = l1BaseFeeEstimate - history.L1BaseFeeUpdateTime[i+1] = l1BaseFeeUpdateTime - - if i == uint64(blocks)-1 { - speedLimit, _ := l2Pricing.SpeedLimitPerSecond() - gasPoolMax, _ := l2Pricing.GasPoolMax() - gasPoolTarget, _ := l2Pricing.GasPoolTarget() - gasPoolWeight, _ := l2Pricing.GasPoolWeight() - maxPerBlockGasLimit, _ := l2Pricing.MaxPerBlockGasLimit() - l1BaseFeeEstimateInertia, err := l1Pricing.L1BaseFeeEstimateInertia() - if err != nil { - return history, err - } - history.SpeedLimit = speedLimit - history.GasPoolMax = gasPoolMax - history.GasPoolTarget = uint64(gasPoolTarget) - history.GasPoolWeight = uint64(gasPoolWeight) - history.MaxPerBlockGasLimit = maxPerBlockGasLimit - history.L1BaseFeeEstimateInertia = l1BaseFeeEstimateInertia - } - } - - return history, nil -} - type PricingModelHistory struct { First uint64 `json:"first"` Timestamp []uint64 `json:"timestamp"` @@ -164,7 +63,6 @@ type PricingModelHistory struct { GasBacklog []uint64 `json:"gasBacklog"` GasUsed []uint64 `json:"gasUsed"` L1BaseFeeEstimate []*big.Int `json:"l1BaseFeeEstimate"` - L1BaseFeeUpdateTime []uint64 `json:"l1BaseFeeUpdateTime"` MinBaseFee *big.Int `json:"minBaseFee"` SpeedLimit uint64 `json:"speedLimit"` MaxPerBlockGasLimit uint64 `json:"maxPerBlockGasLimit"` @@ -174,8 +72,8 @@ type PricingModelHistory struct { } func (api *ArbDebugAPI) PricingModel(ctx context.Context, start, end rpc.BlockNumber) (PricingModelHistory, error) { - start, _ = arbitrum.ClipToPostNitroGenesis(api.blockchain, start) - end, _ = 
arbitrum.ClipToPostNitroGenesis(api.blockchain, end) + start, _ = api.blockchain.ClipToPostNitroGenesis(start) + end, _ = api.blockchain.ClipToPostNitroGenesis(end) blocks := end.Int64() - start.Int64() if blocks <= 0 { @@ -183,25 +81,12 @@ func (api *ArbDebugAPI) PricingModel(ctx context.Context, start, end rpc.BlockNu } history := PricingModelHistory{ - First: uint64(start), - Timestamp: make([]uint64, blocks), - BaseFee: make([]*big.Int, blocks), - GasBacklog: make([]uint64, blocks), - GasUsed: make([]uint64, blocks), - L1BaseFeeEstimate: make([]*big.Int, blocks), - L1BaseFeeUpdateTime: make([]uint64, blocks+1), - } - - if start > core.NitroGenesisBlock { - state, _, err := stateAndHeader(api.blockchain, uint64(start)-1) - if err != nil { - return history, err - } - l1BaseFeeUpdateTime, err := state.L1PricingState().LastL1BaseFeeUpdateTime() - if err != nil { - return history, err - } - history.L1BaseFeeUpdateTime[0] = l1BaseFeeUpdateTime + First: uint64(start), + Timestamp: make([]uint64, blocks), + BaseFee: make([]*big.Int, blocks), + GasBacklog: make([]uint64, blocks), + GasUsed: make([]uint64, blocks), + L1BaseFeeEstimate: make([]*big.Int, blocks), } for i := uint64(0); i < uint64(blocks); i++ { @@ -215,27 +100,17 @@ func (api *ArbDebugAPI) PricingModel(ctx context.Context, start, end rpc.BlockNu history.Timestamp[i] = header.Time history.BaseFee[i] = header.BaseFee - if state.FormatVersion() < l2pricing.FirstExponentialPricingVersion { - // this block doesn't use the exponential pricing model, so we'll zero-fill it - continue - } - gasBacklog, _ := l2Pricing.GasBacklog() - l1BaseFeeEstimate, _ := l1Pricing.L1BaseFeeEstimateWei() - l1BaseFeeUpdateTime, err := l1Pricing.LastL1BaseFeeUpdateTime() - if err != nil { - return history, err - } + l1BaseFeeEstimate, _ := l1Pricing.PricePerUnit() history.GasBacklog[i] = gasBacklog history.GasUsed[i] = header.GasUsed history.L1BaseFeeEstimate[i] = l1BaseFeeEstimate - history.L1BaseFeeUpdateTime[i+1] = 
l1BaseFeeUpdateTime if i == uint64(blocks)-1 { speedLimit, _ := l2Pricing.SpeedLimitPerSecond() - maxPerBlockGasLimit, _ := l2Pricing.MaxPerBlockGasLimit() - l1BaseFeeEstimateInertia, err := l1Pricing.L1BaseFeeEstimateInertia() + maxPerBlockGasLimit, _ := l2Pricing.PerBlockGasLimit() + l1BaseFeeEstimateInertia, err := l1Pricing.Inertia() minBaseFee, _ := l2Pricing.MinBaseFeeWei() pricingInertia, _ := l2Pricing.PricingInertia() backlogTolerance, _ := l2Pricing.BacklogTolerance() @@ -255,8 +130,8 @@ func (api *ArbDebugAPI) PricingModel(ctx context.Context, start, end rpc.BlockNu } func (api *ArbDebugAPI) TimeoutQueueHistory(ctx context.Context, start, end rpc.BlockNumber) ([]uint64, error) { - start, _ = arbitrum.ClipToPostNitroGenesis(api.blockchain, start) - end, _ = arbitrum.ClipToPostNitroGenesis(api.blockchain, end) + start, _ = api.blockchain.ClipToPostNitroGenesis(start) + end, _ = api.blockchain.ClipToPostNitroGenesis(end) blocks := end.Int64() - start.Int64() if blocks <= 0 { @@ -288,7 +163,7 @@ type TimeoutQueue struct { func (api *ArbDebugAPI) TimeoutQueue(ctx context.Context, blockNum rpc.BlockNumber) (TimeoutQueue, error) { - blockNum, _ = arbitrum.ClipToPostNitroGenesis(api.blockchain, blockNum) + blockNum, _ = api.blockchain.ClipToPostNitroGenesis(blockNum) queue := TimeoutQueue{ BlockNumber: uint64(blockNum), diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go index 0d0690ad0d..4638b3f6b7 100644 --- a/arbnode/batch_poster.go +++ b/arbnode/batch_poster.go @@ -10,6 +10,7 @@ import ( "math/big" "time" + "github.com/offchainlabs/nitro/arbos" "github.com/offchainlabs/nitro/util/headerreader" "github.com/andybalholm/brotli" @@ -353,30 +354,39 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context, batchSeqNum u if err != nil { return nil, err } + forcePostBatch := timeSinceNextMessage >= b.config.MaxBatchPostInterval + haveUsefulMessage := false + for b.building.msgCount < msgCount { msg, err := 
b.streamer.GetMessage(b.building.msgCount) if err != nil { log.Error("error getting message from streamer", "error", err) break } + if msg.Message.Header.Kind != arbos.L1MessageType_BatchPostingReport { + haveUsefulMessage = true + } success, err := b.building.segments.AddMessage(&msg) if err != nil { log.Error("error adding message to batch", "error", err) break } if !success { - forcePostBatch = true // this batch is full + // this batch is full + forcePostBatch = true + haveUsefulMessage = true break } b.building.msgCount++ } + if b.building.segments.IsEmpty() { // we don't need to post a batch for the time being b.pendingMsgTimestamp = time.Now() return nil, nil } - if !forcePostBatch { + if !forcePostBatch || !haveUsefulMessage { // the batch isn't full yet and we've posted a batch recently // don't post anything for now return nil, nil diff --git a/arbnode/delayed.go b/arbnode/delayed.go index a966b3aea7..f76b6dc5e3 100644 --- a/arbnode/delayed.go +++ b/arbnode/delayed.go @@ -37,7 +37,7 @@ func init() { } messageDeliveredID = parsedIBridgeABI.Events["MessageDelivered"].ID - parsedIMessageProviderABI, err := bridgegen.IMessageProviderMetaData.GetAbi() + parsedIMessageProviderABI, err := bridgegen.IDelayedMessageProviderMetaData.GetAbi() if err != nil { panic(err) } @@ -56,7 +56,7 @@ type DelayedBridge struct { address common.Address fromBlock uint64 client arbutil.L1Interface - messageProviders map[common.Address]*bridgegen.IMessageProvider + messageProviders map[common.Address]*bridgegen.IDelayedMessageProvider } func NewDelayedBridge(client arbutil.L1Interface, addr common.Address, fromBlock uint64) (*DelayedBridge, error) { @@ -70,7 +70,7 @@ func NewDelayedBridge(client arbutil.L1Interface, addr common.Address, fromBlock address: addr, fromBlock: fromBlock, client: client, - messageProviders: make(map[common.Address]*bridgegen.IMessageProvider), + messageProviders: make(map[common.Address]*bridgegen.IDelayedMessageProvider), }, nil } @@ -86,7 +86,7 @@ func 
(b *DelayedBridge) GetMessageCount(ctx context.Context, blockNumber *big.In Context: ctx, BlockNumber: blockNumber, } - bigRes, err := b.con.MessageCount(opts) + bigRes, err := b.con.DelayedMessageCount(opts) if err != nil { return 0, errors.WithStack(err) } @@ -101,7 +101,7 @@ func (b *DelayedBridge) GetAccumulator(ctx context.Context, sequenceNumber uint6 Context: ctx, BlockNumber: blockNumber, } - return b.con.InboxAccs(opts, new(big.Int).SetUint64(sequenceNumber)) + return b.con.DelayedInboxAccs(opts, new(big.Int).SetUint64(sequenceNumber)) } type DelayedInboxMessage struct { @@ -255,7 +255,7 @@ func (b *DelayedBridge) parseMessage(ctx context.Context, ethLog types.Log) (*bi con, ok := b.messageProviders[ethLog.Address] if !ok { var err error - con, err = bridgegen.NewIMessageProvider(ethLog.Address, b.client) + con, err = bridgegen.NewIDelayedMessageProvider(ethLog.Address, b.client) if err != nil { return nil, nil, errors.WithStack(err) } diff --git a/arbnode/inbox_reader.go b/arbnode/inbox_reader.go index 27700ec173..5ece1fea48 100644 --- a/arbnode/inbox_reader.go +++ b/arbnode/inbox_reader.go @@ -7,7 +7,6 @@ import ( "context" "errors" "fmt" - "github.com/offchainlabs/nitro/util/headerreader" "math/big" "strings" "sync" @@ -18,6 +17,7 @@ import ( "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/util/arbmath" + "github.com/offchainlabs/nitro/util/headerreader" "github.com/offchainlabs/nitro/util/stopwaiter" ) diff --git a/arbnode/node.go b/arbnode/node.go index 5aa0e84a79..7d3e37bd9e 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -164,7 +164,7 @@ func deployBridgeCreator(ctx context.Context, l1Reader *headerreader.HeaderReade return common.Address{}, fmt.Errorf("inbox deploy error: %w", err) } - rollupEventBridgeTemplate, tx, _, err := rollupgen.DeployRollupEventBridge(auth, client) + rollupEventBridgeTemplate, tx, _, err := rollupgen.DeployRollupEventInbox(auth, client) err = andTxSucceeded(ctx, l1Reader, tx, err) if err != nil 
{ return common.Address{}, fmt.Errorf("rollup event bridge deploy error: %w", err) @@ -232,33 +232,45 @@ func deployChallengeFactory(ctx context.Context, l1Reader *headerreader.HeaderRe return ospEntryAddr, challengeManagerAddr, nil } -func deployRollupCreator(ctx context.Context, l1Reader *headerreader.HeaderReader, auth *bind.TransactOpts) (*rollupgen.RollupCreator, common.Address, error) { +func deployRollupCreator(ctx context.Context, l1Reader *headerreader.HeaderReader, auth *bind.TransactOpts) (*rollupgen.RollupCreator, common.Address, common.Address, common.Address, error) { bridgeCreator, err := deployBridgeCreator(ctx, l1Reader, auth) if err != nil { - return nil, common.Address{}, err + return nil, common.Address{}, common.Address{}, common.Address{}, err } ospEntryAddr, challengeManagerAddr, err := deployChallengeFactory(ctx, l1Reader, auth) if err != nil { - return nil, common.Address{}, err + return nil, common.Address{}, common.Address{}, common.Address{}, err } rollupAdminLogic, tx, _, err := rollupgen.DeployRollupAdminLogic(auth, l1Reader.Client()) err = andTxSucceeded(ctx, l1Reader, tx, err) if err != nil { - return nil, common.Address{}, fmt.Errorf("rollup admin logic deploy error: %w", err) + return nil, common.Address{}, common.Address{}, common.Address{}, fmt.Errorf("rollup admin logic deploy error: %w", err) } rollupUserLogic, tx, _, err := rollupgen.DeployRollupUserLogic(auth, l1Reader.Client()) err = andTxSucceeded(ctx, l1Reader, tx, err) if err != nil { - return nil, common.Address{}, fmt.Errorf("rollup user logic deploy error: %w", err) + return nil, common.Address{}, common.Address{}, common.Address{}, fmt.Errorf("rollup user logic deploy error: %w", err) } rollupCreatorAddress, tx, rollupCreator, err := rollupgen.DeployRollupCreator(auth, l1Reader.Client()) err = andTxSucceeded(ctx, l1Reader, tx, err) if err != nil { - return nil, common.Address{}, fmt.Errorf("rollup creator deploy error: %w", err) + return nil, common.Address{}, 
common.Address{}, common.Address{}, fmt.Errorf("rollup creator deploy error: %w", err) + } + + validatorUtils, tx, _, err := rollupgen.DeployValidatorUtils(auth, l1Reader.Client()) + err = andTxSucceeded(ctx, l1Reader, tx, err) + if err != nil { + return nil, common.Address{}, common.Address{}, common.Address{}, fmt.Errorf("validator utils deploy error: %w", err) + } + + validatorWalletCreator, tx, _, err := rollupgen.DeployValidatorWalletCreator(auth, l1Reader.Client()) + err = andTxSucceeded(ctx, l1Reader, tx, err) + if err != nil { + return nil, common.Address{}, common.Address{}, common.Address{}, fmt.Errorf("validator wallet creator deploy error: %w", err) } tx, err = rollupCreator.SetTemplates( @@ -268,16 +280,18 @@ func deployRollupCreator(ctx context.Context, l1Reader *headerreader.HeaderReade challengeManagerAddr, rollupAdminLogic, rollupUserLogic, + validatorUtils, + validatorWalletCreator, ) err = andTxSucceeded(ctx, l1Reader, tx, err) if err != nil { - return nil, common.Address{}, fmt.Errorf("rollup set template error: %w", err) + return nil, common.Address{}, common.Address{}, common.Address{}, fmt.Errorf("rollup set template error: %w", err) } - return rollupCreator, rollupCreatorAddress, nil + return rollupCreator, rollupCreatorAddress, validatorUtils, validatorWalletCreator, nil } -func DeployOnL1(ctx context.Context, l1client arbutil.L1Interface, deployAuth *bind.TransactOpts, sequencer common.Address, authorizeValidators uint64, wasmModuleRoot common.Hash, chainId *big.Int, readerConfig headerreader.Config, machineConfig validator.NitroMachineConfig) (*RollupAddresses, error) { +func DeployOnL1(ctx context.Context, l1client arbutil.L1Interface, deployAuth *bind.TransactOpts, sequencer, rollupOwner common.Address, authorizeValidators uint64, wasmModuleRoot common.Hash, chainId *big.Int, readerConfig headerreader.Config, machineConfig validator.NitroMachineConfig) (*RollupAddresses, error) { l1Reader := headerreader.New(l1client, readerConfig) 
l1Reader.Start(ctx) defer l1Reader.StopAndWait() @@ -290,7 +304,7 @@ func DeployOnL1(ctx context.Context, l1client arbutil.L1Interface, deployAuth *b } } - rollupCreator, rollupCreatorAddress, err := deployRollupCreator(ctx, l1Reader, deployAuth) + rollupCreator, rollupCreatorAddress, validatorUtils, validatorWalletCreator, err := deployRollupCreator(ctx, l1Reader, deployAuth) if err != nil { return nil, fmt.Errorf("error deploying rollup creator: %w", err) } @@ -316,7 +330,7 @@ func DeployOnL1(ctx context.Context, l1client arbutil.L1Interface, deployAuth *b StakeToken: common.Address{}, BaseStake: big.NewInt(params.Ether), WasmModuleRoot: wasmModuleRoot, - Owner: deployAuth.From, + Owner: rollupOwner, LoserStakeEscrow: common.Address{}, ChainId: chainId, SequencerInboxMaxTimeVariation: seqInboxParams, @@ -339,22 +353,14 @@ func DeployOnL1(ctx context.Context, l1client arbutil.L1Interface, deployAuth *b if err != nil { return nil, fmt.Errorf("error getting sequencer inbox: %w", err) } - tx, err = sequencerInbox.SetIsBatchPoster(deployAuth, sequencer, true) - err = andTxSucceeded(ctx, l1Reader, tx, err) - if err != nil { - return nil, fmt.Errorf("error setting is batch poster: %w", err) - } - - validatorUtils, tx, _, err := rollupgen.DeployValidatorUtils(deployAuth, l1client) - err = andTxSucceeded(ctx, l1Reader, tx, err) - if err != nil { - return nil, fmt.Errorf("validator utils deploy error: %w", err) - } - validatorWalletCreator, tx, _, err := rollupgen.DeployValidatorWalletCreator(deployAuth, l1client) - err = andTxSucceeded(ctx, l1Reader, tx, err) - if err != nil { - return nil, fmt.Errorf("validator utils deploy error: %w", err) + // if a zero sequencer address is specified, don't authorize any sequencers + if sequencer != (common.Address{}) { + tx, err = sequencerInbox.SetIsBatchPoster(deployAuth, sequencer, true) + err = andTxSucceeded(ctx, l1Reader, tx, err) + if err != nil { + return nil, fmt.Errorf("error setting is batch poster: %w", err) + } } var 
allowValidators []bool @@ -376,7 +382,7 @@ func DeployOnL1(ctx context.Context, l1client arbutil.L1Interface, deployAuth *b } return &RollupAddresses{ - Bridge: info.DelayedBridge, + Bridge: info.Bridge, Inbox: info.InboxAddress, SequencerInbox: info.SequencerInbox, DeployedAt: receipt.BlockNumber.Uint64(), @@ -504,6 +510,7 @@ type SequencerConfig struct { MaxBlockSpeed time.Duration `koanf:"max-block-speed"` MaxRevertGasReject uint64 `koanf:"max-revert-gas-reject"` MaxAcceptableTimestampDelta time.Duration `koanf:"max-acceptable-timestamp-delta"` + SenderWhitelist []string `koanf:"sender-whitelist"` Dangerous DangerousSequencerConfig `koanf:"dangerous"` } @@ -520,6 +527,7 @@ var TestSequencerConfig = SequencerConfig{ MaxBlockSpeed: time.Millisecond * 10, MaxRevertGasReject: params.TxGas + 10000, MaxAcceptableTimestampDelta: time.Hour, + SenderWhitelist: nil, Dangerous: TestDangerousSequencerConfig, } @@ -528,6 +536,7 @@ func SequencerConfigAddOptions(prefix string, f *flag.FlagSet) { f.Duration(prefix+".max-block-speed", DefaultSequencerConfig.MaxBlockSpeed, "minimum delay between blocks (sets a maximum speed of block production)") f.Uint64(prefix+".max-revert-gas-reject", DefaultSequencerConfig.MaxRevertGasReject, "maximum gas executed in a revert for the sequencer to reject the transaction instead of posting it (anti-DOS)") f.Duration(prefix+".max-acceptable-timestamp-delta", DefaultSequencerConfig.MaxAcceptableTimestampDelta, "maximum acceptable time difference between the local time and the latest L1 block's timestamp") + f.StringArray(prefix+".sender-whitelist", DefaultSequencerConfig.SenderWhitelist, "whitelist of authorized senders (if empty, everyone is allowed)") DangerousSequencerConfigAddOptions(prefix+".dangerous", f) } @@ -682,6 +691,7 @@ func createNodeImpl( if err != nil { return nil, err } + txStreamer.SetInboxReader(inboxReader) nitroMachineConfig := validator.DefaultNitroMachineConfig if config.Wasm.RootPath != "" { @@ -1209,7 +1219,7 @@ func 
WriteOrTestGenblock(chainDb ethdb.Database, initData statetransfer.InitData return err } - genBlock := arbosState.MakeGenesisBlock(prevHash, blockNumber, timestamp, stateRoot) + genBlock := arbosState.MakeGenesisBlock(prevHash, blockNumber, timestamp, stateRoot, chainConfig) blockHash := genBlock.Hash() if storedGenHash == EmptyHash { @@ -1222,6 +1232,16 @@ func WriteOrTestGenblock(chainDb ethdb.Database, initData statetransfer.InitData return nil } +func TryReadStoredChainConfig(chainDb ethdb.Database) *params.ChainConfig { + EmptyHash := common.Hash{} + + block0Hash := rawdb.ReadCanonicalHash(chainDb, 0) + if block0Hash == EmptyHash { + return nil + } + return rawdb.ReadChainConfig(chainDb, block0Hash) +} + func WriteOrTestChainConfig(chainDb ethdb.Database, config *params.ChainConfig) error { EmptyHash := common.Hash{} diff --git a/arbnode/sequencer.go b/arbnode/sequencer.go index 2efd5932b0..e1c38ec2ab 100644 --- a/arbnode/sequencer.go +++ b/arbnode/sequencer.go @@ -6,12 +6,13 @@ package arbnode import ( "context" "fmt" - "github.com/offchainlabs/nitro/util/headerreader" "math" "sync" "sync/atomic" "time" + "github.com/offchainlabs/nitro/util/headerreader" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types" @@ -41,10 +42,11 @@ func (i *txQueueItem) returnResult(err error) { type Sequencer struct { stopwaiter.StopWaiter - txStreamer *TransactionStreamer - txQueue chan txQueueItem - l1Reader *headerreader.HeaderReader - config SequencerConfig + txStreamer *TransactionStreamer + txQueue chan txQueueItem + l1Reader *headerreader.HeaderReader + config SequencerConfig + senderWhitelist map[common.Address]struct{} L1BlockAndTimeMutex sync.Mutex l1BlockNumber uint64 @@ -55,17 +57,40 @@ type Sequencer struct { } func NewSequencer(txStreamer *TransactionStreamer, l1Reader *headerreader.HeaderReader, config SequencerConfig) (*Sequencer, error) { + senderWhitelist := 
make(map[common.Address]struct{}) + for _, address := range config.SenderWhitelist { + if len(address) == 0 { + continue + } + if !common.IsHexAddress(address) { + return nil, fmt.Errorf("sequencer sender whitelist entry \"%v\" is not a valid address", address) + } + senderWhitelist[common.HexToAddress(address)] = struct{}{} + } return &Sequencer{ - txStreamer: txStreamer, - txQueue: make(chan txQueueItem, 128), - l1Reader: l1Reader, - config: config, - l1BlockNumber: 0, - l1Timestamp: 0, + txStreamer: txStreamer, + txQueue: make(chan txQueueItem, 128), + l1Reader: l1Reader, + config: config, + senderWhitelist: senderWhitelist, + l1BlockNumber: 0, + l1Timestamp: 0, }, nil } func (s *Sequencer) PublishTransaction(ctx context.Context, tx *types.Transaction) error { + if len(s.senderWhitelist) > 0 { + signer := types.LatestSigner(s.txStreamer.bc.Config()) + sender, err := types.Sender(signer, tx) + if err != nil { + return err + } + _, authorized := s.senderWhitelist[sender] + if !authorized { + return errors.New("transaction sender is not on the whitelist") + } + } + resultChan := make(chan error, 1) queueItem := txQueueItem{ tx, @@ -86,13 +111,6 @@ func (s *Sequencer) PublishTransaction(ctx context.Context, tx *types.Transactio } func (s *Sequencer) preTxFilter(state *arbosState.ArbosState, tx *types.Transaction, sender common.Address) error { - agg, err := state.L1PricingState().ReimbursableAggregatorForSender(sender) - if err != nil { - return err - } - if agg == nil || *agg != l1pricing.SequencerAddress { - return errors.New("transaction sender's preferred aggregator is not the sequencer") - } return nil } @@ -217,7 +235,7 @@ func (s *Sequencer) sequenceTransactions(ctx context.Context) { header := &arbos.L1IncomingMessageHeader{ Kind: arbos.L1MessageType_L2Message, - Poster: l1pricing.SequencerAddress, + Poster: l1pricing.BatchPosterAddress, BlockNumber: l1Block, Timestamp: uint64(timestamp), RequestId: nil, diff --git a/arbnode/transaction_streamer.go 
b/arbnode/transaction_streamer.go index 36dfdb6fbe..7b22996d4b 100644 --- a/arbnode/transaction_streamer.go +++ b/arbnode/transaction_streamer.go @@ -54,6 +54,7 @@ type TransactionStreamer struct { coordinator *SeqCoordinator broadcastServer *broadcaster.Broadcaster validator *validator.BlockValidator + inboxReader *InboxReader } func NewTransactionStreamer(db ethdb.Database, bc *core.BlockChain, broadcastServer *broadcaster.Broadcaster) (*TransactionStreamer, error) { @@ -96,6 +97,16 @@ func (s *TransactionStreamer) SetSeqCoordinator(coordinator *SeqCoordinator) { s.coordinator = coordinator } +func (s *TransactionStreamer) SetInboxReader(inboxReader *InboxReader) { + if s.Started() { + panic("trying to set inbox reader after start") + } + if s.inboxReader != nil { + panic("trying to set inbox reader when already set") + } + s.inboxReader = inboxReader +} + func (s *TransactionStreamer) cleanupInconsistentState() error { // If it doesn't exist yet, set the message count to 0 hasMessageCount, err := s.db.Has(messageCountKey) @@ -600,8 +611,7 @@ func (s *TransactionStreamer) SequenceDelayedMessages(ctx context.Context, messa } func (s *TransactionStreamer) GetGenesisBlockNumber() (uint64, error) { - // TODO: when block 0 is no longer necessarily the genesis, track this and update core.NitroGenesisBlock - return 0, nil + return s.bc.Config().ArbitrumChainParams.GenesisBlockNum, nil } func (s *TransactionStreamer) BlockNumberToMessageCount(blockNum uint64) (arbutil.MessageIndex, error) { @@ -696,6 +706,10 @@ func (s *TransactionStreamer) createBlocks(ctx context.Context) error { } }() + batchFetcher := func(batchNum uint64) ([]byte, error) { + return s.inboxReader.GetSequencerMessageBytes(ctx, batchNum) + } + for pos < msgCount { statedb, err = s.bc.StateAt(lastBlockHeader.Root) @@ -720,14 +734,18 @@ func (s *TransactionStreamer) createBlocks(ctx context.Context) error { return err } - block, receipts := arbos.ProduceBlock( + block, receipts, err := 
arbos.ProduceBlock( msg.Message, msg.DelayedMessagesRead, lastBlockHeader, statedb, s.bc, s.bc.Config(), + batchFetcher, ) + if err != nil { + return err + } // ProduceBlock advances one message pos++ diff --git a/arbos/addressSet/addressSet.go b/arbos/addressSet/addressSet.go index d47f08fc5e..3903e34944 100644 --- a/arbos/addressSet/addressSet.go +++ b/arbos/addressSet/addressSet.go @@ -44,9 +44,9 @@ func (aset *AddressSet) GetAnyMember() (*common.Address, error) { if err != nil || size == 0 { return nil, err } - addrAsHash, err := aset.backingStorage.GetByUint64(1) - addr := common.BytesToAddress(addrAsHash.Bytes()) - return &addr, err + sba := aset.backingStorage.OpenStorageBackedAddressOrNil(1) + addr, err := sba.Get() + return addr, err } func (aset *AddressSet) Clear() error { @@ -65,18 +65,21 @@ func (aset *AddressSet) Clear() error { return aset.size.Clear() } -func (aset *AddressSet) AllMembers() ([]common.Address, error) { +func (aset *AddressSet) AllMembers(maxNumToReturn uint64) ([]common.Address, error) { size, err := aset.size.Get() if err != nil { return nil, err } + if size > maxNumToReturn { + size = maxNumToReturn + } ret := make([]common.Address, size) for i := range ret { - bytes, err := aset.backingStorage.GetByUint64(uint64(i + 1)) + sba := aset.backingStorage.OpenStorageBackedAddress(uint64(i + 1)) + ret[i], err = sba.Get() if err != nil { return nil, err } - ret[i] = common.BytesToAddress(bytes.Bytes()) } return ret, nil } @@ -90,13 +93,15 @@ func (aset *AddressSet) Add(addr common.Address) error { if err != nil { return err } + sba := aset.backingStorage.OpenStorageBackedAddress(1 + size) slot := util.UintToHash(1 + size) addrAsHash := common.BytesToHash(addr.Bytes()) err = aset.byAddress.Set(addrAsHash, slot) if err != nil { return err } - err = aset.backingStorage.Set(slot, addrAsHash) + sba = aset.backingStorage.OpenStorageBackedAddress(1 + size) + err = sba.Set(addr) if err != nil { return err } diff --git 
a/arbos/addressSet/addressSet_test.go b/arbos/addressSet/addressSet_test.go index 7a70184a27..8485f85cc6 100644 --- a/arbos/addressSet/addressSet_test.go +++ b/arbos/addressSet/addressSet_test.go @@ -4,6 +4,7 @@ package addressSet import ( + "github.com/ethereum/go-ethereum/common/math" "testing" "github.com/ethereum/go-ethereum/common" @@ -92,7 +93,7 @@ func TestAddressSet(t *testing.T) { } Require(t, aset.Add(addr1)) - all, err := aset.AllMembers() + all, err := aset.AllMembers(math.MaxUint64) Require(t, err) if len(all) != 2 { Fail(t) diff --git a/arbos/arbosState/arbosstate.go b/arbos/arbosState/arbosstate.go index ad6ee2588e..34a4d54225 100644 --- a/arbos/arbosState/arbosstate.go +++ b/arbos/arbosState/arbosstate.go @@ -11,10 +11,8 @@ import ( "github.com/offchainlabs/nitro/arbos/blockhash" "github.com/offchainlabs/nitro/arbos/l2pricing" - "github.com/offchainlabs/nitro/util/arbmath" "github.com/offchainlabs/nitro/arbos/addressSet" - "github.com/offchainlabs/nitro/arbos/blsTable" "github.com/offchainlabs/nitro/arbos/burn" "github.com/offchainlabs/nitro/arbos/addressTable" @@ -45,11 +43,11 @@ type ArbosState struct { l2PricingState *l2pricing.L2PricingState retryableState *retryables.RetryableState addressTable *addressTable.AddressTable - blsTable *blsTable.BLSTable chainOwners *addressSet.AddressSet sendMerkle *merkleAccumulator.MerkleAccumulator blockhashes *blockhash.Blockhashes chainId storage.StorageBackedBigInt + genesisBlockNum storage.StorageBackedUint64 backingStorage *storage.Storage Burner burn.Burner } @@ -75,11 +73,11 @@ func OpenArbosState(stateDB vm.StateDB, burner burn.Burner) (*ArbosState, error) l2pricing.OpenL2PricingState(backingStorage.OpenSubStorage(l2PricingSubspace)), retryables.OpenRetryableState(backingStorage.OpenSubStorage(retryablesSubspace), stateDB), addressTable.Open(backingStorage.OpenSubStorage(addressTableSubspace)), - blsTable.Open(backingStorage.OpenSubStorage(blsTableSubspace)), 
addressSet.OpenAddressSet(backingStorage.OpenSubStorage(chainOwnerSubspace)), merkleAccumulator.OpenMerkleAccumulator(backingStorage.OpenSubStorage(sendMerkleSubspace)), blockhash.OpenBlockhashes(backingStorage.OpenSubStorage(blockhashesSubspace)), backingStorage.OpenStorageBackedBigInt(uint64(chainIdOffset)), + backingStorage.OpenStorageBackedUint64(uint64(genesisBlockNumOffset)), backingStorage, burner, }, nil @@ -134,6 +132,7 @@ const ( upgradeTimestampOffset networkFeeAccountOffset chainIdOffset + genesisBlockNumOffset ) type ArbosStateSubspaceID []byte @@ -143,10 +142,9 @@ var ( l2PricingSubspace ArbosStateSubspaceID = []byte{1} retryablesSubspace ArbosStateSubspaceID = []byte{2} addressTableSubspace ArbosStateSubspaceID = []byte{3} - blsTableSubspace ArbosStateSubspaceID = []byte{4} - chainOwnerSubspace ArbosStateSubspaceID = []byte{5} - sendMerkleSubspace ArbosStateSubspaceID = []byte{6} - blockhashesSubspace ArbosStateSubspaceID = []byte{7} + chainOwnerSubspace ArbosStateSubspaceID = []byte{4} + sendMerkleSubspace ArbosStateSubspaceID = []byte{5} + blockhashesSubspace ArbosStateSubspaceID = []byte{6} ) // Returns a list of precompiles that only appear in Arbitrum chains (i.e. 
ArbOS precompiles) at the genesis block @@ -185,7 +183,7 @@ func InitializeArbosState(stateDB vm.StateDB, burner burn.Burner, chainConfig *p } arbosVersion = chainConfig.ArbitrumChainParams.InitialArbOSVersion - if arbosVersion < 1 || arbosVersion > 4 { + if arbosVersion != 1 { return nil, fmt.Errorf("cannot initialize to unsupported ArbOS version %v", arbosVersion) } @@ -200,19 +198,16 @@ func InitializeArbosState(stateDB vm.StateDB, burner burn.Burner, chainConfig *p _ = sto.SetUint64ByUint64(uint64(upgradeTimestampOffset), 0) _ = sto.SetUint64ByUint64(uint64(networkFeeAccountOffset), 0) // the 0 address until an owner sets it _ = sto.SetByUint64(uint64(chainIdOffset), common.BigToHash(chainConfig.ChainID)) + _ = sto.SetUint64ByUint64(uint64(genesisBlockNumOffset), chainConfig.ArbitrumChainParams.GenesisBlockNum) _ = l1pricing.InitializeL1PricingState(sto.OpenSubStorage(l1PricingSubspace)) - _ = l2pricing.InitializeL2PricingState(sto.OpenSubStorage(l2PricingSubspace), arbosVersion) + _ = l2pricing.InitializeL2PricingState(sto.OpenSubStorage(l2PricingSubspace)) _ = retryables.InitializeRetryableState(sto.OpenSubStorage(retryablesSubspace)) addressTable.Initialize(sto.OpenSubStorage(addressTableSubspace)) - _ = blsTable.InitializeBLSTable(sto.OpenSubStorage(blsTableSubspace)) merkleAccumulator.InitializeMerkleAccumulator(sto.OpenSubStorage(sendMerkleSubspace)) blockhash.InitializeBlockhashes(sto.OpenSubStorage(blockhashesSubspace)) - // by default, the remapped zero address is the initial chain owner - initialChainOwner := util.RemapL1Address(common.Address{}) - if chainConfig.ArbitrumChainParams.InitialChainOwner != (common.Address{}) { - initialChainOwner = chainConfig.ArbitrumChainParams.InitialChainOwner - } + // may be the zero address + initialChainOwner := chainConfig.ArbitrumChainParams.InitialChainOwner ownersStorage := sto.OpenSubStorage(chainOwnerSubspace) _ = addressSet.Initialize(ownersStorage) _ = 
addressSet.OpenAddressSet(ownersStorage).Add(initialChainOwner) @@ -220,31 +215,15 @@ func InitializeArbosState(stateDB vm.StateDB, burner burn.Burner, chainConfig *p return OpenArbosState(stateDB, burner) } -var TestnetUpgrade2Owner = common.HexToAddress("0x40Fd01b32e97803f12693517776826a71e2B8D5f") - func (state *ArbosState) UpgradeArbosVersionIfNecessary(currentTimestamp uint64, chainConfig *params.ChainConfig) { upgradeTo, err := state.upgradeVersion.Get() state.Restrict(err) flagday, _ := state.upgradeTimestamp.Get() if upgradeTo > state.arbosVersion && currentTimestamp >= flagday { for upgradeTo > state.arbosVersion && currentTimestamp >= flagday { - if state.arbosVersion == 1 { - // Upgrade version 1->2 adds a chain owner for the testnet - if arbmath.BigEquals(chainConfig.ChainID, params.ArbitrumTestnetChainConfig().ChainID) { - state.Restrict(state.chainOwners.Add(TestnetUpgrade2Owner)) - } - } else if state.arbosVersion == 2 { - // Upgrade version 2->3 has no state changes - } else if state.arbosVersion == 3 { - // Upgrade version 3->4 adds two fields to the L2 pricing model - // (We don't bother to remove no-longer-used fields, for safety - // and because they'll be removed when we telescope versions for re-launch.) 
- state.Restrict(state.l2PricingState.UpgradeToVersion4()) - } else { - // code to upgrade to future versions will be put here - panic("Unable to perform requested ArbOS upgrade") - } - state.arbosVersion++ + // code to upgrade to future versions will be put here + panic("Unable to perform requested ArbOS upgrade") + // state.arbosVersion++ } state.Restrict(state.backingStorage.SetUint64ByUint64(uint64(versionOffset), state.arbosVersion)) } @@ -291,10 +270,6 @@ func (state *ArbosState) AddressTable() *addressTable.AddressTable { return state.addressTable } -func (state *ArbosState) BLSTable() *blsTable.BLSTable { - return state.blsTable -} - func (state *ArbosState) ChainOwners() *addressSet.AddressSet { return state.chainOwners } @@ -329,3 +304,7 @@ func (state *ArbosState) KeccakHash(data ...[]byte) (common.Hash, error) { func (state *ArbosState) ChainId() (*big.Int, error) { return state.chainId.Get() } + +func (state *ArbosState) GenesisBlockNum() (uint64, error) { + return state.genesisBlockNum.Get() +} diff --git a/arbos/arbosState/initialization_test.go b/arbos/arbosState/initialization_test.go index 675c87e972..0d4804b736 100644 --- a/arbos/arbosState/initialization_test.go +++ b/arbos/arbosState/initialization_test.go @@ -142,6 +142,7 @@ func checkRetryables(arbState *ArbosState, expected []statetransfer.Initializati func checkAccounts(db *state.StateDB, arbState *ArbosState, accts []statetransfer.AccountInitializationInfo, t *testing.T) { l1p := arbState.L1PricingState() + posterTable := l1p.BatchPosterTable() for _, acct := range accts { addr := acct.Addr if db.GetNonce(addr) != acct.Nonce { @@ -168,20 +169,17 @@ func checkAccounts(db *state.StateDB, arbState *ArbosState, accts []statetransfe t.Fatal(err) } } - if acct.AggregatorInfo != nil { - fc, err := l1p.AggregatorFeeCollector(addr) + isPoster, err := posterTable.ContainsPoster(addr) + Require(t, err) + if acct.AggregatorInfo != nil && isPoster { + posterInfo, err := posterTable.OpenPoster(addr, 
false) + Require(t, err) + fc, err := posterInfo.PayTo() Require(t, err) if fc != acct.AggregatorInfo.FeeCollector { t.Fatal() } } - if acct.AggregatorToPay != nil { - aggregator, err := l1p.ReimbursableAggregatorForSender(addr) - Require(t, err) - if aggregator == nil || *aggregator != *acct.AggregatorToPay { - Fail(t) - } - } } _ = l1p } diff --git a/arbos/arbosState/initialize.go b/arbos/arbosState/initialize.go index 87f5a837c0..be957f2c71 100644 --- a/arbos/arbosState/initialize.go +++ b/arbos/arbosState/initialize.go @@ -20,7 +20,7 @@ import ( "github.com/offchainlabs/nitro/statetransfer" ) -func MakeGenesisBlock(parentHash common.Hash, blockNumber uint64, timestamp uint64, stateRoot common.Hash) *types.Block { +func MakeGenesisBlock(parentHash common.Hash, blockNumber uint64, timestamp uint64, stateRoot common.Hash, chainConfig *params.ChainConfig) *types.Block { head := &types.Header{ Number: new(big.Int).SetUint64(blockNumber), Nonce: types.EncodeNonce(1), // the genesis block reads the init message @@ -37,9 +37,10 @@ func MakeGenesisBlock(parentHash common.Hash, blockNumber uint64, timestamp uint } genesisHeaderInfo := types.HeaderInfo{ - SendRoot: common.Hash{}, - SendCount: 0, - L1BlockNumber: 0, + SendRoot: common.Hash{}, + SendCount: 0, + L1BlockNumber: 0, + ArbOSFormatVersion: chainConfig.ArbitrumChainParams.InitialArbOSVersion, } genesisHeaderInfo.UpdateHeaderWithInfo(head) @@ -153,16 +154,22 @@ func initializeRetryables(rs *retryables.RetryableState, initData statetransfer. 
func initializeArbosAccount(statedb *state.StateDB, arbosState *ArbosState, account statetransfer.AccountInitializationInfo) error { l1pState := arbosState.L1PricingState() + posterTable := l1pState.BatchPosterTable() if account.AggregatorInfo != nil { - err := l1pState.SetAggregatorFeeCollector(account.Addr, account.AggregatorInfo.FeeCollector) + isPoster, err := posterTable.ContainsPoster(account.Addr) if err != nil { return err } - } - if account.AggregatorToPay != nil { - err := l1pState.SetUserSpecifiedAggregator(account.Addr, account.AggregatorToPay) - if err != nil { - return err + if isPoster { + // poster is already authorized, just set its fee collector + poster, err := posterTable.OpenPoster(account.Addr, false) + if err != nil { + return err + } + err = poster.SetPayTo(account.AggregatorInfo.FeeCollector) + if err != nil { + return err + } } } diff --git a/arbos/block_processor.go b/arbos/block_processor.go index f4b434fd85..237eaaa595 100644 --- a/arbos/block_processor.go +++ b/arbos/block_processor.go @@ -29,10 +29,12 @@ import ( // set by the precompile module, to avoid a package dependence cycle var ArbRetryableTxAddress common.Address var ArbSysAddress common.Address +var InternalTxStartBlockMethodID [4]byte +var InternalTxBatchPostingReportMethodID [4]byte var RedeemScheduledEventID common.Hash var L2ToL1TransactionEventID common.Hash var L2ToL1TxEventID common.Hash -var EmitReedeemScheduledEvent func(*vm.EVM, uint64, uint64, [32]byte, [32]byte, common.Address) error +var EmitReedeemScheduledEvent func(*vm.EVM, uint64, uint64, [32]byte, [32]byte, common.Address, *big.Int, *big.Int) error var EmitTicketCreatedEvent func(*vm.EVM, [32]byte) error func createNewHeader(prevHeader *types.Header, l1info *L1Info, state *arbosState.ArbosState, chainConfig *params.ChainConfig) *types.Header { @@ -95,6 +97,8 @@ func noopSequencingHooks() *SequencingHooks { } } +type FallibleBatchFetcher func(batchNum uint64) ([]byte, error) + func ProduceBlock( message 
*L1IncomingMessage, delayedMessagesRead uint64, @@ -102,17 +106,30 @@ func ProduceBlock( statedb *state.StateDB, chainContext core.ChainContext, chainConfig *params.ChainConfig, -) (*types.Block, types.Receipts) { - txes, err := message.ParseL2Transactions(chainConfig.ChainID) + batchFetcher FallibleBatchFetcher, +) (*types.Block, types.Receipts, error) { + var batchFetchErr error + txes, err := message.ParseL2Transactions(chainConfig.ChainID, func(batchNum uint64) []byte { + data, err := batchFetcher(batchNum) + if err != nil { + batchFetchErr = err + return nil + } + return data + }) + if batchFetchErr != nil { + return nil, nil, batchFetchErr + } if err != nil { log.Warn("error parsing incoming message", "err", err) txes = types.Transactions{} } hooks := noopSequencingHooks() - return ProduceBlockAdvanced( + block, receipts := ProduceBlockAdvanced( message.Header, txes, delayedMessagesRead, lastBlockHeader, statedb, chainContext, chainConfig, hooks, ) + return block, receipts, nil } // A bit more flexible than ProduceBlock for use in the sequencer. 
@@ -146,7 +163,7 @@ func ProduceBlockAdvanced( header := createNewHeader(lastBlockHeader, l1Info, state, chainConfig) signer := types.MakeSigner(chainConfig, header.Number) - gasLeft, _ := state.L2PricingState().PerBlockGasLimit(state.FormatVersion()) + gasLeft, _ := state.L2PricingState().PerBlockGasLimit() l1BlockNum := l1Info.l1BlockNumber // Prepend a tx before all others to touch up the state (update the L1 block num, pricing pools, etc) @@ -186,10 +203,7 @@ func ProduceBlockAdvanced( } else { tx = txes[0] txes = txes[1:] - switch tx := tx.GetInner().(type) { - case *types.ArbitrumInternalTx: - tx.TxIndex = uint64(len(receipts)) - default: + if tx.Type() != types.ArbitrumInternalTxType { hooks = sequencingHooks // the sequencer has the ability to drop this tx isUserTx = true } @@ -210,7 +224,7 @@ func ProduceBlockAdvanced( if gasPrice.Sign() > 0 { dataGas = math.MaxUint64 - posterCost, _ := state.L1PricingState().GetPosterInfo(tx, sender, poster) + posterCost, _ := state.L1PricingState().GetPosterInfo(tx, poster) posterCostInL2Gas := arbmath.BigDiv(posterCost, gasPrice) if posterCostInL2Gas.IsUint64() { @@ -397,9 +411,10 @@ func FinalizeBlock(header *types.Header, txs types.Transactions, statedb *state. size, _ := acc.Size() nextL1BlockNumber, _ := state.Blockhashes().NextBlockNumber() arbitrumHeader := types.HeaderInfo{ - SendRoot: root, - SendCount: size, - L1BlockNumber: nextL1BlockNumber, + SendRoot: root, + SendCount: size, + L1BlockNumber: nextL1BlockNumber, + ArbOSFormatVersion: state.FormatVersion(), } arbitrumHeader.UpdateHeaderWithInfo(header) } diff --git a/arbos/blsTable/bls.go b/arbos/blsTable/bls.go deleted file mode 100644 index 7b1b386826..0000000000 --- a/arbos/blsTable/bls.go +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright 2021-2022, Offchain Labs, Inc. 
-// For license information, see https://github.com/nitro/blob/master/LICENSE - -package blsTable - -import ( - "math/big" - - "github.com/ethereum/go-ethereum" - "github.com/offchainlabs/nitro/arbos/addressSet" - "github.com/offchainlabs/nitro/blsSignatures" - - "github.com/ethereum/go-ethereum/common" - "github.com/offchainlabs/nitro/arbos/storage" - "github.com/offchainlabs/nitro/arbos/util" -) - -type BLSTable struct { - backingStorage *storage.Storage - legacyAddressSet *addressSet.AddressSet - legacyTableByAddress *storage.Storage - bls12381AddressSet *addressSet.AddressSet - bls12381TableByAddress *storage.Storage -} - -var ( - legacyAddressSetKey = []byte{0} - legacyTableByAddressKey = []byte{1} - bls12381AddressSetKey = []byte{2} - bls12381TableByAddressKey = []byte{3} -) - -func InitializeBLSTable(sto *storage.Storage) error { - err := addressSet.Initialize(sto.OpenSubStorage(legacyAddressSetKey)) - if err != nil { - return err - } - return addressSet.Initialize(sto.OpenSubStorage(bls12381AddressSetKey)) -} - -func Open(sto *storage.Storage) *BLSTable { - return &BLSTable{ - sto, - addressSet.OpenAddressSet(sto.OpenSubStorage(legacyAddressSetKey)), - sto.OpenSubStorage(legacyTableByAddressKey), - addressSet.OpenAddressSet(sto.OpenSubStorage(bls12381AddressSetKey)), - sto.OpenSubStorage(bls12381TableByAddressKey), - } -} - -func (tab *BLSTable) GetLegacyPublicKey(addr common.Address) (*big.Int, *big.Int, *big.Int, *big.Int, error) { - isMember, err := tab.legacyAddressSet.IsMember(addr) - if err != nil { - return nil, nil, nil, nil, err - } - if !isMember { - return nil, nil, nil, nil, ethereum.NotFound - } - - key := common.BytesToHash(append(addr.Bytes(), byte(0))) - - x0, _ := tab.legacyTableByAddress.Get(key) - x1, _ := tab.legacyTableByAddress.Get(util.HashPlusInt(key, 1)) - y0, _ := tab.legacyTableByAddress.Get(util.HashPlusInt(key, 2)) - y1, err := tab.legacyTableByAddress.Get(util.HashPlusInt(key, 3)) - - return x0.Big(), x1.Big(), y0.Big(), 
y1.Big(), err -} - -func (tab *BLSTable) RegisterLegacyPublicKey(addr common.Address, x0, x1, y0, y1 *big.Int) error { - key := common.BytesToHash(append(addr.Bytes(), byte(0))) - - _ = tab.legacyTableByAddress.Set(key, common.BigToHash(x0)) - _ = tab.legacyTableByAddress.Set(util.HashPlusInt(key, 1), common.BigToHash(x1)) - _ = tab.legacyTableByAddress.Set(util.HashPlusInt(key, 2), common.BigToHash(y0)) - _ = tab.legacyTableByAddress.Set(util.HashPlusInt(key, 3), common.BigToHash(y1)) - return tab.legacyAddressSet.Add(addr) -} - -func (tab *BLSTable) RegisterBLS12381PublicKey(addr common.Address, key blsSignatures.PublicKey) error { - if err := tab.bls12381AddressSet.Add(addr); err != nil { - return err - } - - sbBytes := tab.bls12381TableByAddress.OpenStorageBackedBytes(addr.Bytes()) - return sbBytes.SetBytes(blsSignatures.PublicKeyToBytes(key)) -} - -func (tab *BLSTable) GetBLS12381PublicKey(addr common.Address) (blsSignatures.PublicKey, error) { - isMember, err := tab.bls12381AddressSet.IsMember(addr) - if err != nil { - return blsSignatures.PublicKey{}, err - } - if !isMember { - return blsSignatures.PublicKey{}, ethereum.NotFound - } - - sbBytes := tab.bls12381TableByAddress.OpenStorageBackedBytes(addr.Bytes()) - buf, err := sbBytes.GetBytes() - if err != nil { - return blsSignatures.PublicKey{}, err - } - return blsSignatures.PublicKeyFromBytes(buf, true) -} diff --git a/arbos/blsTable/bls_test.go b/arbos/blsTable/bls_test.go deleted file mode 100644 index 3784a63c9f..0000000000 --- a/arbos/blsTable/bls_test.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2021-2022, Offchain Labs, Inc. 
-// For license information, see https://github.com/nitro/blob/master/LICENSE - -package blsTable - -import ( - "fmt" - "math" - "math/big" - "math/rand" - "testing" - "time" - - "github.com/offchainlabs/nitro/arbos/burn" - "github.com/offchainlabs/nitro/arbos/storage" - "github.com/offchainlabs/nitro/util/arbmath" - "github.com/offchainlabs/nitro/util/testhelpers" -) - -func TestLegacyBLS(t *testing.T) { - rand.Seed(time.Now().UTC().UnixNano()) - sto := storage.NewMemoryBacked(burn.NewSystemBurner(nil, false)) - tab := Open(sto) - - maxInt64 := big.NewInt(math.MaxInt64) - - address := testhelpers.RandomAddress() - cases := [][]*big.Int{ - {big.NewInt(0), big.NewInt(16), big.NewInt(615), big.NewInt(1024)}, - {big.NewInt(32), big.NewInt(0), big.NewInt(808), big.NewInt(9364)}, - {maxInt64, arbmath.BigMulByFrac(maxInt64, math.MaxInt64, 2), big.NewInt(2), big.NewInt(0)}, - } - - for index, test := range cases { - Require(t, tab.RegisterLegacyPublicKey(address, test[0], test[1], test[2], test[3])) - x0, x1, y0, y1, err := tab.GetLegacyPublicKey(address) - Require(t, err, fmt.Sprintf( - "failed to set public key %d %s %s %s %s", - index, x0.String(), x1.String(), y0.String(), y1.String()), - ) - - if x0.Cmp(test[0]) != 0 || x1.Cmp(test[1]) != 0 || y0.Cmp(test[2]) != 0 || y1.Cmp(test[3]) != 0 { - Fail(t, "incorrect public key", index, test, x0, x1, y0, y1) - } - } -} - -func Require(t *testing.T, err error, printables ...interface{}) { - t.Helper() - testhelpers.RequireImpl(t, err, printables...) -} - -func Fail(t *testing.T, printables ...interface{}) { - t.Helper() - testhelpers.FailImpl(t, printables...) 
-} diff --git a/arbos/chains.go b/arbos/chains.go index 64aa4101f8..645fd5d23d 100644 --- a/arbos/chains.go +++ b/arbos/chains.go @@ -10,7 +10,7 @@ import ( "github.com/ethereum/go-ethereum/params" ) -func GetChainConfig(chainId *big.Int) (*params.ChainConfig, error) { +func getStaticChainConfig(chainId *big.Int) (*params.ChainConfig, error) { for _, potentialChainConfig := range params.ArbitrumSupportedChainConfigs { if potentialChainConfig.ChainID.Cmp(chainId) == 0 { return potentialChainConfig, nil @@ -18,3 +18,12 @@ func GetChainConfig(chainId *big.Int) (*params.ChainConfig, error) { } return nil, fmt.Errorf("unsupported L2 chain ID %v", chainId) } + +func GetChainConfig(chainId *big.Int, genesisBlockNum uint64) (*params.ChainConfig, error) { + staticChainConfig, err := getStaticChainConfig(chainId) + if err != nil { + return nil, err + } + staticChainConfig.ArbitrumChainParams.GenesisBlockNum = genesisBlockNum + return staticChainConfig, nil +} diff --git a/arbos/incomingmessage.go b/arbos/incomingmessage.go index 83c486b5f5..77c44be060 100644 --- a/arbos/incomingmessage.go +++ b/arbos/incomingmessage.go @@ -16,8 +16,10 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbos/util" + "github.com/offchainlabs/nitro/util/arbmath" ) const ( @@ -29,6 +31,7 @@ const ( L1MessageType_BatchForGasEstimation = 10 // probably won't use this in practice L1MessageType_Initialize = 11 L1MessageType_EthDeposit = 12 + L1MessageType_BatchPostingReport = 13 L1MessageType_Invalid = 0xFF ) @@ -169,7 +172,9 @@ func ParseIncomingL1Message(rd io.Reader) (*L1IncomingMessage, error) { }, nil } -func (msg *L1IncomingMessage) ParseL2Transactions(chainId *big.Int) (types.Transactions, error) { +type InfallibleBatchFetcher func(batchNum uint64) []byte + +func (msg *L1IncomingMessage) ParseL2Transactions(chainId *big.Int, 
batchFetcher InfallibleBatchFetcher) (types.Transactions, error) { if len(msg.L2msg) > MaxL2MessageSize { // ignore the message if l2msg is too large return nil, errors.New("message too large") @@ -199,7 +204,7 @@ func (msg *L1IncomingMessage) ParseL2Transactions(chainId *big.Int) (types.Trans ChainId: chainId, L1RequestId: depositRequestId, // Matches the From of parseUnsignedTx - To: util.RemapL1Address(msg.Header.Poster), + To: msg.Header.Poster, Value: tx.Value(), }) return types.Transactions{deposit, tx}, nil @@ -220,6 +225,12 @@ func (msg *L1IncomingMessage) ParseL2Transactions(chainId *big.Int) (types.Trans case L1MessageType_RollupEvent: log.Debug("ignoring rollup event message") return types.Transactions{}, nil + case L1MessageType_BatchPostingReport: + tx, err := parseBatchPostingReportMessage(bytes.NewReader(msg.L2msg), chainId, batchFetcher) + if err != nil { + return nil, err + } + return types.Transactions{tx}, nil case L1MessageType_Invalid: // intentionally invalid message return nil, errors.New("invalid message") @@ -237,7 +248,8 @@ func (msg *L1IncomingMessage) ParseInitMessage() (*big.Int, error) { if len(msg.L2msg) != 32 { return nil, fmt.Errorf("invalid init message data %v", hex.EncodeToString(msg.L2msg)) } - return new(big.Int).SetBytes(msg.L2msg), nil + chainId := new(big.Int).SetBytes(msg.L2msg[:32]) + return chainId, nil } const ( @@ -370,7 +382,7 @@ func parseUnsignedTx(rd io.Reader, poster common.Address, requestId *common.Hash case L2MessageKind_UnsignedUserTx: inner = &types.ArbitrumUnsignedTx{ ChainId: chainId, - From: util.RemapL1Address(poster), + From: poster, Nonce: nonce, GasFeeCap: maxFeePerGas.Big(), Gas: gasLimit.Big().Uint64(), @@ -385,7 +397,7 @@ func parseUnsignedTx(rd io.Reader, poster common.Address, requestId *common.Hash inner = &types.ArbitrumContractTx{ ChainId: chainId, RequestId: *requestId, - From: util.RemapL1Address(poster), + From: poster, GasFeeCap: maxFeePerGas.Big(), Gas: gasLimit.Big().Uint64(), To: 
destination, @@ -400,6 +412,10 @@ func parseUnsignedTx(rd io.Reader, poster common.Address, requestId *common.Hash } func parseEthDepositMessage(rd io.Reader, header *L1IncomingMessageHeader, chainId *big.Int) (*types.Transaction, error) { + to, err := util.AddressFromReader(rd) + if err != nil { + return nil, err + } balance, err := util.HashFromReader(rd) if err != nil { return nil, err @@ -410,7 +426,8 @@ func parseEthDepositMessage(rd io.Reader, header *L1IncomingMessageHeader, chain tx := &types.ArbitrumDepositTx{ ChainId: chainId, L1RequestId: *header.RequestId, - To: util.RemapL1Address(header.Poster), + From: header.Poster, + To: to, Value: balance.Big(), } return types.NewTx(tx), nil @@ -481,7 +498,7 @@ func parseSubmitRetryableMessage(rd io.Reader, header *L1IncomingMessageHeader, tx := &types.ArbitrumSubmitRetryableTx{ ChainId: chainId, RequestId: *header.RequestId, - From: util.RemapL1Address(header.Poster), + From: header.Poster, L1BaseFee: header.L1BaseFee, DepositValue: depositValue.Big(), GasFeeCap: maxFeePerGas.Big(), @@ -495,3 +512,54 @@ func parseSubmitRetryableMessage(rd io.Reader, header *L1IncomingMessageHeader, } return types.NewTx(tx), err } + +func parseBatchPostingReportMessage(rd io.Reader, chainId *big.Int, batchFetcher InfallibleBatchFetcher) (*types.Transaction, error) { + batchTimestamp, err := util.HashFromReader(rd) + if err != nil { + return nil, err + } + batchPosterAddr, err := util.AddressFromReader(rd) + if err != nil { + return nil, err + } + _, err = util.HashFromReader(rd) // unused: data hash + if err != nil { + return nil, err + } + batchNumHash, err := util.HashFromReader(rd) + if err != nil { + return nil, err + } + batchNum := batchNumHash.Big().Uint64() + + l1BaseFee, err := util.HashFromReader(rd) + if err != nil { + return nil, err + } + batchData := batchFetcher(batchNum) + var batchDataGas uint64 + for _, b := range batchData { + if b == 0 { + batchDataGas += params.TxDataZeroGas + } else { + batchDataGas += 
params.TxDataNonZeroGasEIP2028 + } + } + + // the poster also pays to keccak the batch and place it and a batch-posting report into the inbox + keccakWords := arbmath.WordsForBytes(uint64(len(batchData))) + batchDataGas += params.Keccak256Gas + (keccakWords * params.Keccak256WordGas) + batchDataGas += 2 * params.SstoreSetGasEIP2200 + + data, err := util.PackInternalTxDataBatchPostingReport( + batchTimestamp.Big(), batchPosterAddr, batchNum, batchDataGas, l1BaseFee.Big(), + ) + if err != nil { + return nil, err + } + return types.NewTx(&types.ArbitrumInternalTx{ + ChainId: chainId, + Data: data, + // don't need to fill in the other fields, since they exist only to ensure uniqueness, and batchNum is already unique + }), nil +} diff --git a/arbos/incomingmessage_test.go b/arbos/incomingmessage_test.go index f5ac4b2dd6..4aa3c86c41 100644 --- a/arbos/incomingmessage_test.go +++ b/arbos/incomingmessage_test.go @@ -34,7 +34,7 @@ func TestSerializeAndParseL1Message(t *testing.T) { if err != nil { t.Error(err) } - txes, err := newMsg.ParseL2Transactions(chainId) + txes, err := newMsg.ParseL2Transactions(chainId, nil) if err != nil { t.Error(err) } diff --git a/arbos/internal_tx.go b/arbos/internal_tx.go index 452196ad84..11e94e66bf 100644 --- a/arbos/internal_tx.go +++ b/arbos/internal_tx.go @@ -7,6 +7,10 @@ import ( "fmt" "math/big" + "github.com/offchainlabs/nitro/util/arbmath" + + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" @@ -14,12 +18,6 @@ import ( "github.com/offchainlabs/nitro/arbos/util" ) -// Types of ArbitrumInternalTx, distinguished by the first data byte -const ( - // Contains 8 bytes indicating the big endian L1 block number to set - arbInternalTxStartBlock uint8 = 0 -) - func InternalTxStartBlock( chainId, l1BaseFee *big.Int, @@ -34,57 +32,64 @@ func InternalTxStartBlock( if l1BaseFee == nil { l1BaseFee = big.NewInt(0) } - data, err := 
util.PackInternalTxDataStartBlock(l1BaseFee, lastHeader.BaseFee, l1BlockNum, timePassed) + data, err := util.PackInternalTxDataStartBlock(l1BaseFee, l1BlockNum, l2BlockNum, timePassed) if err != nil { panic(fmt.Sprintf("Failed to pack internal tx %v", err)) } return &types.ArbitrumInternalTx{ - ChainId: chainId, - SubType: arbInternalTxStartBlock, - Data: data, - L2BlockNumber: l2BlockNum, + ChainId: chainId, + Data: data, } } func ApplyInternalTxUpdate(tx *types.ArbitrumInternalTx, state *arbosState.ArbosState, evm *vm.EVM) { - inputs, err := util.UnpackInternalTxDataStartBlock(tx.Data) - if err != nil { - panic(err) - } - l1BaseFee, _ := inputs[0].(*big.Int) // current block's - l2BaseFee, _ := inputs[1].(*big.Int) // the last L2 block's base fee (which is the result of the calculation 2 blocks ago) - l1BlockNumber, _ := inputs[2].(uint64) // current block's - timePassed, _ := inputs[3].(uint64) // since last block - - nextL1BlockNumber, err := state.Blockhashes().NextBlockNumber() - state.Restrict(err) - - if state.FormatVersion() >= 3 { - // The `l2BaseFee` in the tx data is indeed the last block's base fee, - // however, for the purposes of this function, we need the previous computed base fee. - // Since the computed base fee takes one block to apply, the last block's base fee - // is actually two calculations ago. Instead, as of ArbOS format version 3, - // we use the current state's base fee, which is the result of the last calculation. 
- l2BaseFee, err = state.L2PricingState().BaseFeeWei() + switch *(*[4]byte)(tx.Data[:4]) { + case InternalTxStartBlockMethodID: + inputs, err := util.UnpackInternalTxDataStartBlock(tx.Data) + if err != nil { + panic(err) + } + l1BlockNumber, _ := inputs[1].(uint64) // current block's + timePassed, _ := inputs[2].(uint64) // since last block + + nextL1BlockNumber, err := state.Blockhashes().NextBlockNumber() + state.Restrict(err) + + l2BaseFee, err := state.L2PricingState().BaseFeeWei() state.Restrict(err) - } - if l1BlockNumber >= nextL1BlockNumber { - var prevHash common.Hash - if evm.Context.BlockNumber.Sign() > 0 { - prevHash = evm.Context.GetHash(evm.Context.BlockNumber.Uint64() - 1) + if l1BlockNumber >= nextL1BlockNumber { + var prevHash common.Hash + if evm.Context.BlockNumber.Sign() > 0 { + prevHash = evm.Context.GetHash(evm.Context.BlockNumber.Uint64() - 1) + } + state.Restrict(state.Blockhashes().RecordNewL1Block(l1BlockNumber, prevHash)) } - state.Restrict(state.Blockhashes().RecordNewL1Block(l1BlockNumber, prevHash)) - } - currentTime := evm.Context.Time.Uint64() + currentTime := evm.Context.Time.Uint64() - // Try to reap 2 retryables - _ = state.RetryableState().TryToReapOneRetryable(currentTime, evm, util.TracingDuringEVM) - _ = state.RetryableState().TryToReapOneRetryable(currentTime, evm, util.TracingDuringEVM) + // Try to reap 2 retryables + _ = state.RetryableState().TryToReapOneRetryable(currentTime, evm, util.TracingDuringEVM) + _ = state.RetryableState().TryToReapOneRetryable(currentTime, evm, util.TracingDuringEVM) - state.L2PricingState().UpdatePricingModel(l2BaseFee, timePassed, state.FormatVersion(), false) - state.L1PricingState().UpdatePricingModel(l1BaseFee, currentTime) + state.L2PricingState().UpdatePricingModel(l2BaseFee, timePassed, false) - state.UpgradeArbosVersionIfNecessary(currentTime, evm.ChainConfig()) + state.UpgradeArbosVersionIfNecessary(currentTime, evm.ChainConfig()) + case InternalTxBatchPostingReportMethodID: + inputs, 
err := util.UnpackInternalTxDataBatchPostingReport(tx.Data) + if err != nil { + panic(err) + } + batchTimestamp, _ := inputs[0].(*big.Int) + batchPosterAddress, _ := inputs[1].(common.Address) + // ignore input[2], batchNumber, which exist because we might need them in the future + batchDataGas, _ := inputs[3].(uint64) + l1BaseFeeWei, _ := inputs[4].(*big.Int) + + weiSpent := arbmath.BigMulByUint(l1BaseFeeWei, batchDataGas) + err = state.L1PricingState().UpdateForBatchPosterSpending(evm.StateDB, evm, batchTimestamp.Uint64(), evm.Context.Time.Uint64(), batchPosterAddress, weiSpent) + if err != nil { + log.Warn("L1Pricing UpdateForSequencerSpending failed", "err", err) + } + } } diff --git a/arbos/l1pricing/batchPoster.go b/arbos/l1pricing/batchPoster.go new file mode 100644 index 0000000000..6d332706ea --- /dev/null +++ b/arbos/l1pricing/batchPoster.go @@ -0,0 +1,171 @@ +// Copyright 2022, Offchain Labs, Inc. +// For license information, see https://github.com/nitro/blob/master/LICENSE + +package l1pricing + +import ( + "errors" + "math" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/offchainlabs/nitro/arbos/addressSet" + "github.com/offchainlabs/nitro/arbos/storage" + "github.com/offchainlabs/nitro/util/arbmath" +) + +const totalFundsDueOffset = 0 + +var ( + PosterAddrsKey = []byte{0} + PosterInfoKey = []byte{1} + + ErrAlreadyExists = errors.New("tried to add a batch poster that already exists") + ErrNotExist = errors.New("tried to open a batch poster that does not exist") +) + +// layout of storage in the table +type BatchPostersTable struct { + posterAddrs *addressSet.AddressSet + posterInfo *storage.Storage + totalFundsDue storage.StorageBackedBigInt +} + +type BatchPosterState struct { + fundsDue storage.StorageBackedBigInt + payTo storage.StorageBackedAddress + postersTable *BatchPostersTable +} + +func InitializeBatchPostersTable(storage *storage.Storage) error { + totalFundsDue := storage.OpenStorageBackedBigInt(totalFundsDueOffset) 
+ if err := totalFundsDue.Set(common.Big0); err != nil { + return err + } + return addressSet.Initialize(storage.OpenSubStorage(PosterAddrsKey)) +} + +func OpenBatchPostersTable(storage *storage.Storage) *BatchPostersTable { + return &BatchPostersTable{ + posterAddrs: addressSet.OpenAddressSet(storage.OpenSubStorage(PosterAddrsKey)), + posterInfo: storage.OpenSubStorage(PosterInfoKey), + totalFundsDue: storage.OpenStorageBackedBigInt(totalFundsDueOffset), + } +} + +func (bpt *BatchPostersTable) OpenPoster(poster common.Address, createIfNotExist bool) (*BatchPosterState, error) { + isBatchPoster, err := bpt.posterAddrs.IsMember(poster) + if err != nil { + return nil, err + } + if !isBatchPoster { + if !createIfNotExist { + return nil, ErrNotExist + } + return bpt.AddPoster(poster, poster) + } + return bpt.internalOpen(poster), nil +} + +func (bpt *BatchPostersTable) internalOpen(poster common.Address) *BatchPosterState { + bpStorage := bpt.posterInfo.OpenSubStorage(poster.Bytes()) + return &BatchPosterState{ + fundsDue: bpStorage.OpenStorageBackedBigInt(0), + payTo: bpStorage.OpenStorageBackedAddress(1), + postersTable: bpt, + } +} + +func (bpt *BatchPostersTable) ContainsPoster(poster common.Address) (bool, error) { + return bpt.posterAddrs.IsMember(poster) +} + +func (bpt *BatchPostersTable) AddPoster(posterAddress common.Address, payTo common.Address) (*BatchPosterState, error) { + isBatchPoster, err := bpt.posterAddrs.IsMember(posterAddress) + if err != nil { + return nil, err + } + if isBatchPoster { + return nil, ErrAlreadyExists + } + bpState := bpt.internalOpen(posterAddress) + if err := bpState.fundsDue.Set(common.Big0); err != nil { + return nil, err + } + if err := bpState.payTo.Set(payTo); err != nil { + return nil, err + } + + if err := bpt.posterAddrs.Add(posterAddress); err != nil { + return nil, err + } + + return bpState, nil +} + +func (bpt *BatchPostersTable) AllPosters(maxNumToGet uint64) ([]common.Address, error) { + return 
bpt.posterAddrs.AllMembers(maxNumToGet) +} + +func (bpt *BatchPostersTable) TotalFundsDue() (*big.Int, error) { + return bpt.totalFundsDue.Get() +} + +func (bps *BatchPosterState) FundsDue() (*big.Int, error) { + return bps.fundsDue.Get() +} + +func (bps *BatchPosterState) SetFundsDue(val *big.Int) error { + fundsDue := bps.fundsDue + totalFundsDue := bps.postersTable.totalFundsDue + prev, err := fundsDue.Get() + if err != nil { + return err + } + prevTotal, err := totalFundsDue.Get() + if err != nil { + return err + } + if err := totalFundsDue.Set(arbmath.BigSub(arbmath.BigAdd(prevTotal, val), prev)); err != nil { + return err + } + return bps.fundsDue.Set(val) +} + +func (bps *BatchPosterState) PayTo() (common.Address, error) { + return bps.payTo.Get() +} + +func (bps *BatchPosterState) SetPayTo(addr common.Address) error { + return bps.payTo.Set(addr) +} + +type FundsDueItem struct { + dueTo common.Address + balance *big.Int +} + +func (bpt *BatchPostersTable) GetFundsDueList() ([]FundsDueItem, error) { + ret := []FundsDueItem{} + allPosters, err := bpt.AllPosters(math.MaxUint64) + if err != nil { + return nil, err + } + for _, posterAddr := range allPosters { + poster, err := bpt.OpenPoster(posterAddr, false) + if err != nil { + return nil, err + } + due, err := poster.FundsDue() + if err != nil { + return nil, err + } + if due.Sign() > 0 { + ret = append(ret, FundsDueItem{ + dueTo: posterAddr, + balance: due, + }) + } + } + return ret, nil +} diff --git a/arbos/l1pricing/batchPoster_test.go b/arbos/l1pricing/batchPoster_test.go new file mode 100644 index 0000000000..5b47534b23 --- /dev/null +++ b/arbos/l1pricing/batchPoster_test.go @@ -0,0 +1,114 @@ +// Copyright 2022, Offchain Labs, Inc. 
+// For license information, see https://github.com/nitro/blob/master/LICENSE + +package l1pricing + +import ( + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/math" + "github.com/offchainlabs/nitro/arbos/burn" + "github.com/offchainlabs/nitro/arbos/storage" + "math/big" + "testing" +) + +func TestBatchPosterTable(t *testing.T) { + sto := storage.NewMemoryBacked(burn.NewSystemBurner(nil, false)) + err := InitializeBatchPostersTable(sto) + Require(t, err) + + bpTable := OpenBatchPostersTable(sto) + + addr1 := common.Address{1, 2, 3} + pay1 := common.Address{4, 5, 6, 7} + addr2 := common.Address{2, 4, 6} + pay2 := common.Address{8, 10, 12, 14} + + // test creation and counting of bps + allPosters, err := bpTable.AllPosters(math.MaxUint64) + Require(t, err) + if len(allPosters) != 0 { + t.Fatal() + } + exists, err := bpTable.ContainsPoster(addr1) + Require(t, err) + if exists { + t.Fatal() + } + + bp1, err := bpTable.AddPoster(addr1, pay1) + Require(t, err) + getPay1, err := bp1.PayTo() + Require(t, err) + if getPay1 != pay1 { + t.Fatal() + } + getDue1, err := bp1.FundsDue() + Require(t, err) + if getDue1.Sign() != 0 { + t.Fatal() + } + exists, err = bpTable.ContainsPoster(addr1) + Require(t, err) + if !exists { + t.Fatal() + } + + bp2, err := bpTable.AddPoster(addr2, pay2) + Require(t, err) + _ = bp2 + getPay2, err := bp2.PayTo() + Require(t, err) + if getPay2 != pay2 { + t.Fatal() + } + getDue2, err := bp2.FundsDue() + Require(t, err) + if getDue2.Sign() != 0 { + t.Fatal() + } + exists, err = bpTable.ContainsPoster(addr2) + Require(t, err) + if !exists { + t.Fatal() + } + + allPosters, err = bpTable.AllPosters(math.MaxUint64) + Require(t, err) + if len(allPosters) != 2 { + t.Fatal() + } + + // test get/set of BP fields + bp1, err = bpTable.OpenPoster(addr1, false) + Require(t, err) + err = bp1.SetPayTo(addr2) + Require(t, err) + getPay1, err = bp1.PayTo() + Require(t, err) + if getPay1 != addr2 { + t.Fatal() + } + err = 
bp1.SetFundsDue(big.NewInt(13)) + Require(t, err) + getDue1, err = bp1.FundsDue() + Require(t, err) + if getDue1.Uint64() != 13 { + t.Fatal() + } + + // test adding up the fundsDue + err = bp2.SetFundsDue(big.NewInt(42)) + Require(t, err) + getDue2, err = bp2.FundsDue() + Require(t, err) + if getDue2.Uint64() != 42 { + t.Fatal() + } + + totalDue, err := bpTable.TotalFundsDue() + Require(t, err) + if totalDue.Uint64() != 13+42 { + t.Fatal() + } +} diff --git a/arbos/l1pricing/l1pricing.go b/arbos/l1pricing/l1pricing.go index 99c1e39327..2ec933c2ac 100644 --- a/arbos/l1pricing/l1pricing.go +++ b/arbos/l1pricing/l1pricing.go @@ -9,238 +9,376 @@ import ( "math/big" "sync/atomic" + "github.com/ethereum/go-ethereum/common/math" + + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/params" + "github.com/offchainlabs/nitro/arbcompress" - "github.com/offchainlabs/nitro/arbos/addressSet" "github.com/offchainlabs/nitro/util/arbmath" + am "github.com/offchainlabs/nitro/util/arbmath" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbos/storage" "github.com/offchainlabs/nitro/arbos/util" ) type L1PricingState struct { - storage *storage.Storage - defaultAggregator storage.StorageBackedAddress - l1BaseFeeEstimate storage.StorageBackedBigInt - l1BaseFeeEstimateInertia storage.StorageBackedUint64 - lastL1BaseFeeUpdateTime storage.StorageBackedUint64 - userSpecifiedAggregators *storage.Storage - refuseDefaultAggregator *storage.Storage - aggregatorFeeCollectors *storage.Storage - aggregatorCompressionRatios *storage.Storage + storage *storage.Storage + + // parameters + batchPosterTable *BatchPostersTable + payRewardsTo storage.StorageBackedAddress + equilibrationUnits storage.StorageBackedBigInt + inertia storage.StorageBackedUint64 + perUnitReward 
storage.StorageBackedUint64 + // variables + lastUpdateTime storage.StorageBackedUint64 // timestamp of the last update from L1 that we processed + fundsDueForRewards storage.StorageBackedBigInt + // funds collected since update are recorded as the balance in account L1PricerFundsPoolAddress + unitsSinceUpdate storage.StorageBackedUint64 // calldata units collected for since last update + pricePerUnit storage.StorageBackedBigInt // current price per calldata unit } var ( - SequencerAddress = common.HexToAddress("0xA4B000000000000000000073657175656e636572") + BatchPosterTableKey = []byte{0} + BatchPosterAddress = common.HexToAddress("0xA4B000000000000000000073657175656e636572") + BatchPosterPayToAddress = BatchPosterAddress + L1PricerFundsPoolAddress = common.HexToAddress("0xA4B00000000000000000000000000000000000f6") - userSpecifiedAggregatorKey = []byte{0} - refuseDefaultAggregatorKey = []byte{1} - aggregatorFeeCollectorKey = []byte{2} - aggregatorCompressionRatioKey = []byte{3} + ErrInvalidTime = errors.New("invalid timestamp") ) const ( - defaultAggregatorAddressOffset uint64 = 0 - l1BaseFeeEstimateOffset uint64 = 1 - l1BaseFeeEstimateInertiaOffset uint64 = 2 - lastL1BaseFeeUpdateTimeOffset uint64 = 3 + payRewardsToOffset uint64 = iota + equilibrationUnitsOffset + inertiaOffset + perUnitRewardOffset + lastUpdateTimeOffset + fundsDueForRewardsOffset + unitsSinceOffset + pricePerUnitOffset ) -const InitialL1BaseFeeEstimate = 50 * params.GWei -const InitialL1BaseFeeEstimateInertia = 24 +const ( + InitialEquilibrationUnits uint64 = 60 * params.TxDataNonZeroGasEIP2028 * 100000 // one minute at 100000 bytes / sec + InitialInertia = 10 + InitialPerUnitReward = 10 + InitialPricePerUnitWei = 50 * params.GWei +) func InitializeL1PricingState(sto *storage.Storage) error { - err := sto.SetByUint64(defaultAggregatorAddressOffset, common.BytesToHash(SequencerAddress.Bytes())) - if err != nil { + bptStorage := sto.OpenSubStorage(BatchPosterTableKey) + if err := 
InitializeBatchPostersTable(bptStorage); err != nil { + return err + } + bpTable := OpenBatchPostersTable(bptStorage) + if _, err := bpTable.AddPoster(BatchPosterAddress, BatchPosterPayToAddress); err != nil { + return err + } + if err := sto.SetByUint64(payRewardsToOffset, util.AddressToHash(BatchPosterAddress)); err != nil { + return err + } + equilibrationUnits := sto.OpenStorageBackedBigInt(equilibrationUnitsOffset) + if err := equilibrationUnits.Set(am.UintToBig(InitialEquilibrationUnits)); err != nil { + return err + } + if err := sto.SetUint64ByUint64(inertiaOffset, InitialInertia); err != nil { return err } - if err := sto.SetUint64ByUint64(l1BaseFeeEstimateInertiaOffset, InitialL1BaseFeeEstimateInertia); err != nil { + fundsDueForRewards := sto.OpenStorageBackedBigInt(fundsDueForRewardsOffset) + if err := fundsDueForRewards.Set(common.Big0); err != nil { return err } - if err := sto.SetUint64ByUint64(l1BaseFeeEstimateOffset, InitialL1BaseFeeEstimate); err != nil { + if err := sto.SetUint64ByUint64(perUnitRewardOffset, InitialPerUnitReward); err != nil { return err } - return sto.SetUint64ByUint64(lastL1BaseFeeUpdateTimeOffset, 0) + pricePerUnit := sto.OpenStorageBackedBigInt(pricePerUnitOffset) + return pricePerUnit.SetByUint(InitialPricePerUnitWei) } func OpenL1PricingState(sto *storage.Storage) *L1PricingState { return &L1PricingState{ sto, - sto.OpenStorageBackedAddress(defaultAggregatorAddressOffset), - sto.OpenStorageBackedBigInt(l1BaseFeeEstimateOffset), - sto.OpenStorageBackedUint64(l1BaseFeeEstimateInertiaOffset), - sto.OpenStorageBackedUint64(lastL1BaseFeeUpdateTimeOffset), - sto.OpenSubStorage(userSpecifiedAggregatorKey), - sto.OpenSubStorage(refuseDefaultAggregatorKey), - sto.OpenSubStorage(aggregatorFeeCollectorKey), - sto.OpenSubStorage(aggregatorCompressionRatioKey), + OpenBatchPostersTable(sto.OpenSubStorage(BatchPosterTableKey)), + sto.OpenStorageBackedAddress(payRewardsToOffset), + sto.OpenStorageBackedBigInt(equilibrationUnitsOffset), + 
sto.OpenStorageBackedUint64(inertiaOffset), + sto.OpenStorageBackedUint64(perUnitRewardOffset), + sto.OpenStorageBackedUint64(lastUpdateTimeOffset), + sto.OpenStorageBackedBigInt(fundsDueForRewardsOffset), + sto.OpenStorageBackedUint64(unitsSinceOffset), + sto.OpenStorageBackedBigInt(pricePerUnitOffset), } } -func (ps *L1PricingState) DefaultAggregator() (common.Address, error) { - return ps.defaultAggregator.Get() +func (ps *L1PricingState) BatchPosterTable() *BatchPostersTable { + return ps.batchPosterTable } -func (ps *L1PricingState) SetDefaultAggregator(val common.Address) error { - return ps.defaultAggregator.Set(val) +func (ps *L1PricingState) PayRewardsTo() (common.Address, error) { + return ps.payRewardsTo.Get() } -func (ps *L1PricingState) L1BaseFeeEstimateWei() (*big.Int, error) { - return ps.l1BaseFeeEstimate.Get() +func (ps *L1PricingState) SetPayRewardsTo(addr common.Address) error { + return ps.payRewardsTo.Set(addr) } -func (ps *L1PricingState) SetL1BaseFeeEstimateWei(val *big.Int) error { - return ps.l1BaseFeeEstimate.Set(val) +func (ps *L1PricingState) EquilibrationUnits() (*big.Int, error) { + return ps.equilibrationUnits.Get() } -func (ps *L1PricingState) LastL1BaseFeeUpdateTime() (uint64, error) { - return ps.lastL1BaseFeeUpdateTime.Get() +func (ps *L1PricingState) SetEquilibrationUnits(equilUnits *big.Int) error { + return ps.equilibrationUnits.Set(equilUnits) } -func (ps *L1PricingState) SetLastL1BaseFeeUpdateTime(t uint64) error { - return ps.lastL1BaseFeeUpdateTime.Set(t) +func (ps *L1PricingState) Inertia() (uint64, error) { + return ps.inertia.Get() } -// Update the pricing model with info from the start of a block -func (ps *L1PricingState) UpdatePricingModel(baseFeeSample *big.Int, currentTime uint64) { +func (ps *L1PricingState) SetInertia(inertia uint64) error { + return ps.inertia.Set(inertia) +} - if baseFeeSample.Sign() == 0 { - // The sequencer's normal messages do not include the l1 basefee, so ignore them - return - } +func (ps 
*L1PricingState) PerUnitReward() (uint64, error) { + return ps.perUnitReward.Get() +} - // update the l1 basefee estimate, which is the weighted average of the past and present - // basefee' = weighted average of the historical rate and the current, discounting time passed - // basefee' = (memory * basefee + sqrt(passed) * sample) / (memory + sqrt(passed)) - // - baseFee, _ := ps.L1BaseFeeEstimateWei() - inertia, _ := ps.L1BaseFeeEstimateInertia() - lastTime, _ := ps.LastL1BaseFeeUpdateTime() - if currentTime <= lastTime { - return - } - passedSqrt := arbmath.ApproxSquareRoot(currentTime - lastTime) - newBaseFee := arbmath.BigDivByUint( - arbmath.BigAdd(arbmath.BigMulByUint(baseFee, inertia), arbmath.BigMulByUint(baseFeeSample, passedSqrt)), - inertia+passedSqrt, - ) +func (ps *L1PricingState) SetPerUnitReward(weiPerUnit uint64) error { + return ps.perUnitReward.Set(weiPerUnit) +} + +func (ps *L1PricingState) LastUpdateTime() (uint64, error) { + return ps.lastUpdateTime.Get() +} - _ = ps.SetL1BaseFeeEstimateWei(newBaseFee) - _ = ps.SetLastL1BaseFeeUpdateTime(currentTime) +func (ps *L1PricingState) SetLastUpdateTime(t uint64) error { + return ps.lastUpdateTime.Set(t) } -// Get how slowly ArbOS updates its estimate of the L1 basefee -func (ps *L1PricingState) L1BaseFeeEstimateInertia() (uint64, error) { - return ps.l1BaseFeeEstimateInertia.Get() +func (ps *L1PricingState) FundsDueForRewards() (*big.Int, error) { + return ps.fundsDueForRewards.Get() } -// Set how slowly ArbOS updates its estimate of the L1 basefee -func (ps *L1PricingState) SetL1BaseFeeEstimateInertia(inertia uint64) error { - return ps.l1BaseFeeEstimateInertia.Set(inertia) +func (ps *L1PricingState) SetFundsDueForRewards(amt *big.Int) error { + return ps.fundsDueForRewards.Set(amt) } -func (ps *L1PricingState) userSpecifiedAggregatorsForAddress(sender common.Address) *addressSet.AddressSet { - return addressSet.OpenAddressSet(ps.userSpecifiedAggregators.OpenSubStorage(sender.Bytes())) +func (ps 
*L1PricingState) UnitsSinceUpdate() (uint64, error) { + return ps.unitsSinceUpdate.Get() } -// Get sender's user-specified aggregator, or nil if there is none. This does NOT fall back to the default aggregator -// if there is no user-specified aggregator. If that is what you want, call ReimbursableAggregatorForSender instead. -func (ps *L1PricingState) UserSpecifiedAggregator(sender common.Address) (*common.Address, error) { - return ps.userSpecifiedAggregatorsForAddress(sender).GetAnyMember() +func (ps *L1PricingState) SetUnitsSinceUpdate(units uint64) error { + return ps.unitsSinceUpdate.Set(units) } -func (ps *L1PricingState) SetUserSpecifiedAggregator(sender common.Address, maybeAggregator *common.Address) error { - paSet := ps.userSpecifiedAggregatorsForAddress(sender) - if err := paSet.Clear(); err != nil { +func (ps *L1PricingState) AddToUnitsSinceUpdate(units uint64) error { + oldUnits, err := ps.unitsSinceUpdate.Get() + if err != nil { return err } - if maybeAggregator == nil { - return nil - } - return paSet.Add(*maybeAggregator) + return ps.unitsSinceUpdate.Set(oldUnits + units) } -func (ps *L1PricingState) RefusesDefaultAggregator(addr common.Address) (bool, error) { - val, err := ps.refuseDefaultAggregator.Get(common.BytesToHash(addr.Bytes())) +func (ps *L1PricingState) PricePerUnit() (*big.Int, error) { + return ps.pricePerUnit.Get() +} + +func (ps *L1PricingState) SetPricePerUnit(price *big.Int) error { + return ps.pricePerUnit.Set(price) +} + +func (ps *L1PricingState) L1BaseFeeEstimate() (*big.Int, error) { + perUnit, err := ps.pricePerUnit.Get() if err != nil { - return false, err + return nil, err } - return val != (common.Hash{}), nil + return arbmath.BigMulByUint(perUnit, 16), nil } -func (ps *L1PricingState) SetRefusesDefaultAggregator(addr common.Address, refuses bool) error { - val := uint64(0) - if refuses { - val = 1 +// Update the pricing model based on a payment by a batch poster +func (ps *L1PricingState) 
UpdateForBatchPosterSpending(statedb vm.StateDB, evm *vm.EVM, updateTime uint64, currentTime uint64, batchPoster common.Address, weiSpent *big.Int) error { + batchPosterTable := ps.BatchPosterTable() + posterState, err := batchPosterTable.OpenPoster(batchPoster, true) + if err != nil { + return err } - return ps.refuseDefaultAggregator.Set(common.BytesToHash(addr.Bytes()), common.BigToHash(arbmath.UintToBig(val))) -} -// Get the aggregator who is eligible to be reimbursed for L1 costs of txs from sender, or nil if there is none. -func (ps *L1PricingState) ReimbursableAggregatorForSender(sender common.Address) (*common.Address, error) { - fromTable, err := ps.UserSpecifiedAggregator(sender) + // compute previous shortfall + totalFundsDue, err := batchPosterTable.TotalFundsDue() if err != nil { - return nil, err + return err } - if fromTable != nil { - return fromTable, nil + fundsDueForRewards, err := ps.FundsDueForRewards() + if err != nil { + return err } + oldSurplus := am.BigSub(statedb.GetBalance(L1PricerFundsPoolAddress), am.BigAdd(totalFundsDue, fundsDueForRewards)) - refuses, err := ps.RefusesDefaultAggregator(sender) - if err != nil || refuses { - return nil, err + // compute allocation fraction -- will allocate updateTimeDelta/timeDelta fraction of units and funds to this update + lastUpdateTime, err := ps.LastUpdateTime() + if err != nil { + return err } - aggregator, err := ps.DefaultAggregator() + if lastUpdateTime == 0 && currentTime > 0 { // it's the first update, so there isn't a last update time + lastUpdateTime = updateTime - 1 + } + if updateTime >= currentTime || updateTime < lastUpdateTime { + return ErrInvalidTime + } + updateTimeDelta := updateTime - lastUpdateTime + timeDelta := currentTime - lastUpdateTime + + // allocate units to this update + unitsSinceUpdate, err := ps.UnitsSinceUpdate() if err != nil { - return nil, err + return err } - if aggregator == (common.Address{}) { - return nil, nil + unitsAllocated := unitsSinceUpdate * 
updateTimeDelta / timeDelta + unitsSinceUpdate -= unitsAllocated + if err := ps.SetUnitsSinceUpdate(unitsSinceUpdate); err != nil { + return err } - return &aggregator, nil -} + dueToPoster, err := posterState.FundsDue() + if err != nil { + return err + } + err = posterState.SetFundsDue(am.BigAdd(dueToPoster, weiSpent)) + if err != nil { + return err + } + perUnitReward, err := ps.PerUnitReward() + if err != nil { + return err + } + fundsDueForRewards = am.BigAdd(fundsDueForRewards, am.BigMulByUint(am.UintToBig(unitsAllocated), perUnitReward)) + if err := ps.SetFundsDueForRewards(fundsDueForRewards); err != nil { + return err + } -func (ps *L1PricingState) SetAggregatorFeeCollector(aggregator common.Address, addr common.Address) error { - return ps.aggregatorFeeCollectors.Set(common.BytesToHash(aggregator.Bytes()), common.BytesToHash(addr.Bytes())) -} + // allocate funds to this update + collectedSinceUpdate := statedb.GetBalance(L1PricerFundsPoolAddress) + availableFunds := am.BigDivByUint(am.BigMulByUint(collectedSinceUpdate, updateTimeDelta), timeDelta) -func (ps *L1PricingState) AggregatorFeeCollector(aggregator common.Address) (common.Address, error) { - raw, err := ps.aggregatorFeeCollectors.Get(common.BytesToHash(aggregator.Bytes())) - if raw == (common.Hash{}) { - return aggregator, err - } else { - return common.BytesToAddress(raw.Bytes()), err + // pay rewards, as much as possible + paymentForRewards := am.BigMulByUint(am.UintToBig(perUnitReward), unitsAllocated) + if am.BigLessThan(availableFunds, paymentForRewards) { + paymentForRewards = availableFunds } -} + fundsDueForRewards = am.BigSub(fundsDueForRewards, paymentForRewards) + if err := ps.SetFundsDueForRewards(fundsDueForRewards); err != nil { + return err + } + payRewardsTo, err := ps.PayRewardsTo() + if err != nil { + return err + } + err = util.TransferBalance(&L1PricerFundsPoolAddress, &payRewardsTo, paymentForRewards, evm, util.TracingBeforeEVM, "batchPosterReward") + if err != nil { + return 
err + } + availableFunds = am.BigSub(availableFunds, paymentForRewards) -func (ps *L1PricingState) AggregatorCompressionRatio(aggregator common.Address) (arbmath.Bips, error) { - raw, err := ps.aggregatorCompressionRatios.Get(common.BytesToHash(aggregator.Bytes())) - if raw == (common.Hash{}) { - return arbmath.OneInBips, err - } else { - return arbmath.BigToBips(raw.Big()), err + // settle up our batch poster payments owed, as much as possible + allPosterAddrs, err := batchPosterTable.AllPosters(math.MaxUint64) + if err != nil { + return err + } + for _, posterAddr := range allPosterAddrs { + poster, err := batchPosterTable.OpenPoster(posterAddr, false) + if err != nil { + return err + } + balanceDueToPoster, err := poster.FundsDue() + if err != nil { + return err + } + balanceToTransfer := balanceDueToPoster + if am.BigLessThan(availableFunds, balanceToTransfer) { + balanceToTransfer = availableFunds + } + if balanceToTransfer.Sign() > 0 { + addrToPay, err := poster.PayTo() + if err != nil { + return err + } + err = util.TransferBalance(&L1PricerFundsPoolAddress, &addrToPay, balanceToTransfer, evm, util.TracingBeforeEVM, "batchPosterRefund") + if err != nil { + return err + } + availableFunds = am.BigSub(availableFunds, balanceToTransfer) + balanceDueToPoster = am.BigSub(balanceDueToPoster, balanceToTransfer) + err = poster.SetFundsDue(balanceDueToPoster) + if err != nil { + return err + } + } } -} -func (ps *L1PricingState) SetAggregatorCompressionRatio(aggregator common.Address, ratio arbmath.Bips) error { - if ratio > arbmath.PercentToBips(200) { - return errors.New("compression ratio out of bounds") + // update time + if err := ps.SetLastUpdateTime(updateTime); err != nil { + return err } - return ps.aggregatorCompressionRatios.Set(util.AddressToHash(aggregator), util.UintToHash(uint64(ratio))) + + // adjust the price + if unitsAllocated > 0 { + totalFundsDue, err = batchPosterTable.TotalFundsDue() + if err != nil { + return err + } + fundsDueForRewards, err 
= ps.FundsDueForRewards() + if err != nil { + return err + } + surplus := am.BigSub(statedb.GetBalance(L1PricerFundsPoolAddress), am.BigAdd(totalFundsDue, fundsDueForRewards)) + + inertia, err := ps.Inertia() + if err != nil { + return err + } + equilUnits, err := ps.EquilibrationUnits() + if err != nil { + return err + } + inertiaUnits := am.BigDivByUint(equilUnits, inertia) + price, err := ps.PricePerUnit() + if err != nil { + return err + } + + allocPlusInert := am.BigAddByUint(inertiaUnits, unitsAllocated) + priceChange := am.BigDiv( + am.BigSub( + am.BigMul(surplus, am.BigSub(equilUnits, common.Big1)), + am.BigMul(oldSurplus, equilUnits), + ), + am.BigMul(equilUnits, allocPlusInert), + ) + + newPrice := am.BigAdd(price, priceChange) + if newPrice.Sign() < 0 { + newPrice = common.Big0 + } + if err := ps.SetPricePerUnit(newPrice); err != nil { + return err + } + } + return nil } -func (ps *L1PricingState) getPosterInfoWithoutCache(tx *types.Transaction, sender, poster common.Address) (*big.Int, bool) { +func (ps *L1PricingState) getPosterInfoWithoutCache(tx *types.Transaction, posterAddr common.Address) (*big.Int, uint64) { - aggregator, perr := ps.ReimbursableAggregatorForSender(sender) + if posterAddr != BatchPosterAddress { + return common.Big0, 0 + } txBytes, merr := tx.MarshalBinary() txType := tx.Type() - if !util.TxTypeHasPosterCosts(txType) || perr != nil || merr != nil || aggregator == nil || poster != *aggregator { - return common.Big0, false + if !util.TxTypeHasPosterCosts(txType) || merr != nil { + return common.Big0, 0 } l1Bytes, err := byteCountAfterBrotli0(txBytes) @@ -249,66 +387,46 @@ func (ps *L1PricingState) getPosterInfoWithoutCache(tx *types.Transaction, sende } // Approximate the l1 fee charged for posting this tx's calldata - l1GasPrice, _ := ps.L1BaseFeeEstimateWei() - l1BytePrice := arbmath.BigMulByUint(l1GasPrice, params.TxDataNonZeroGasEIP2028) - l1Fee := arbmath.BigMulByUint(l1BytePrice, uint64(l1Bytes)) - - // Adjust the price paid 
by the aggregator's reported improvements due to batching - ratio, _ := ps.AggregatorCompressionRatio(poster) - adjustedL1Fee := arbmath.BigMulByBips(l1Fee, ratio) - - return adjustedL1Fee, true + pricePerUnit, _ := ps.PricePerUnit() + numUnits := l1Bytes * params.TxDataNonZeroGasEIP2028 + return am.BigMulByUint(pricePerUnit, numUnits), numUnits } -func (ps *L1PricingState) GetPosterInfo(tx *types.Transaction, sender, poster common.Address) (*big.Int, bool) { +// Returns the poster cost and the calldata units for a transaction +func (ps *L1PricingState) GetPosterInfo(tx *types.Transaction, poster common.Address) (*big.Int, uint64) { cost, _ := tx.PosterCost.Load().(*big.Int) if cost != nil { - return cost, atomic.LoadInt32(&tx.PosterIsReimbursable) != 0 - } - cost, reimbursable := ps.getPosterInfoWithoutCache(tx, sender, poster) - var reimbursableInt int32 - if reimbursable { - reimbursableInt = 1 + return cost, atomic.LoadUint64(&tx.CalldataUnits) } - atomic.StoreInt32(&tx.PosterIsReimbursable, reimbursableInt) + cost, units := ps.getPosterInfoWithoutCache(tx, poster) + atomic.StoreUint64(&tx.CalldataUnits, units) tx.PosterCost.Store(cost) - return cost, reimbursable + return cost, units } const TxFixedCost = 140 // assumed maximum size in bytes of a typical RLP-encoded tx, not including its calldata -func (ps *L1PricingState) PosterDataCost(message core.Message, sender, poster common.Address) (*big.Int, bool) { - +func (ps *L1PricingState) PosterDataCost(message core.Message, poster common.Address) (*big.Int, uint64) { if tx := message.UnderlyingTransaction(); tx != nil { - return ps.GetPosterInfo(tx, sender, poster) + return ps.GetPosterInfo(tx, poster) } - if message.RunMode() == types.MessageGasEstimationMode { - // assume for the purposes of gas estimation that the poster will be the user's preferred aggregator - aggregator, _ := ps.ReimbursableAggregatorForSender(sender) - if aggregator != nil { - poster = *aggregator - } else { - // assume the user will use 
the delayed inbox since there's no reimbursable party - return big.NewInt(0), false - } + if poster != BatchPosterAddress { + return common.Big0, 0 } byteCount, err := byteCountAfterBrotli0(message.Data()) if err != nil { log.Error("failed to compress tx", "err", err) - return big.NewInt(0), false + return common.Big0, 0 } // Approximate the l1 fee charged for posting this tx's calldata l1Bytes := byteCount + TxFixedCost - l1GasPrice, _ := ps.L1BaseFeeEstimateWei() - l1BytePrice := arbmath.BigMulByUint(l1GasPrice, params.TxDataNonZeroGasEIP2028) - l1Fee := arbmath.BigMulByUint(l1BytePrice, uint64(l1Bytes)) + pricePerUnit, _ := ps.PricePerUnit() - // Adjust the price paid by the aggregator's reported improvements due to batching - ratio, _ := ps.AggregatorCompressionRatio(poster) - return arbmath.BigMulByBips(l1Fee, ratio), true + units := l1Bytes * params.TxDataNonZeroGasEIP2028 + return am.BigMulByUint(pricePerUnit, units), units } func byteCountAfterBrotli0(input []byte) (uint64, error) { diff --git a/arbos/l1pricing/l1pricing_test.go b/arbos/l1pricing/l1pricing_test.go index d93720228a..7c83373987 100644 --- a/arbos/l1pricing/l1pricing_test.go +++ b/arbos/l1pricing/l1pricing_test.go @@ -4,17 +4,16 @@ package l1pricing import ( + am "github.com/offchainlabs/nitro/util/arbmath" "math" "math/big" "testing" - "github.com/offchainlabs/nitro/arbos/burn" - "github.com/offchainlabs/nitro/arbos/storage" - "github.com/offchainlabs/nitro/util/arbmath" - "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/params" + "github.com/offchainlabs/nitro/arbos/burn" + "github.com/offchainlabs/nitro/arbos/storage" ) func TestTxFixedCost(t *testing.T) { @@ -57,33 +56,16 @@ func TestL1PriceUpdate(t *testing.T) { Require(t, err) ps := OpenL1PricingState(sto) - tyme, err := ps.LastL1BaseFeeUpdateTime() + tyme, err := ps.LastUpdateTime() Require(t, err) if tyme != 0 { Fail(t) } - priceEstimate, err := 
ps.L1BaseFeeEstimateWei() - Require(t, err) - if priceEstimate.Cmp(big.NewInt(InitialL1BaseFeeEstimate)) != 0 { - Fail(t) - } - - newPrice := big.NewInt(20 * params.GWei) - ps.UpdatePricingModel(newPrice, 2) - priceEstimate, err = ps.L1BaseFeeEstimateWei() + initialPriceEstimate := am.UintToBig(InitialPricePerUnitWei) + priceEstimate, err := ps.PricePerUnit() Require(t, err) - - if priceEstimate.Cmp(newPrice) <= 0 || priceEstimate.Cmp(big.NewInt(InitialL1BaseFeeEstimate)) >= 0 { + if priceEstimate.Cmp(initialPriceEstimate) != 0 { Fail(t) } - - ps.UpdatePricingModel(newPrice, uint64(1)<<63) - priceEstimate, err = ps.L1BaseFeeEstimateWei() - Require(t, err) - - priceLimit := arbmath.BigAdd(newPrice, big.NewInt(300)) - if arbmath.BigGreaterThan(priceEstimate, priceLimit) || arbmath.BigLessThan(priceEstimate, newPrice) { - Fail(t, priceEstimate) - } } diff --git a/arbos/l1pricing_test.go b/arbos/l1pricing_test.go new file mode 100644 index 0000000000..fb0f1779c6 --- /dev/null +++ b/arbos/l1pricing_test.go @@ -0,0 +1,172 @@ +// Copyright 2021-2022, Offchain Labs, Inc. 
+// For license information, see https://github.com/nitro/blob/master/LICENSE + +package arbos + +import ( + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/math" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/offchainlabs/nitro/arbos/arbosState" + "github.com/offchainlabs/nitro/arbos/l1pricing" + "github.com/offchainlabs/nitro/util/arbmath" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/params" + "github.com/offchainlabs/nitro/arbos/burn" +) + +type l1PricingTest struct { + unitReward uint64 + unitsPerSecond uint64 + fundsCollectedPerSecond uint64 + fundsSpent uint64 +} + +type l1TestExpectedResults struct { + rewardRecipientBalance *big.Int + unitsRemaining uint64 + fundsReceived *big.Int + fundsStillHeld *big.Int +} + +func TestL1Pricing(t *testing.T) { + inputs := []*l1PricingTest{ + { + unitReward: 10, + unitsPerSecond: 78, + fundsCollectedPerSecond: 7800, + fundsSpent: 3000, + }, + { + unitReward: 10, + unitsPerSecond: 78, + fundsCollectedPerSecond: 1313, + fundsSpent: 3000, + }, + { + unitReward: 10, + unitsPerSecond: 78, + fundsCollectedPerSecond: 31, + fundsSpent: 3000, + }, + } + for _, input := range inputs { + expectedResult := expectedResultsForL1Test(input) + _testL1PricingFundsDue(t, input, expectedResult) + } +} + +func expectedResultsForL1Test(input *l1PricingTest) *l1TestExpectedResults { + ret := &l1TestExpectedResults{} + availableFunds := arbmath.UintToBig(input.fundsCollectedPerSecond) + fundsWantedForRewards := big.NewInt(int64(input.unitReward * input.unitsPerSecond)) + unitsAllocated := arbmath.UintToBig(input.unitsPerSecond) + if arbmath.BigLessThan(availableFunds, fundsWantedForRewards) { + ret.rewardRecipientBalance = availableFunds + } else { + ret.rewardRecipientBalance = fundsWantedForRewards + } + availableFunds = arbmath.BigSub(availableFunds, ret.rewardRecipientBalance) + ret.unitsRemaining = (3 * input.unitsPerSecond) - unitsAllocated.Uint64() + + maxCollectable := 
big.NewInt(int64(input.fundsSpent)) + if arbmath.BigLessThan(availableFunds, maxCollectable) { + maxCollectable = availableFunds + } + ret.fundsReceived = maxCollectable + availableFunds = arbmath.BigSub(availableFunds, maxCollectable) + ret.fundsStillHeld = arbmath.BigAdd(arbmath.UintToBig(2*input.fundsCollectedPerSecond), availableFunds) + + return ret +} + +func _testL1PricingFundsDue(t *testing.T, testParams *l1PricingTest, expectedResults *l1TestExpectedResults) { + evm := newMockEVMForTesting() + burner := burn.NewSystemBurner(nil, false) + arbosSt, err := arbosState.OpenArbosState(evm.StateDB, burner) + Require(t, err) + + l1p := arbosSt.L1PricingState() + err = l1p.SetPerUnitReward(testParams.unitReward) + Require(t, err) + rewardAddress := common.Address{137} + err = l1p.SetPayRewardsTo(rewardAddress) + Require(t, err) + + posterTable := l1p.BatchPosterTable() + + // check initial funds state + rewardsDue, err := l1p.FundsDueForRewards() + Require(t, err) + if rewardsDue.Sign() != 0 { + t.Fatal() + } + if evm.StateDB.GetBalance(rewardAddress).Sign() != 0 { + t.Fatal() + } + posterAddrs, err := posterTable.AllPosters(math.MaxUint64) + Require(t, err) + if len(posterAddrs) != 1 { + t.Fatal() + } + firstPoster := posterAddrs[0] + firstPayTo := common.Address{1, 2} + poster, err := posterTable.OpenPoster(firstPoster, true) + Require(t, err) + due, err := poster.FundsDue() + Require(t, err) + if due.Sign() != 0 { + t.Fatal() + } + err = poster.SetPayTo(firstPayTo) + Require(t, err) + + // add another poster + secondPoster := common.Address{3, 4, 5} + secondPayTo := common.Address{6, 7} + _, err = posterTable.AddPoster(secondPoster, secondPayTo) + Require(t, err) + + // create some fake collection + balanceAdded := big.NewInt(int64(testParams.fundsCollectedPerSecond * 3)) + unitsAdded := uint64(testParams.unitsPerSecond * 3) + evm.StateDB.AddBalance(l1pricing.L1PricerFundsPoolAddress, balanceAdded) + err = l1p.SetUnitsSinceUpdate(unitsAdded) + Require(t, err) + 
+ // submit a fake spending update, then check that balances are correct + err = l1p.UpdateForBatchPosterSpending(evm.StateDB, evm, 1, 3, firstPoster, new(big.Int).SetUint64(testParams.fundsSpent)) + Require(t, err) + rewardRecipientBalance := evm.StateDB.GetBalance(rewardAddress) + if !arbmath.BigEquals(rewardRecipientBalance, expectedResults.rewardRecipientBalance) { + t.Fatal(rewardRecipientBalance, expectedResults.rewardRecipientBalance) + } + unitsRemaining, err := l1p.UnitsSinceUpdate() + Require(t, err) + if unitsRemaining != expectedResults.unitsRemaining { + t.Fatal(unitsRemaining, expectedResults.unitsRemaining) + } + fundsReceived := evm.StateDB.GetBalance(firstPayTo) + if !arbmath.BigEquals(fundsReceived, expectedResults.fundsReceived) { + t.Fatal(fundsReceived, expectedResults.fundsReceived) + } + fundsStillHeld := evm.StateDB.GetBalance(l1pricing.L1PricerFundsPoolAddress) + if !arbmath.BigEquals(fundsStillHeld, expectedResults.fundsStillHeld) { + t.Fatal() + } +} + +func newMockEVMForTesting() *vm.EVM { + chainConfig := params.ArbitrumDevTestChainConfig() + _, statedb := arbosState.NewArbosMemoryBackedArbOSState() + context := vm.BlockContext{ + BlockNumber: big.NewInt(0), + GasLimit: ^uint64(0), + Time: big.NewInt(0), + } + evm := vm.NewEVM(context, vm.TxContext{}, statedb, chainConfig, vm.Config{}) + evm.ProcessingHook = &TxProcessor{} + return evm +} diff --git a/arbos/l2pricing/l2pricing.go b/arbos/l2pricing/l2pricing.go index 7a578f5789..1c7edd954d 100644 --- a/arbos/l2pricing/l2pricing.go +++ b/arbos/l2pricing/l2pricing.go @@ -4,25 +4,15 @@ package l2pricing import ( - "errors" - "math" "math/big" "github.com/offchainlabs/nitro/arbos/storage" - "github.com/offchainlabs/nitro/util/arbmath" ) type L2PricingState struct { storage *storage.Storage - gasPool_preExp storage.StorageBackedInt64 - gasPoolLastBlock storage.StorageBackedInt64 - gasPoolSeconds storage.StorageBackedUint64 - gasPoolTarget storage.StorageBackedBips - gasPoolWeight 
storage.StorageBackedBips - rateEstimate storage.StorageBackedUint64 - rateEstimateInertia storage.StorageBackedUint64 speedLimitPerSecond storage.StorageBackedUint64 - maxPerBlockGasLimit storage.StorageBackedUint64 + perBlockGasLimit storage.StorageBackedUint64 baseFeeWei storage.StorageBackedBigInt minBaseFeeWei storage.StorageBackedBigInt gasBacklog storage.StorageBackedUint64 @@ -31,15 +21,8 @@ type L2PricingState struct { } const ( - gasPoolOffset_preExp uint64 = iota - gasPoolLastBlockOffset - gasPoolSecondsOffset - gasPoolTargetOffset - gasPoolWeightOffset - rateEstimateOffset - rateEstimateInertiaOffset - speedLimitPerSecondOffset - maxPerBlockGasLimitOffset + speedLimitPerSecondOffset uint64 = iota + perBlockGasLimitOffset baseFeeWeiOffset minBaseFeeWeiOffset gasBacklogOffset @@ -47,39 +30,23 @@ const ( backlogToleranceOffset ) -const GethBlockGasLimit = 1 << 63 +const GethBlockGasLimit = 1 << 50 -func InitializeL2PricingState(sto *storage.Storage, arbosVersion uint64) error { - _ = sto.SetUint64ByUint64(gasPoolOffset_preExp, InitialGasPoolSeconds*InitialSpeedLimitPerSecond) - _ = sto.SetUint64ByUint64(gasPoolLastBlockOffset, InitialGasPoolSeconds*InitialSpeedLimitPerSecond) - _ = sto.SetUint64ByUint64(gasPoolSecondsOffset, InitialGasPoolSeconds) - _ = sto.SetUint64ByUint64(gasPoolTargetOffset, uint64(InitialGasPoolTargetBips)) - _ = sto.SetUint64ByUint64(gasPoolWeightOffset, uint64(InitialGasPoolWeightBips)) - _ = sto.SetUint64ByUint64(rateEstimateOffset, InitialSpeedLimitPerSecond) - _ = sto.SetUint64ByUint64(rateEstimateInertiaOffset, InitialRateEstimateInertia) +func InitializeL2PricingState(sto *storage.Storage) error { _ = sto.SetUint64ByUint64(speedLimitPerSecondOffset, InitialSpeedLimitPerSecond) - _ = sto.SetUint64ByUint64(maxPerBlockGasLimitOffset, InitialPerBlockGasLimit) + _ = sto.SetUint64ByUint64(perBlockGasLimitOffset, InitialPerBlockGasLimit) _ = sto.SetUint64ByUint64(baseFeeWeiOffset, InitialBaseFeeWei) - if arbosVersion >= 
FirstExponentialPricingVersion { - _ = sto.SetUint64ByUint64(gasBacklogOffset, 0) - _ = sto.SetUint64ByUint64(pricingInertiaOffset, InitialPricingInertia) - _ = sto.SetUint64ByUint64(backlogToleranceOffset, InitialBacklogTolerance) - } + _ = sto.SetUint64ByUint64(gasBacklogOffset, 0) + _ = sto.SetUint64ByUint64(pricingInertiaOffset, InitialPricingInertia) + _ = sto.SetUint64ByUint64(backlogToleranceOffset, InitialBacklogTolerance) return sto.SetUint64ByUint64(minBaseFeeWeiOffset, InitialMinimumBaseFeeWei) } func OpenL2PricingState(sto *storage.Storage) *L2PricingState { return &L2PricingState{ sto, - sto.OpenStorageBackedInt64(gasPoolOffset_preExp), - sto.OpenStorageBackedInt64(gasPoolLastBlockOffset), - sto.OpenStorageBackedUint64(gasPoolSecondsOffset), - sto.OpenStorageBackedBips(gasPoolTargetOffset), - sto.OpenStorageBackedBips(gasPoolWeightOffset), - sto.OpenStorageBackedUint64(rateEstimateOffset), - sto.OpenStorageBackedUint64(rateEstimateInertiaOffset), sto.OpenStorageBackedUint64(speedLimitPerSecondOffset), - sto.OpenStorageBackedUint64(maxPerBlockGasLimitOffset), + sto.OpenStorageBackedUint64(perBlockGasLimitOffset), sto.OpenStorageBackedBigInt(baseFeeWeiOffset), sto.OpenStorageBackedBigInt(minBaseFeeWeiOffset), sto.OpenStorageBackedUint64(gasBacklogOffset), @@ -88,101 +55,6 @@ func OpenL2PricingState(sto *storage.Storage) *L2PricingState { } } -func (ps *L2PricingState) UpgradeToVersion4() error { - gasPoolSeconds, err := ps.GasPoolSeconds() - if err != nil { - return err - } - speedLimit, err := ps.SpeedLimitPerSecond() - if err != nil { - return err - } - gasPool, err := ps.GasPool_preExp() - if err != nil { - return err - } - if err := ps.SetGasBacklog(uint64(int64(gasPoolSeconds*speedLimit) - gasPool)); err != nil { - return err - } - if err := ps.SetPricingInertia(InitialPricingInertia); err != nil { - return err - } - return ps.SetBacklogTolerance(InitialBacklogTolerance) -} - -func (ps *L2PricingState) GasPool_preExp() (int64, error) { - return 
ps.gasPool_preExp.Get() -} - -func (ps *L2PricingState) SetGasPool_preExp(val int64) error { - return ps.gasPool_preExp.Set(val) -} - -func (ps *L2PricingState) GasPoolLastBlock() (int64, error) { - return ps.gasPoolLastBlock.Get() -} - -func (ps *L2PricingState) SetGasPoolLastBlock(val int64) { - ps.Restrict(ps.gasPoolLastBlock.Set(val)) -} - -func (ps *L2PricingState) GasPoolSeconds() (uint64, error) { - return ps.gasPoolSeconds.Get() -} - -func (ps *L2PricingState) SetGasPoolSeconds(seconds uint64) error { - limit, err := ps.SpeedLimitPerSecond() - if err != nil { - return err - } - if seconds == 0 || seconds > 3*60*60 || arbmath.SaturatingUMul(seconds, limit) > math.MaxInt64 { - return errors.New("GasPoolSeconds is out of bounds") - } - if err := ps.clipGasPool_preExp(seconds, limit); err != nil { - return err - } - return ps.gasPoolSeconds.Set(seconds) -} - -func (ps *L2PricingState) GasPoolTarget() (arbmath.Bips, error) { - target, err := ps.gasPoolTarget.Get() - return arbmath.Bips(target), err -} - -func (ps *L2PricingState) SetGasPoolTarget(target arbmath.Bips) error { - if target > arbmath.OneInBips { - return errors.New("GasPoolTarget is out of bounds") - } - return ps.gasPoolTarget.Set(target) -} - -func (ps *L2PricingState) GasPoolWeight() (arbmath.Bips, error) { - return ps.gasPoolWeight.Get() -} - -func (ps *L2PricingState) SetGasPoolWeight(weight arbmath.Bips) error { - if weight > arbmath.OneInBips { - return errors.New("GasPoolWeight is out of bounds") - } - return ps.gasPoolWeight.Set(weight) -} - -func (ps *L2PricingState) RateEstimate() (uint64, error) { - return ps.rateEstimate.Get() -} - -func (ps *L2PricingState) SetRateEstimate(rate uint64) { - ps.Restrict(ps.rateEstimate.Set(rate)) -} - -func (ps *L2PricingState) RateEstimateInertia() (uint64, error) { - return ps.rateEstimateInertia.Get() -} - -func (ps *L2PricingState) SetRateEstimateInertia(inertia uint64) error { - return ps.rateEstimateInertia.Set(inertia) -} - func (ps 
*L2PricingState) BaseFeeWei() (*big.Int, error) { return ps.baseFeeWei.Get() } @@ -207,34 +79,15 @@ func (ps *L2PricingState) SpeedLimitPerSecond() (uint64, error) { } func (ps *L2PricingState) SetSpeedLimitPerSecond(limit uint64) error { - seconds, err := ps.GasPoolSeconds() - if err != nil { - return err - } - if limit == 0 || arbmath.SaturatingUMul(seconds, limit) > math.MaxInt64 { - return errors.New("SetSpeedLimitPerSecond is out of bounds") - } - if err := ps.clipGasPool_preExp(seconds, limit); err != nil { - return err - } return ps.speedLimitPerSecond.Set(limit) } -func (ps *L2PricingState) GasPoolMax() (int64, error) { - speedLimit, _ := ps.SpeedLimitPerSecond() - seconds, err := ps.GasPoolSeconds() - if err != nil { - return 0, err - } - return arbmath.SaturatingCast(seconds * speedLimit), nil -} - -func (ps *L2PricingState) MaxPerBlockGasLimit() (uint64, error) { - return ps.maxPerBlockGasLimit.Get() +func (ps *L2PricingState) PerBlockGasLimit() (uint64, error) { + return ps.perBlockGasLimit.Get() } func (ps *L2PricingState) SetMaxPerBlockGasLimit(limit uint64) error { - return ps.maxPerBlockGasLimit.Set(limit) + return ps.perBlockGasLimit.Set(limit) } func (ps *L2PricingState) GasBacklog() (uint64, error) { @@ -261,19 +114,6 @@ func (ps *L2PricingState) SetBacklogTolerance(val uint64) error { return ps.backlogTolerance.Set(val) } -// Ensure the gas pool is within the implied maximum capacity -func (ps *L2PricingState) clipGasPool_preExp(seconds, speedLimit uint64) error { - pool, err := ps.GasPool_preExp() - if err != nil { - return err - } - newMax := arbmath.SaturatingCast(arbmath.SaturatingUMul(seconds, speedLimit)) - if pool > newMax { - err = ps.SetGasPool_preExp(newMax) - } - return err -} - func (ps *L2PricingState) Restrict(err error) { ps.storage.Burner().Restrict(err) } diff --git a/arbos/l2pricing/l2pricing_test.go b/arbos/l2pricing/l2pricing_test.go index 71ccecd454..57759d7f82 100644 --- a/arbos/l2pricing/l2pricing_test.go +++ 
b/arbos/l2pricing/l2pricing_test.go @@ -14,88 +14,52 @@ import ( "github.com/offchainlabs/nitro/util/testhelpers" ) -func PricingForTest(t *testing.T, arbosVersion uint64) *L2PricingState { +func PricingForTest(t *testing.T) *L2PricingState { storage := storage.NewMemoryBacked(burn.NewSystemBurner(nil, false)) - err := InitializeL2PricingState(storage, arbosVersion) + err := InitializeL2PricingState(storage) Require(t, err) return OpenL2PricingState(storage) } -func fakeBlockUpdate(t *testing.T, pricing *L2PricingState, gasUsed int64, timePassed uint64, arbosVersion uint64) { +func fakeBlockUpdate(t *testing.T, pricing *L2PricingState, gasUsed int64, timePassed uint64) { basefee := getPrice(t, pricing) - pricing.storage.Burner().Restrict(pricing.AddToGasPool(-gasUsed, arbosVersion)) - pricing.UpdatePricingModel(arbmath.UintToBig(basefee), timePassed, arbosVersion, true) -} - -func TestPricingModelPreExp(t *testing.T) { - versionedTestPricingModel(t, FirstExponentialPricingVersion-1) + pricing.storage.Burner().Restrict(pricing.AddToGasPool(-gasUsed)) + pricing.UpdatePricingModel(arbmath.UintToBig(basefee), timePassed, true) } func TestPricingModelExp(t *testing.T) { - versionedTestPricingModel(t, FirstExponentialPricingVersion) -} - -func versionedTestPricingModel(t *testing.T, arbosVersion uint64) { - pricing := PricingForTest(t, arbosVersion) - maxPool := maxGasPool(t, pricing) - gasPool := getGasPool(t, pricing) + pricing := PricingForTest(t) minPrice := getMinPrice(t, pricing) price := getPrice(t, pricing) limit := getSpeedLimit(t, pricing) - if gasPool != maxPool { - Fail(t, "pool not filled", gasPool, maxPool) - } if price != minPrice { Fail(t, "price not minimal", price, minPrice) } - // declare that we've been running at the speed limit - pricing.SetRateEstimate(limit) - // show that running at the speed limit with a full pool is a steady-state colors.PrintBlue("full pool & speed limit") for seconds := 0; seconds < 4; seconds++ { - fakeBlockUpdate(t, 
pricing, int64(seconds)*int64(limit), uint64(seconds), arbosVersion) + fakeBlockUpdate(t, pricing, int64(seconds)*int64(limit), uint64(seconds)) if getPrice(t, pricing) != minPrice { Fail(t, "price changed when it shouldn't have") } - if arbosVersion < 4 && getGasPool(t, pricing) != maxPool { - Fail(t, "pool changed when it shouldn't have") - } } - // set the gas pool to the target - target, _ := pricing.GasPoolTarget() - poolTarget := int64(target) * maxPool / 10000 - Require(t, pricing.SetGasPool_preExp(poolTarget)) - pricing.SetGasPoolLastBlock(poolTarget) - pricing.SetRateEstimate(limit) - // show that running at the speed limit with a target pool is close to a steady-state // note that for large enough spans of time the price will rise a miniscule amount due to the pool's avg colors.PrintBlue("pool target & speed limit") for seconds := 0; seconds < 4; seconds++ { - fakeBlockUpdate(t, pricing, int64(seconds)*int64(limit), uint64(seconds), arbosVersion) + fakeBlockUpdate(t, pricing, int64(seconds)*int64(limit), uint64(seconds)) if getPrice(t, pricing) != minPrice { Fail(t, "price changed when it shouldn't have") } - if arbosVersion < FirstExponentialPricingVersion && getGasPool(t, pricing) != poolTarget { - Fail(t, "pool changed when it shouldn't have") - } } - // fill the gas pool - Require(t, pricing.SetGasPool_preExp(maxPool)) - pricing.SetGasPoolLastBlock(maxPool) - // show that running over the speed limit escalates the price before the pool drains colors.PrintBlue("exceeding the speed limit") for { - fakeBlockUpdate(t, pricing, 8*int64(limit), 1, arbosVersion) - if arbosVersion < FirstExponentialPricingVersion && getGasPool(t, pricing) < poolTarget { - Fail(t, "the price failed to rise before the pool drained") - } + fakeBlockUpdate(t, pricing, 8*int64(limit), 1) newPrice := getPrice(t, pricing) if newPrice < price { Fail(t, "the price shouldn't have fallen") @@ -107,44 +71,22 @@ func versionedTestPricingModel(t *testing.T, arbosVersion uint64) { } // 
empty the pool - pricing.SetRateEstimate(limit) price = getPrice(t, pricing) - rate := rateEstimate(t, pricing) - if arbosVersion < FirstExponentialPricingVersion { - Require(t, pricing.SetGasPool_preExp(0)) - pricing.SetGasPoolLastBlock(0) - } else { - Require(t, pricing.SetGasBacklog(100000000)) - } + Require(t, pricing.SetGasBacklog(100000000)) // show that nothing happens when no time has passed and no gas has been burnt colors.PrintBlue("nothing should happen") - fakeBlockUpdate(t, pricing, 0, 0, arbosVersion) - if arbosVersion < FirstExponentialPricingVersion && (getPrice(t, pricing) != price || getGasPool(t, pricing) != 0 || rateEstimate(t, pricing) != rate) { - Fail(t, "state shouldn't have changed") - } + fakeBlockUpdate(t, pricing, 0, 0) // show that the pool will escalate the price colors.PrintBlue("gas pool is empty") - fakeBlockUpdate(t, pricing, 0, 1, arbosVersion) + fakeBlockUpdate(t, pricing, 0, 1) if getPrice(t, pricing) <= price { fmt.Println(price, getPrice(t, pricing)) Fail(t, "price should have risen") } } -func maxGasPool(t *testing.T, pricing *L2PricingState) int64 { - value, err := pricing.GasPoolMax() - Require(t, err) - return value -} - -func getGasPool(t *testing.T, pricing *L2PricingState) int64 { - value, err := pricing.GasPool_preExp() - Require(t, err) - return value -} - func getPrice(t *testing.T, pricing *L2PricingState) uint64 { value, err := pricing.BaseFeeWei() Require(t, err) @@ -163,12 +105,6 @@ func getSpeedLimit(t *testing.T, pricing *L2PricingState) uint64 { return value } -func rateEstimate(t *testing.T, pricing *L2PricingState) uint64 { - value, err := pricing.RateEstimate() - Require(t, err) - return value -} - func Require(t *testing.T, err error, printables ...interface{}) { t.Helper() testhelpers.RequireImpl(t, err, printables...) 
diff --git a/arbos/l2pricing/model.go b/arbos/l2pricing/model.go index bd97ea2795..4f93cbffdd 100644 --- a/arbos/l2pricing/model.go +++ b/arbos/l2pricing/model.go @@ -6,10 +6,8 @@ package l2pricing import ( "math/big" - "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/util/arbmath" - "github.com/offchainlabs/nitro/util/colors" ) const InitialSpeedLimitPerSecond = 1000000 @@ -24,13 +22,7 @@ const InitialBacklogTolerance = 10 var InitialGasPoolTargetBips = arbmath.PercentToBips(80) var InitialGasPoolWeightBips = arbmath.PercentToBips(60) -const FirstExponentialPricingVersion = 4 - -func (ps *L2PricingState) AddToGasPool(gas int64, arbosVersion uint64) error { - if arbosVersion < FirstExponentialPricingVersion { - return ps.AddToGasPool_preExp(gas) - } - +func (ps *L2PricingState) AddToGasPool(gas int64) error { backlog, err := ps.GasBacklog() if err != nil { return err @@ -40,25 +32,10 @@ func (ps *L2PricingState) AddToGasPool(gas int64, arbosVersion uint64) error { return ps.SetGasBacklog(backlog) } -func (ps *L2PricingState) AddToGasPool_preExp(gas int64) error { - gasPool, err := ps.GasPool_preExp() - if err != nil { - return err - } - return ps.SetGasPool_preExp(arbmath.SaturatingAdd(gasPool, gas)) -} - // Update the pricing model with info from the last block -func (ps *L2PricingState) UpdatePricingModel(l2BaseFee *big.Int, timePassed uint64, arbosVersion uint64, debug bool) { - if arbosVersion < FirstExponentialPricingVersion { - // note: if we restart the chain at version >= FirstExponentialPricingVersion, - // we can simplify the L2-pricing-related params and precompiles - ps.UpdatePricingModel_preExp(l2BaseFee, timePassed, arbosVersion, debug) - return - } - +func (ps *L2PricingState) UpdatePricingModel(l2BaseFee *big.Int, timePassed uint64, debug bool) { speedLimit, _ := ps.SpeedLimitPerSecond() - _ = ps.AddToGasPool(int64(timePassed*speedLimit), arbosVersion) + _ = 
ps.AddToGasPool(int64(timePassed * speedLimit)) inertia, _ := ps.PricingInertia() tolerance, _ := ps.BacklogTolerance() backlog, _ := ps.GasBacklog() @@ -71,110 +48,3 @@ func (ps *L2PricingState) UpdatePricingModel(l2BaseFee *big.Int, timePassed uint } _ = ps.SetBaseFeeWei(baseFee) } - -func (ps *L2PricingState) UpdatePricingModel_preExp(l2BaseFee *big.Int, timePassed uint64, arbosVersion uint64, debug bool) { - - // update the rate estimate, which is the weighted average of the past and present - // rate' = weighted average of the historical rate and the current - // rate' = (memory * rate + passed * recent) / (memory + passed) - // rate' = (memory * rate + used) / (memory + passed) - // - gasPool, _ := ps.GasPool_preExp() - gasPoolLastBlock, _ := ps.GasPoolLastBlock() - poolMax, _ := ps.GasPoolMax() - gasPool = arbmath.MinInt(gasPool, poolMax) - gasPoolLastBlock = arbmath.MinInt(gasPoolLastBlock, poolMax) - gasUsed := uint64(gasPoolLastBlock - gasPool) - rateSeconds, _ := ps.RateEstimateInertia() - priorRate, _ := ps.RateEstimate() - rate := arbmath.SaturatingUAdd(arbmath.SaturatingUMul(rateSeconds, priorRate), gasUsed) / (rateSeconds + timePassed) - ps.SetRateEstimate(rate) - - // compute the rate ratio - // ratio = recent gas consumption rate / speed limit - // - speedLimit, _ := ps.SpeedLimitPerSecond() - rateRatio := arbmath.UfracToBigFloat(rate, speedLimit) - - // compute the pool fullness ratio & the updated gas pool - // ratio = max(0, 2 - (average fullness) / (target fullness)) - // pool' = min(maximum, pool + speed * passed) - // - timeToFull := (poolMax - gasPool) / int64(speedLimit) - var averagePool uint64 - var newGasPool int64 - if timePassed > uint64(timeToFull) { - spaceBefore := uint64(poolMax - gasPool) - averagePool = uint64(poolMax) - spaceBefore*spaceBefore/arbmath.SaturatingUMul(2*speedLimit, timePassed) - newGasPool = poolMax - } else { - averagePool = uint64(gasPool) + timePassed*speedLimit/2 - newGasPool = gasPool + 
int64(speedLimit*timePassed) - } - poolTarget, _ := ps.GasPoolTarget() - poolTargetGas := uint64(arbmath.IntMulByBips(poolMax, poolTarget)) - poolRatio := arbmath.UfracToBigFloat(0, 1) - if averagePool < 2*poolTargetGas { - poolRatio = arbmath.UfracToBigFloat(2*poolTargetGas-averagePool, poolTargetGas) - } - - // take the weighted average of the ratios, in basis points - // average = weight * pool + (1 - weight) * rate - // - poolWeight, _ := ps.GasPoolWeight() - oneInBips := arbmath.OneInBips - averageOfRatiosRaw, _ := arbmath.BigAddFloat( - arbmath.BigFloatMulByUint(poolRatio, uint64(poolWeight)), - arbmath.BigFloatMulByUint(rateRatio, uint64(oneInBips-poolWeight)), - ).Uint64() - averageOfRatios := arbmath.Bips(averageOfRatiosRaw) - averageOfRatiosUnbounded := averageOfRatios - if arbosVersion < 3 && averageOfRatios > arbmath.PercentToBips(200) { - averageOfRatios = arbmath.PercentToBips(200) - } - - // update the gas price, adjusting each second by the max allowed by EIP 1559 - // price' = price * exp(seconds at intensity) / 2 mins - // - exp := (averageOfRatios - arbmath.OneInBips) * arbmath.Bips(timePassed) / 120 // limit to EIP 1559's max rate - price := arbmath.BigMulByBips(l2BaseFee, arbmath.ApproxExpBasisPoints(exp)) - maxPrice := arbmath.BigMulByInt(l2BaseFee, params.ElasticityMultiplier) - minPrice, _ := ps.MinBaseFeeWei() - - p := func(args ...interface{}) { - if debug { - colors.PrintGrey(args...) 
- } - } - p("\nused\t", gasUsed, " in ", timePassed, "s = ", rate, "/s vs limit ", speedLimit, "/s for ", rateRatio) - p("pool\t", gasPool, "/", poolMax, " ➤ ", averagePool, " ➤ ", newGasPool, " ", poolRatio) - p("ratio\t", poolRatio, rateRatio, " ➤ ", averageOfRatiosUnbounded, "‱ ") - p("exp()\t", exp, " ➤ ", arbmath.ApproxExpBasisPoints(exp), "‱ ") - p("price\t", l2BaseFee, " ➤ ", price, " bound to [", minPrice, ", ", maxPrice, "]\n") - - if arbmath.BigLessThan(price, minPrice) { - price = minPrice - } - if arbmath.BigGreaterThan(price, maxPrice) { - log.Warn("ArbOS tried to 2x the price", "price", price, "bound", maxPrice) - price = maxPrice - } - _ = ps.SetBaseFeeWei(price) - _ = ps.SetGasPool_preExp(newGasPool) - ps.SetGasPoolLastBlock(newGasPool) -} - -func (ps *L2PricingState) PerBlockGasLimit(arbosVersion uint64) (uint64, error) { - if arbosVersion >= FirstExponentialPricingVersion { - return ps.MaxPerBlockGasLimit() - } - pool, _ := ps.GasPool_preExp() - maxLimit, err := ps.MaxPerBlockGasLimit() - if pool < 0 || err != nil { - return 0, err - } else if uint64(pool) > maxLimit { - return maxLimit, nil - } else { - return uint64(pool), nil - } -} diff --git a/arbos/retryables/retryable.go b/arbos/retryables/retryable.go index d732ee342b..c4352997b0 100644 --- a/arbos/retryables/retryable.go +++ b/arbos/retryables/retryable.go @@ -328,7 +328,7 @@ func (rs *RetryableState) TryToReapOneRetryable(currentTimestamp uint64, evm *vm return windowsLeftStorage.Set(windowsLeft - 1) } -func (retryable *Retryable) MakeTx(chainId *big.Int, nonce uint64, gasFeeCap *big.Int, gas uint64, ticketId common.Hash, refundTo common.Address) (*types.ArbitrumRetryTx, error) { +func (retryable *Retryable) MakeTx(chainId *big.Int, nonce uint64, gasFeeCap *big.Int, gas uint64, ticketId common.Hash, refundTo common.Address, maxRefund *big.Int, submissionFeeRefund *big.Int) (*types.ArbitrumRetryTx, error) { from, err := retryable.From() if err != nil { return nil, err @@ -346,16 +346,18 
@@ func (retryable *Retryable) MakeTx(chainId *big.Int, nonce uint64, gasFeeCap *bi return nil, err } return &types.ArbitrumRetryTx{ - ChainId: chainId, - Nonce: nonce, - From: from, - GasFeeCap: gasFeeCap, - Gas: gas, - To: to, - Value: callvalue, - Data: calldata, - TicketId: ticketId, - RefundTo: refundTo, + ChainId: chainId, + Nonce: nonce, + From: from, + GasFeeCap: gasFeeCap, + Gas: gas, + To: to, + Value: callvalue, + Data: calldata, + TicketId: ticketId, + RefundTo: refundTo, + MaxRefund: maxRefund, + SubmissionFeeRefund: submissionFeeRefund, }, nil } diff --git a/arbos/storage/storage.go b/arbos/storage/storage.go index 6dfd1c4d83..df9f726a5b 100644 --- a/arbos/storage/storage.go +++ b/arbos/storage/storage.go @@ -450,6 +450,10 @@ func (sbbi *StorageBackedBigInt) Set(val *big.Int) error { return sbbi.StorageSlot.Set(common.BigToHash(val)) } +func (sbbi *StorageBackedBigInt) SetByUint(val uint64) error { + return sbbi.StorageSlot.Set(util.UintToHash(val)) +} + type StorageBackedAddress struct { StorageSlot } diff --git a/arbos/tx_processor.go b/arbos/tx_processor.go index ccb26bc962..e14c9fe5a9 100644 --- a/arbos/tx_processor.go +++ b/arbos/tx_processor.go @@ -9,6 +9,8 @@ import ( "math/big" "time" + "github.com/offchainlabs/nitro/arbos/l1pricing" + "github.com/offchainlabs/nitro/arbos/util" "github.com/offchainlabs/nitro/solgen/go/precompilesgen" "github.com/offchainlabs/nitro/util/arbmath" @@ -28,6 +30,8 @@ import ( var arbosAddress = types.ArbosAddress +const GasEstimationL1PricePadding arbmath.Bips = 11000 // pad estimates by 10% + // A TxProcessor is created and freed for every L2 transaction. // It tracks state for ArbOS, allowing it infuence in Geth's tx processing. // Public fields are accessible in precompiles. 
@@ -41,6 +45,7 @@ type TxProcessor struct { TopTxType *byte // set once in StartTxHook evm *vm.EVM CurrentRetryable *common.Hash + CurrentRefundTo *common.Address } func NewTxProcessor(evm *vm.EVM, msg core.Message) *TxProcessor { @@ -55,6 +60,7 @@ func NewTxProcessor(evm *vm.EVM, msg core.Message) *TxProcessor { TopTxType: nil, evm: evm, CurrentRetryable: nil, + CurrentRefundTo: nil, } } @@ -66,8 +72,17 @@ func (p *TxProcessor) PopCaller() { p.Callers = p.Callers[:len(p.Callers)-1] } -func (p *TxProcessor) DropTip() bool { - return p.state.FormatVersion() >= 2 +// Attempts to subtract up to `take` from `pool` without going negative. +// Returns the amount subtracted from `pool`. +func takeFunds(pool *big.Int, take *big.Int) *big.Int { + if arbmath.BigLessThan(pool, take) { + oldPool := new(big.Int).Set(pool) + pool.Set(common.Big0) + return oldPool + } else { + pool.Sub(pool, take) + return new(big.Int).Set(take) + } } func (p *TxProcessor) StartTxHook() (endTxNow bool, gasUsed uint64, err error, returnData []byte) { @@ -80,7 +95,6 @@ func (p *TxProcessor) StartTxHook() (endTxNow bool, gasUsed uint64, err error, r } var tracingInfo *util.TracingInfo - from := p.msg.From() tipe := underlyingTx.Type() p.TopTxType = &tipe evm := p.evm @@ -91,6 +105,7 @@ func (p *TxProcessor) StartTxHook() (endTxNow bool, gasUsed uint64, err error, r } evm.IncrementDepth() // fake a call tracer := evm.Config.Tracer + from := p.msg.From() start := time.Now() tracer.CaptureStart(evm, from, *p.msg.To(), false, p.msg.Data(), p.msg.Gas(), p.msg.Value()) @@ -109,9 +124,6 @@ func (p *TxProcessor) StartTxHook() (endTxNow bool, gasUsed uint64, err error, r switch tx := underlyingTx.GetInner().(type) { case *types.ArbitrumDepositTx: defer (startTracer())() - if p.msg.From() != arbosAddress { - return false, 0, errors.New("deposit not from arbAddress"), nil - } util.MintBalance(p.msg.To(), p.msg.Value(), evm, util.TracingDuringEVM, "deposit") return true, 0, nil, nil case 
*types.ArbitrumInternalTx: @@ -131,25 +143,33 @@ func (p *TxProcessor) StartTxHook() (endTxNow bool, gasUsed uint64, err error, r scenario := util.TracingDuringEVM // mint funds with the deposit, then charge fees later - util.MintBalance(&from, tx.DepositValue, evm, scenario, "deposit") - - submissionFee := retryables.RetryableSubmissionFee(len(tx.RetryData), tx.L1BaseFee) - excessDeposit := arbmath.BigSub(tx.MaxSubmissionFee, submissionFee) - if excessDeposit.Sign() < 0 { - return true, 0, errors.New("max submission fee is less than the actual submission fee"), nil - } + availableRefund := new(big.Int).Set(tx.DepositValue) + takeFunds(availableRefund, tx.Value) + util.MintBalance(&tx.From, tx.DepositValue, evm, scenario, "deposit") transfer := func(from, to *common.Address, amount *big.Int) error { return util.TransferBalance(from, to, amount, evm, scenario, "during evm execution") } - // move balance to the relevant parties - if err := transfer(&from, &networkFeeAccount, submissionFee); err != nil { + // collect the submission fee + submissionFee := retryables.RetryableSubmissionFee(len(tx.RetryData), tx.L1BaseFee) + if err := transfer(&tx.From, &networkFeeAccount, submissionFee); err != nil { return true, 0, err, nil } - if err := transfer(&from, &tx.FeeRefundAddr, excessDeposit); err != nil { - return true, 0, err, nil + withheldSubmissionFee := takeFunds(availableRefund, submissionFee) + + // refund excess submission fee + submissionFeeRefund := arbmath.BigSub(tx.MaxSubmissionFee, submissionFee) + if submissionFeeRefund.Sign() < 0 { + return true, 0, errors.New("max submission fee is less than the actual submission fee"), nil + } + submissionFeeRefund = takeFunds(availableRefund, submissionFeeRefund) + if err := transfer(&tx.From, &tx.FeeRefundAddr, submissionFeeRefund); err != nil { + // should never happen as from's balance should be at least availableRefund at this point + glog.Error("failed to transfer submissionFeeRefund", "err", err) } + + // move the 
callvalue into escrow if err := transfer(&tx.From, &escrow, tx.Value); err != nil { return true, 0, err, nil } @@ -195,6 +215,15 @@ func (p *TxProcessor) StartTxHook() (endTxNow bool, gasUsed uint64, err error, r panic(err) } + withheldGasFunds := takeFunds(availableRefund, gascost) // gascost is conceptually charged before the gas price refund + gasPriceRefund := arbmath.BigMulByUint(arbmath.BigSub(tx.GasFeeCap, basefee), tx.Gas) + gasPriceRefund = takeFunds(availableRefund, gasPriceRefund) + if err := transfer(&tx.From, &tx.FeeRefundAddr, gasPriceRefund); err != nil { + glog.Error("failed to transfer gasPriceRefund", "err", err) + } + availableRefund.Add(availableRefund, withheldGasFunds) + availableRefund.Add(availableRefund, withheldSubmissionFee) + // emit RedeemScheduled event retryTxInner, err := retryable.MakeTx( underlyingTx.ChainId(), @@ -203,6 +232,8 @@ func (p *TxProcessor) StartTxHook() (endTxNow bool, gasUsed uint64, err error, r usergas, ticketId, tx.FeeRefundAddr, + availableRefund, + submissionFee, ) p.state.Restrict(err) @@ -216,6 +247,8 @@ func (p *TxProcessor) StartTxHook() (endTxNow bool, gasUsed uint64, err error, r ticketId, types.NewTx(retryTxInner).Hash(), tx.FeeRefundAddr, + availableRefund, + submissionFee, ) if err != nil { glog.Error("failed to emit RedeemScheduled event", "err", err) @@ -244,20 +277,33 @@ func (p *TxProcessor) StartTxHook() (endTxNow bool, gasUsed uint64, err error, r prepaid := arbmath.BigMulByUint(evm.Context.BaseFee, tx.Gas) util.MintBalance(&tx.From, prepaid, evm, scenario, "prepaid") ticketId := tx.TicketId + refundTo := tx.RefundTo p.CurrentRetryable = &ticketId + p.CurrentRefundTo = &refundTo } return false, 0, nil, nil } -func (p *TxProcessor) GasChargingHook(gasRemaining *uint64) (*common.Address, error) { +func (p *TxProcessor) GasChargingHook(gasRemaining *uint64) error { // Because a user pays a 1-dimensional gas price, we must re-express poster L1 calldata costs // as if the user was buying an equivalent 
amount of L2 compute gas. This hook determines what // that cost looks like, ensuring the user can pay and saving the result for later reference. var gasNeededToStartEVM uint64 gasPrice := p.evm.Context.BaseFee - coinbase := p.evm.Context.Coinbase - posterCost, reimburse := p.state.L1PricingState().PosterDataCost(p.msg, p.msg.From(), coinbase) + + var poster common.Address + if p.msg.RunMode() != types.MessageCommitMode { + poster = l1pricing.BatchPosterAddress + } else { + poster = p.evm.Context.Coinbase + } + posterCost, calldataUnits := p.state.L1PricingState().PosterDataCost(p.msg, poster) + if calldataUnits > 0 { + if err := p.state.L1PricingState().AddToUnitsSinceUpdate(calldataUnits); err != nil { + return err + } + } if p.msg.RunMode() == types.MessageGasEstimationMode { // Suggest the amount of gas needed for a given amount of ETH is higher in case of congestion. @@ -272,9 +318,10 @@ func (p *TxProcessor) GasChargingHook(gasRemaining *uint64) (*common.Address, er } gasPrice = adjustedPrice - // Pad the L1 cost by 10% in case the L1 gas price rises - posterCost = arbmath.BigMulByFrac(posterCost, 110, 100) + // Pad the L1 cost in case the L1 gas price rises + posterCost = arbmath.BigMulByBips(posterCost, GasEstimationL1PricePadding) } + if gasPrice.Sign() > 0 { posterCostInL2Gas := arbmath.BigDiv(posterCost, gasPrice) // the cost as if it were an amount of gas if !posterCostInL2Gas.IsUint64() { @@ -285,30 +332,22 @@ func (p *TxProcessor) GasChargingHook(gasRemaining *uint64) (*common.Address, er gasNeededToStartEVM = p.posterGas } - // Most users shouldn't set a tip, but if specified only give it to the poster if they're reimbursable - tipRecipient := &coinbase - if !reimburse { - networkFeeAccount, _ := p.state.NetworkFeeAccount() - tipRecipient = &networkFeeAccount - } - if *gasRemaining < gasNeededToStartEVM { // the user couldn't pay for call data, so give up - return tipRecipient, core.ErrIntrinsicGas + return core.ErrIntrinsicGas } *gasRemaining -= 
gasNeededToStartEVM if p.msg.RunMode() != types.MessageEthcallMode { // If this is a real tx, limit the amount of computed based on the gas pool. // We do this by charging extra gas, and then refunding it later. - gasAvailable, _ := p.state.L2PricingState().PerBlockGasLimit(p.state.FormatVersion()) + gasAvailable, _ := p.state.L2PricingState().PerBlockGasLimit() if *gasRemaining > gasAvailable { p.computeHoldGas = *gasRemaining - gasAvailable *gasRemaining = gasAvailable } } - - return tipRecipient, nil + return nil } func (p *TxProcessor) NonrefundableGas() uint64 { @@ -336,22 +375,47 @@ func (p *TxProcessor) EndTxHook(gasLeft uint64, success bool) { if underlyingTx != nil && underlyingTx.Type() == types.ArbitrumRetryTxType { inner, _ := underlyingTx.GetInner().(*types.ArbitrumRetryTx) - refund := arbmath.BigMulByUint(gasPrice, gasLeft) // undo Geth's refund to the From address - err := util.TransferBalance(&inner.From, nil, refund, p.evm, scenario, "undoRefund") + gasRefund := arbmath.BigMulByUint(gasPrice, gasLeft) + err := util.BurnBalance(&inner.From, gasRefund, p.evm, scenario, "undoRefund") if err != nil { - log.Error("Uh oh, Geth didn't refund the user", inner.From, refund) + log.Error("Uh oh, Geth didn't refund the user", inner.From, gasRefund) } - // refund the RefundTo by taking fees back from the network address - err = util.TransferBalance(&networkFeeAccount, &inner.RefundTo, refund, p.evm, scenario, "refund") - if err != nil { - // Normally the network fee address should be holding the gas funds. - // However, in theory, they could've been transfered out during the redeem attempt. - // If the network fee address doesn't have the necessary balance, log an error and don't give a refund. 
- log.Error("network fee address doesn't have enough funds to give user refund", "err", err) + maxRefund := new(big.Int).Set(inner.MaxRefund) + refundNetworkFee := func(amount *big.Int) { + const errLog = "network fee address doesn't have enough funds to give user refund" + + // Refund funds to the fee refund address without overdrafting the L1 deposit. + toRefundAddr := takeFunds(maxRefund, amount) + err = util.TransferBalance(&networkFeeAccount, &inner.RefundTo, toRefundAddr, p.evm, scenario, "refund") + if err != nil { + // Normally the network fee address should be holding any collected fees. + // However, in theory, they could've been transfered out during the redeem attempt. + // If the network fee address doesn't have the necessary balance, log an error and don't give a refund. + log.Error(errLog, "err", err) + } + // Any extra refund can't be given to the fee refund address if it didn't come from the L1 deposit. + // Instead, give the refund to the retryable from address. + err = util.TransferBalance(&networkFeeAccount, &inner.From, arbmath.BigSub(amount, toRefundAddr), p.evm, scenario, "refund") + if err != nil { + log.Error(errLog, "err", err) + } + } + + if success { + // If successful, refund the submission fee. + refundNetworkFee(inner.SubmissionFeeRefund) + } else { + // The submission fee is still taken from the L1 deposit earlier, even if it's not refunded. + takeFunds(maxRefund, inner.SubmissionFeeRefund) } + // Conceptually, the gas charge is taken from the L1 deposit pool if possible. + takeFunds(maxRefund, arbmath.BigMulByUint(gasPrice, gasUsed)) + // Refund any unused gas, without overdrafting the L1 deposit. 
+ refundNetworkFee(gasRefund) + if success { // we don't want to charge for this tracingInfo := util.NewTracingInfo(p.evm, arbosAddress, p.msg.From(), scenario) @@ -368,7 +432,7 @@ func (p *TxProcessor) EndTxHook(gasLeft uint64, success bool) { } } // we've already credited the network fee account, but we didn't charge the gas pool yet - p.state.Restrict(p.state.L2PricingState().AddToGasPool(-arbmath.SaturatingCast(gasUsed), p.state.FormatVersion())) + p.state.Restrict(p.state.L2PricingState().AddToGasPool(-arbmath.SaturatingCast(gasUsed))) return } @@ -402,7 +466,7 @@ func (p *TxProcessor) EndTxHook(gasLeft uint64, success bool) { log.Error("total gas used < poster gas component", "gasUsed", gasUsed, "posterGas", p.posterGas) computeGas = gasUsed } - p.state.Restrict(p.state.L2PricingState().AddToGasPool(-arbmath.SaturatingCast(computeGas), p.state.FormatVersion())) + p.state.Restrict(p.state.L2PricingState().AddToGasPool(-arbmath.SaturatingCast(computeGas))) } } @@ -434,6 +498,8 @@ func (p *TxProcessor) ScheduledTxes() types.Transactions { event.DonatedGas, event.TicketId, event.GasDonor, + event.MaxRefund, + event.SubmissionFeeRefund, ) scheduled = append(scheduled, types.NewTx(redeem)) } @@ -455,19 +521,6 @@ func (p *TxProcessor) L1BlockHash(blockCtx vm.BlockContext, l1BlockNumber uint64 if err != nil { return common.Hash{}, err } - if state.FormatVersion() < 2 { - // Support the old broken behavior - var lower, upper uint64 - upper = p.evm.Context.BlockNumber.Uint64() - if upper < 257 { - lower = 0 - } else { - lower = upper - 256 - } - if l1BlockNumber < lower || l1BlockNumber >= upper { - return common.Hash{}, nil - } - } return state.Blockhashes().BlockHash(l1BlockNumber) } diff --git a/arbos/util/util.go b/arbos/util/util.go index 55c10b0b93..ca97fa0c6b 100644 --- a/arbos/util/util.go +++ b/arbos/util/util.go @@ -15,6 +15,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" 
"github.com/offchainlabs/nitro/solgen/go/precompilesgen" + "github.com/offchainlabs/nitro/util/arbmath" ) var AddressAliasOffset *big.Int @@ -24,6 +25,8 @@ var ParseL2ToL1TransactionLog func(interface{}, *types.Log) error var ParseL2ToL1TxLog func(interface{}, *types.Log) error var PackInternalTxDataStartBlock func(...interface{}) ([]byte, error) var UnpackInternalTxDataStartBlock func([]byte) ([]interface{}, error) +var PackInternalTxDataBatchPostingReport func(...interface{}) ([]byte, error) +var UnpackInternalTxDataBatchPostingReport func([]byte) ([]interface{}, error) var PackArbRetryableTxRedeem func(...interface{}) ([]byte, error) func init() { @@ -32,7 +35,7 @@ func init() { panic("Error initializing AddressAliasOffset") } AddressAliasOffset = offset - InverseAddressAliasOffset = new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 160), AddressAliasOffset) + InverseAddressAliasOffset = arbmath.BigSub(new(big.Int).Lsh(big.NewInt(1), 160), AddressAliasOffset) // Create a mechanism for parsing event logs logParser := func(source string, name string) func(interface{}, *types.Log) error { @@ -88,6 +91,7 @@ func init() { acts := precompilesgen.ArbosActsABI PackInternalTxDataStartBlock, UnpackInternalTxDataStartBlock = callParser(acts, "startBlock") + PackInternalTxDataBatchPostingReport, UnpackInternalTxDataBatchPostingReport = callParser(acts, "batchPostingReport") PackArbRetryableTxRedeem, _ = callParser(precompilesgen.ArbRetryableTxABI, "redeem") } diff --git a/arbstate/geth_test.go b/arbstate/geth_test.go index 33b11d5235..075516a8b0 100644 --- a/arbstate/geth_test.go +++ b/arbstate/geth_test.go @@ -73,6 +73,9 @@ func TestEthDepositMessage(t *testing.T) { L1BaseFee: big.NewInt(10000000000000), } msgBuf := bytes.Buffer{} + if err := util.AddressToWriter(addr, &msgBuf); err != nil { + t.Error(err) + } if err := util.HashToWriter(balance, &msgBuf); err != nil { t.Error(err) } @@ -88,7 +91,11 @@ func TestEthDepositMessage(t *testing.T) { secondRequestId := 
common.BigToHash(big.NewInt(4)) header.RequestId = &secondRequestId + header.Poster = util.RemapL1Address(addr) msgBuf2 := bytes.Buffer{} + if err := util.AddressToWriter(addr, &msgBuf2); err != nil { + t.Error(err) + } if err := util.HashToWriter(balance2, &msgBuf2); err != nil { t.Error(err) } @@ -103,7 +110,7 @@ func TestEthDepositMessage(t *testing.T) { RunMessagesThroughAPI(t, [][]byte{serialized, serialized2}, statedb) - balanceAfter := statedb.GetBalance(util.RemapL1Address(addr)) + balanceAfter := statedb.GetBalance(addr) if balanceAfter.Cmp(new(big.Int).Add(balance.Big(), balance2.Big())) != 0 { Fail(t) } @@ -116,7 +123,7 @@ func RunMessagesThroughAPI(t *testing.T, msgs [][]byte, statedb *state.StateDB) if err != nil { t.Error(err) } - txes, err := msg.ParseL2Transactions(chainId) + txes, err := msg.ParseL2Transactions(chainId, nil) if err != nil { t.Error(err) } diff --git a/arbstate/inbox.go b/arbstate/inbox.go index b7f270655a..5f0c3be13e 100644 --- a/arbstate/inbox.go +++ b/arbstate/inbox.go @@ -367,7 +367,7 @@ func (r *inboxMultiplexer) getNextMsg() (*MessageWithMetadata, error) { Message: &arbos.L1IncomingMessage{ Header: &arbos.L1IncomingMessageHeader{ Kind: arbos.L1MessageType_L2Message, - Poster: l1pricing.SequencerAddress, + Poster: l1pricing.BatchPosterAddress, BlockNumber: blockNumber, Timestamp: timestamp, RequestId: nil, diff --git a/blockscout b/blockscout index f0a1e5fb5d..310d0283a3 160000 --- a/blockscout +++ b/blockscout @@ -1 +1 @@ -Subproject commit f0a1e5fb5d9c72859a0974ecf36c9a1c40f725ca +Subproject commit 310d0283a308e12bfd5a24a07863cc4100cb58a6 diff --git a/cmd/deploy/deploy.go b/cmd/deploy/deploy.go index 3712a0abf9..adfee47f48 100644 --- a/cmd/deploy/deploy.go +++ b/cmd/deploy/deploy.go @@ -7,12 +7,14 @@ import ( "context" "encoding/json" "flag" - "github.com/offchainlabs/nitro/cmd/genericconf" - "github.com/offchainlabs/nitro/util/headerreader" - "github.com/offchainlabs/nitro/validator" "io/ioutil" "math/big" "os" + "time" + + 
"github.com/offchainlabs/nitro/cmd/genericconf" + "github.com/offchainlabs/nitro/util/headerreader" + "github.com/offchainlabs/nitro/validator" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/ethclient" @@ -33,6 +35,8 @@ func main() { l1conn := flag.String("l1conn", "", "l1 connection") l1keystore := flag.String("l1keystore", "", "l1 private key store") deployAccount := flag.String("l1DeployAccount", "", "l1 seq account to use (default is first account in keystore)") + ownerAddress := flag.String("ownerAddress", "", "the rollup owner's address") + sequencerAddress := flag.String("sequencerAddress", "", "the sequencer's address") wasmmoduleroot := flag.String("wasmmoduleroot", "", "WASM module root hash") wasmrootpath := flag.String("wasmrootpath", "", "path to machine folders") l1passphrase := flag.String("l1passphrase", "passphrase", "l1 private key file passphrase") @@ -40,6 +44,7 @@ func main() { l1ChainIdUint := flag.Uint64("l1chainid", 1337, "L1 chain ID") l2ChainIdUint := flag.Uint64("l2chainid", params.ArbitrumDevTestChainConfig().ChainID.Uint64(), "L2 chain ID") authorizevalidators := flag.Uint64("authorizevalidators", 0, "Number of validators to preemptively authorize") + txTimeout := flag.Duration("txtimeout", 10*time.Minute, "Timeout when waiting for a transaction to be included in a block") flag.Parse() l1ChainId := new(big.Int).SetUint64(*l1ChainIdUint) l2ChainId := new(big.Int).SetUint64(*l2ChainIdUint) @@ -63,10 +68,31 @@ func main() { panic(err) } + if !common.IsHexAddress(*sequencerAddress) { + panic("please specify a valid sequencer address") + } + if !common.IsHexAddress(*ownerAddress) { + panic("please specify a valid rollup owner address") + } + machineConfig := validator.DefaultNitroMachineConfig machineConfig.RootPath = *wasmrootpath - deployPtr, err := arbnode.DeployOnL1(ctx, l1client, l1TransactionOpts, l1TransactionOpts.From, *authorizevalidators, common.HexToHash(*wasmmoduleroot), l2ChainId, 
headerreader.DefaultConfig, machineConfig) + headerReaderConfig := headerreader.DefaultConfig + headerReaderConfig.TxTimeout = *txTimeout + + deployPtr, err := arbnode.DeployOnL1( + ctx, + l1client, + l1TransactionOpts, + common.HexToAddress(*sequencerAddress), + common.HexToAddress(*ownerAddress), + *authorizevalidators, + common.HexToHash(*wasmmoduleroot), + l2ChainId, + headerReaderConfig, + machineConfig, + ) if err != nil { flag.Usage() log.Error("error deploying on l1") diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go index 49fd2eb970..cf1ad42ec1 100644 --- a/cmd/nitro/nitro.go +++ b/cmd/nitro/nitro.go @@ -235,19 +235,13 @@ func main() { initDataReader = statetransfer.NewMemoryInitDataReader(&initData) } - chainConfig, err := arbos.GetChainConfig(new(big.Int).SetUint64(nodeConfig.L2.ChainID)) - if err != nil { - panic(err) - } + var chainConfig *params.ChainConfig var l2BlockChain *core.BlockChain if nodeConfig.NoInit { - blocksInDb, err := chainDb.Ancients() - if err != nil { - panic(err) - } - if blocksInDb == 0 { - panic("No initialization mode supplied, no blocks in Db") + chainConfig = arbnode.TryReadStoredChainConfig(chainDb) + if chainConfig == nil { + panic("No initialization mode supplied, chain data not in Db") } l2BlockChain, err = arbnode.GetBlockChain(chainDb, arbnode.DefaultCacheConfigFor(stack, nodeConfig.Node.Archive), chainConfig) if err != nil { @@ -262,6 +256,10 @@ func main() { if err != nil { panic(err) } + chainConfig, err = arbos.GetChainConfig(new(big.Int).SetUint64(nodeConfig.L2.ChainID), blockNum) + if err != nil { + panic(err) + } l2BlockChain, err = arbnode.WriteOrTestBlockChain(chainDb, arbnode.DefaultCacheConfigFor(stack, nodeConfig.Node.Archive), initDataReader, blockNum, chainConfig) if err != nil { panic(err) diff --git a/cmd/replay/main.go b/cmd/replay/main.go index ada8cfec89..be8558c1ee 100644 --- a/cmd/replay/main.go +++ b/cmd/replay/main.go @@ -152,7 +152,11 @@ func main() { if err != nil { panic(fmt.Sprintf("Error 
getting chain ID from initial ArbOS state: %v", err.Error())) } - chainConfig, err := arbos.GetChainConfig(chainId) + genesisBlockNum, err := initialArbosState.GenesisBlockNum() + if err != nil { + panic(fmt.Sprintf("Error getting chain ID from initial ArbOS state: %v", err.Error())) + } + chainConfig, err := arbos.GetChainConfig(chainId, genesisBlockNum) if err != nil { panic(err) } @@ -160,7 +164,13 @@ func main() { message := readMessage(chainConfig.ArbitrumChainParams.DataAvailabilityCommittee) chainContext := WavmChainContext{} - newBlock, _ = arbos.ProduceBlock(message.Message, message.DelayedMessagesRead, lastBlockHeader, statedb, chainContext, chainConfig) + batchFetcher := func(batchNum uint64) ([]byte, error) { + return wavmio.ReadInboxMessage(batchNum), nil + } + newBlock, _, err = arbos.ProduceBlock(message.Message, message.DelayedMessagesRead, lastBlockHeader, statedb, chainContext, chainConfig, batchFetcher) + if err != nil { + panic(err) + } } else { // Initialize ArbOS with this init message and create the genesis block. 
@@ -171,7 +181,7 @@ func main() { if err != nil { panic(err) } - chainConfig, err := arbos.GetChainConfig(chainId) + chainConfig, err := arbos.GetChainConfig(chainId, 0) if err != nil { panic(err) } @@ -180,7 +190,7 @@ func main() { panic(fmt.Sprintf("Error initializing ArbOS: %v", err.Error())) } - newBlock = arbosState.MakeGenesisBlock(common.Hash{}, 0, 0, statedb.IntermediateRoot(true)) + newBlock = arbosState.MakeGenesisBlock(common.Hash{}, 0, 0, statedb.IntermediateRoot(true), chainConfig) } diff --git a/contracts/deploy/InboxStubCreator.js b/contracts/deploy/InboxStubCreator.js index 4694ac8da4..69d23cf530 100644 --- a/contracts/deploy/InboxStubCreator.js +++ b/contracts/deploy/InboxStubCreator.js @@ -9,7 +9,7 @@ module.exports = async (hre) => { const inbox = await ethers.getContract("InboxStub"); if (inboxDeployResult.newlyDeployed) { - await bridge.setInbox(inbox.address, true); + await bridge.setDelayedInbox(inbox.address, true); await inbox.initialize(bridge.address); } }; diff --git a/contracts/hardhat.config.ts b/contracts/hardhat.config.ts index 8d31f11426..e4026fdca4 100644 --- a/contracts/hardhat.config.ts +++ b/contracts/hardhat.config.ts @@ -41,6 +41,9 @@ module.exports = { mocha: { timeout: 0, }, + gasReporter: { + enabled: (process.env.DISABLE_GAS_REPORTER) ? 
false : true + }, typechain: { outDir: 'build/types', target: 'ethers-v5', diff --git a/contracts/src/bridge/Bridge.sol b/contracts/src/bridge/Bridge.sol index db347e28fc..68198ef881 100644 --- a/contracts/src/bridge/Bridge.sol +++ b/contracts/src/bridge/Bridge.sol @@ -4,21 +4,23 @@ pragma solidity ^0.8.4; -import "@openzeppelin/contracts-upgradeable/access/OwnableUpgradeable.sol"; +import "@openzeppelin/contracts-upgradeable/proxy/utils/Initializable.sol"; import "@openzeppelin/contracts-upgradeable/utils/AddressUpgradeable.sol"; import "./IBridge.sol"; import "./Messages.sol"; import "../libraries/DelegateCallAware.sol"; +import {L1MessageType_batchPostingReport} from "../libraries/MessageTypes.sol"; + /** * @title Staging ground for incoming and outgoing messages - * @notice Holds the inbox accumulator for delayed messages, and is the ETH escrow - * for value sent with these messages. + * @notice Holds the inbox accumulator for sequenced and delayed messages. + * It is also the ETH escrow for value sent with these messages. * Since the escrow is held here, this contract also contains a list of allowed * outboxes that can make calls from here and withdraw this escrow. */ -contract Bridge is OwnableUpgradeable, DelegateCallAware, IBridge { +contract Bridge is Initializable, DelegateCallAware, IBridge { using AddressUpgradeable for address; struct InOutInfo { @@ -26,22 +28,38 @@ contract Bridge is OwnableUpgradeable, DelegateCallAware, IBridge { bool allowed; } - mapping(address => InOutInfo) private allowedInboxesMap; + mapping(address => InOutInfo) private allowedDelayedInboxesMap; mapping(address => InOutInfo) private allowedOutboxesMap; - address[] public allowedInboxList; + address[] public allowedDelayedInboxList; address[] public allowedOutboxList; address private _activeOutbox; /// @dev Accumulator for delayed inbox messages; tail represents hash of the current state; each element represents the inclusion of a new message. 
- bytes32[] public override inboxAccs; + bytes32[] public override delayedInboxAccs; + + /// @dev Accumulator for sequencer inbox messages; tail represents hash of the current state; each element represents the inclusion of a new message. + bytes32[] public override sequencerInboxAccs; + + IOwnable public override rollup; + address public sequencerInbox; address private constant EMPTY_ACTIVEOUTBOX = address(type(uint160).max); - function initialize() external initializer onlyDelegated { + function initialize(IOwnable rollup_) external initializer onlyDelegated { _activeOutbox = EMPTY_ACTIVEOUTBOX; - __Ownable_init(); + rollup = rollup_; + } + + modifier onlyRollupOrOwner() { + if (msg.sender != address(rollup)) { + address rollupOwner = rollup.owner(); + if (msg.sender != rollupOwner) { + revert NotRollupOrOwner(msg.sender, address(rollup), rollupOwner); + } + } + _; } /// @dev returns the address of current active Outbox, or zero if no outbox is active @@ -55,14 +73,64 @@ contract Bridge is OwnableUpgradeable, DelegateCallAware, IBridge { return outbox; } - function allowedInboxes(address inbox) external view override returns (bool) { - return allowedInboxesMap[inbox].allowed; + function allowedDelayedInboxes(address inbox) external view override returns (bool) { + return allowedDelayedInboxesMap[inbox].allowed; } function allowedOutboxes(address outbox) external view override returns (bool) { return allowedOutboxesMap[outbox].allowed; } + modifier onlySequencerInbox() { + if (msg.sender != sequencerInbox) revert NotSequencerInbox(msg.sender); + _; + } + + function enqueueSequencerMessage(bytes32 dataHash, uint256 afterDelayedMessagesRead) + external + override + onlySequencerInbox + returns ( + uint256 seqMessageIndex, + bytes32 beforeAcc, + bytes32 delayedAcc, + bytes32 acc + ) + { + seqMessageIndex = sequencerInboxAccs.length; + if (sequencerInboxAccs.length > 0) { + beforeAcc = sequencerInboxAccs[sequencerInboxAccs.length - 1]; + } + if 
(afterDelayedMessagesRead > 0) { + delayedAcc = delayedInboxAccs[afterDelayedMessagesRead - 1]; + } + acc = keccak256(abi.encodePacked(beforeAcc, dataHash, delayedAcc)); + sequencerInboxAccs.push(acc); + } + + /** + * @dev allows the sequencer inbox to submit a delayed message of the batchPostingReport type + * This is done through a separate function entrypoint instead of allowing the sequencer inbox + * to call `enqueueDelayedMessage` to avoid the gas overhead of an extra SLOAD in either + * every delayed inbox or every sequencer inbox call. + */ + function submitBatchSpendingReport(address sender, bytes32 messageDataHash) + external + override + onlySequencerInbox + returns (uint256) + { + return + addMessageToDelayedAccumulator( + L1MessageType_batchPostingReport, + sender, + uint64(block.number), + uint64(block.timestamp), // solhint-disable-line not-rely-on-time, + block.basefee, + messageDataHash + ); + } + /** * @dev Enqueue a message in the delayed inbox accumulator. * These messages are later sequenced in the SequencerInbox, either by the sequencer as @@ -73,9 +141,9 @@ contract Bridge is OwnableUpgradeable, DelegateCallAware, IBridge { address sender, bytes32 messageDataHash ) external payable override returns (uint256) { - if (!allowedInboxesMap[msg.sender].allowed) revert NotInbox(msg.sender); + if (!allowedDelayedInboxesMap[msg.sender].allowed) revert NotDelayedInbox(msg.sender); return - addMessageToAccumulator( + addMessageToDelayedAccumulator( kind, sender, uint64(block.number), @@ -85,7 +153,7 @@ contract Bridge is OwnableUpgradeable, DelegateCallAware, IBridge { ); } - function addMessageToAccumulator( + function addMessageToDelayedAccumulator( uint8 kind, address sender, uint64 blockNumber, @@ -93,7 +161,7 @@ contract Bridge is OwnableUpgradeable, DelegateCallAware, IBridge { uint256 baseFeeL1, bytes32 messageDataHash ) internal returns (uint256) { - uint256 count = inboxAccs.length; + uint256 count = delayedInboxAccs.length; bytes32 messageHash 
= Messages.messageHash( kind, sender, @@ -105,9 +173,9 @@ contract Bridge is OwnableUpgradeable, DelegateCallAware, IBridge { ); bytes32 prevAcc = 0; if (count > 0) { - prevAcc = inboxAccs[count - 1]; + prevAcc = delayedInboxAccs[count - 1]; } - inboxAccs.push(Messages.accumulateInboxMessage(prevAcc, messageHash)); + delayedInboxAccs.push(Messages.accumulateInboxMessage(prevAcc, messageHash)); emit MessageDelivered( count, prevAcc, @@ -140,25 +208,32 @@ contract Bridge is OwnableUpgradeable, DelegateCallAware, IBridge { emit BridgeCallTriggered(msg.sender, to, value, data); } - function setInbox(address inbox, bool enabled) external override onlyOwner { - InOutInfo storage info = allowedInboxesMap[inbox]; + function setSequencerInbox(address _sequencerInbox) external override onlyRollupOrOwner { + sequencerInbox = _sequencerInbox; + emit SequencerInboxUpdated(_sequencerInbox); + } + + function setDelayedInbox(address inbox, bool enabled) external override onlyRollupOrOwner { + InOutInfo storage info = allowedDelayedInboxesMap[inbox]; bool alreadyEnabled = info.allowed; emit InboxToggle(inbox, enabled); if ((alreadyEnabled && enabled) || (!alreadyEnabled && !enabled)) { return; } if (enabled) { - allowedInboxesMap[inbox] = InOutInfo(allowedInboxList.length, true); - allowedInboxList.push(inbox); + allowedDelayedInboxesMap[inbox] = InOutInfo(allowedDelayedInboxList.length, true); + allowedDelayedInboxList.push(inbox); } else { - allowedInboxList[info.index] = allowedInboxList[allowedInboxList.length - 1]; - allowedInboxesMap[allowedInboxList[info.index]].index = info.index; - allowedInboxList.pop(); - delete allowedInboxesMap[inbox]; + allowedDelayedInboxList[info.index] = allowedDelayedInboxList[ + allowedDelayedInboxList.length - 1 + ]; + allowedDelayedInboxesMap[allowedDelayedInboxList[info.index]].index = info.index; + allowedDelayedInboxList.pop(); + delete allowedDelayedInboxesMap[inbox]; } } - function setOutbox(address outbox, bool enabled) external override 
onlyOwner { + function setOutbox(address outbox, bool enabled) external override onlyRollupOrOwner { if (outbox == EMPTY_ACTIVEOUTBOX) revert InvalidOutboxSet(outbox); InOutInfo storage info = allowedOutboxesMap[outbox]; @@ -178,7 +253,11 @@ contract Bridge is OwnableUpgradeable, DelegateCallAware, IBridge { } } - function messageCount() external view override returns (uint256) { - return inboxAccs.length; + function delayedMessageCount() external view override returns (uint256) { + return delayedInboxAccs.length; + } + + function sequencerMessageCount() external view override returns (uint256) { + return sequencerInboxAccs.length; } } diff --git a/contracts/src/bridge/IBridge.sol b/contracts/src/bridge/IBridge.sol index e3e01019d3..8cc5dd1ee0 100644 --- a/contracts/src/bridge/IBridge.sol +++ b/contracts/src/bridge/IBridge.sol @@ -4,11 +4,16 @@ pragma solidity ^0.8.4; -import {NotContract} from "../libraries/Error.sol"; +import {NotContract, NotRollupOrOwner} from "../libraries/Error.sol"; +import "./IOwnable.sol"; /// @dev Thrown when an un-authorized address tries to access an only-inbox function /// @param sender The un-authorized sender -error NotInbox(address sender); +error NotDelayedInbox(address sender); + +/// @dev Thrown when an un-authorized address tries to access an only-sequencer-inbox function +/// @param sender The un-authorized sender +error NotSequencerInbox(address sender); /// @dev Thrown when an un-authorized address tries to access an only-outbox function /// @param sender The un-authorized sender @@ -41,12 +46,27 @@ interface IBridge { event OutboxToggle(address indexed outbox, bool enabled); + event SequencerInboxUpdated(address newSequencerInbox); + function enqueueDelayedMessage( uint8 kind, address sender, bytes32 messageDataHash ) external payable returns (uint256); + function enqueueSequencerMessage(bytes32 dataHash, uint256 afterDelayedMessagesRead) + external + returns ( + uint256 seqMessageIndex, + bytes32 beforeAcc, + bytes32 
delayedAcc, + bytes32 acc + ); + + function submitBatchSpendingReport(address batchPoster, bytes32 dataHash) + external + returns (uint256 msgNum); + function executeCall( address to, uint256 value, @@ -54,19 +74,29 @@ interface IBridge { ) external returns (bool success, bytes memory returnData); // These are only callable by the admin - function setInbox(address inbox, bool enabled) external; + function setDelayedInbox(address inbox, bool enabled) external; function setOutbox(address inbox, bool enabled) external; + function setSequencerInbox(address _sequencerInbox) external; + // View functions + function sequencerInbox() external view returns (address); + function activeOutbox() external view returns (address); - function allowedInboxes(address inbox) external view returns (bool); + function allowedDelayedInboxes(address inbox) external view returns (bool); function allowedOutboxes(address outbox) external view returns (bool); - function inboxAccs(uint256 index) external view returns (bytes32); + function delayedInboxAccs(uint256 index) external view returns (bytes32); + + function sequencerInboxAccs(uint256 index) external view returns (bytes32); + + function delayedMessageCount() external view returns (uint256); + + function sequencerMessageCount() external view returns (uint256); - function messageCount() external view returns (uint256); + function rollup() external view returns (IOwnable); } diff --git a/contracts/src/bridge/IMessageProvider.sol b/contracts/src/bridge/IDelayedMessageProvider.sol similarity index 52% rename from contracts/src/bridge/IMessageProvider.sol rename to contracts/src/bridge/IDelayedMessageProvider.sol index afdfe8e306..7c6ef8ebc5 100644 --- a/contracts/src/bridge/IMessageProvider.sol +++ b/contracts/src/bridge/IDelayedMessageProvider.sol @@ -4,8 +4,11 @@ pragma solidity ^0.8.0; -interface IMessageProvider { +interface IDelayedMessageProvider { + /// @dev event emitted when a inbox message is added to the Bridge's delayed 
accumulator event InboxMessageDelivered(uint256 indexed messageNum, bytes data); + /// @dev event emitted when a inbox message is added to the Bridge's delayed accumulator + /// same as InboxMessageDelivered but the batch data is available in tx.input event InboxMessageDeliveredFromOrigin(uint256 indexed messageNum); } diff --git a/contracts/src/bridge/IInbox.sol b/contracts/src/bridge/IInbox.sol index 3ecf47dd7d..5dd163762f 100644 --- a/contracts/src/bridge/IInbox.sol +++ b/contracts/src/bridge/IInbox.sol @@ -5,7 +5,7 @@ pragma solidity ^0.8.4; import "./IBridge.sol"; -import "./IMessageProvider.sol"; +import "./IDelayedMessageProvider.sol"; import {AlreadyInit, NotOrigin, DataTooLarge} from "../libraries/Error.sol"; /// @dev The contract is paused, so cannot be paused @@ -41,7 +41,7 @@ error RetryableData( bytes data ); -interface IInbox is IMessageProvider { +interface IInbox is IDelayedMessageProvider { function sendL2Message(bytes calldata messageData) external returns (uint256); function sendUnsignedTransaction( @@ -88,6 +88,7 @@ interface IInbox is IMessageProvider { bytes calldata data ) external payable returns (uint256); + /// @notice TEMPORARILY DISABLED as exact mechanics are being worked out /// @dev Gas limit and maxFeePerGas should not be set to 1 as that is used to trigger the RetryableData error function unsafeCreateRetryableTicket( address to, diff --git a/contracts/src/bridge/IOwnable.sol b/contracts/src/bridge/IOwnable.sol new file mode 100644 index 0000000000..6202d9f17c --- /dev/null +++ b/contracts/src/bridge/IOwnable.sol @@ -0,0 +1,9 @@ +// Copyright 2021-2022, Offchain Labs, Inc. 
+// For license information, see https://github.com/nitro/blob/master/LICENSE +// SPDX-License-Identifier: BUSL-1.1 + +pragma solidity ^0.8.4; + +interface IOwnable { + function owner() external view returns (address); +} diff --git a/contracts/src/bridge/ISequencerInbox.sol b/contracts/src/bridge/ISequencerInbox.sol index 206fa22ba1..f0ba22ae37 100644 --- a/contracts/src/bridge/ISequencerInbox.sol +++ b/contracts/src/bridge/ISequencerInbox.sol @@ -6,8 +6,9 @@ pragma solidity ^0.8.0; import "../libraries/IGasRefunder.sol"; import {AlreadyInit, HadZeroInit, NotOrigin, DataTooLarge, NotRollup} from "../libraries/Error.sol"; +import "./IDelayedMessageProvider.sol"; -interface ISequencerInbox { +interface ISequencerInbox is IDelayedMessageProvider { struct MaxTimeVariation { uint256 delayBlocks; uint256 futureBlocks; @@ -55,9 +56,6 @@ interface ISequencerInbox { /// @dev Thrown when someone attempts to read more messages than exist error DelayedTooFar(); - /// @dev Thrown if the length of the header plus the length of the batch overflows - error DataLengthOverflow(); - /// @dev Force include can only read messages more blocks old than the delay period error ForceIncludeBlockTooSoon(); @@ -71,7 +69,7 @@ interface ISequencerInbox { error NotBatchPoster(); /// @dev The sequence number provided to this message was inconsistent with the number of batches already included - error BadSequencerNumber(); + error BadSequencerNumber(uint256 stored, uint256 received); /// @dev The batch data has the inbox authenticated bit set, but the batch data was not authenticated by the inbox error DataNotAuthenticated(); diff --git a/contracts/src/bridge/Inbox.sol b/contracts/src/bridge/Inbox.sol index f216cc6181..4abf37eb47 100644 --- a/contracts/src/bridge/Inbox.sol +++ b/contracts/src/bridge/Inbox.sol @@ -5,6 +5,7 @@ pragma solidity ^0.8.4; import "./IInbox.sol"; +import "./ISequencerInbox.sol"; import "./IBridge.sol"; import "./Messages.sol"; @@ -20,7 +21,6 @@ import { } from 
"../libraries/MessageTypes.sol"; import {MAX_DATA_SIZE} from "../libraries/Constants.sol"; -import "@openzeppelin/contracts-upgradeable/access/OwnableUpgradeable.sol"; import "@openzeppelin/contracts-upgradeable/utils/AddressUpgradeable.sol"; import "@openzeppelin/contracts-upgradeable/security/PausableUpgradeable.sol"; @@ -31,6 +31,7 @@ import "@openzeppelin/contracts-upgradeable/security/PausableUpgradeable.sol"; */ contract Inbox is DelegateCallAware, PausableUpgradeable, IInbox { IBridge public override bridge; + ISequencerInbox public sequencerInbox; /// ------------------------------------ allow list start ------------------------------------ /// @@ -40,7 +41,7 @@ contract Inbox is DelegateCallAware, PausableUpgradeable, IInbox { event AllowListAddressSet(address indexed user, bool val); event AllowListEnabledUpdated(bool isEnabled); - function setAllowList(address[] memory user, bool[] memory val) external onlyOwner { + function setAllowList(address[] memory user, bool[] memory val) external onlyRollupOrOwner { require(user.length == val.length, "INVALID_INPUT"); for (uint256 i = 0; i < user.length; i++) { @@ -49,7 +50,7 @@ contract Inbox is DelegateCallAware, PausableUpgradeable, IInbox { } } - function setAllowListEnabled(bool _allowListEnabled) external onlyOwner { + function setAllowListEnabled(bool _allowListEnabled) external onlyRollupOrOwner { require(_allowListEnabled != allowListEnabled, "ALREADY_SET"); allowListEnabled = _allowListEnabled; emit AllowListEnabledUpdated(_allowListEnabled); @@ -66,26 +67,35 @@ contract Inbox is DelegateCallAware, PausableUpgradeable, IInbox { /// ------------------------------------ allow list end ------------------------------------ /// - modifier onlyOwner() { - // whoevever owns the Bridge, also owns the Inbox. 
this is usually the rollup contract - address bridgeOwner = OwnableUpgradeable(address(bridge)).owner(); - if (msg.sender != bridgeOwner) revert NotOwner(msg.sender, bridgeOwner); + modifier onlyRollupOrOwner() { + IOwnable rollup = bridge.rollup(); + if (msg.sender != address(rollup)) { + address rollupOwner = rollup.owner(); + if (msg.sender != rollupOwner) { + revert NotRollupOrOwner(msg.sender, address(rollup), rollupOwner); + } + } _; } /// @notice pauses all inbox functionality - function pause() external onlyOwner { + function pause() external onlyRollupOrOwner { _pause(); } /// @notice unpauses all inbox functionality - function unpause() external onlyOwner { + function unpause() external onlyRollupOrOwner { _unpause(); } - function initialize(IBridge _bridge) external initializer onlyDelegated { + function initialize(IBridge _bridge, ISequencerInbox _sequencerInbox) + external + initializer + onlyDelegated + { if (address(bridge) != address(0)) revert AlreadyInit(); bridge = _bridge; + sequencerInbox = _sequencerInbox; allowListEnabled = false; __Pausable_init(); } @@ -248,24 +258,23 @@ contract Inbox is DelegateCallAware, PausableUpgradeable, IInbox { /// Look into retryable tickets if you are interested in this functionality. /// @dev this function should not be called inside contract constructors function depositEth() public payable override whenNotPaused onlyAllowed returns (uint256) { - address sender = msg.sender; + address dest = msg.sender; // solhint-disable-next-line avoid-tx-origin - if (!AddressUpgradeable.isContract(sender) && tx.origin == msg.sender) { + if (AddressUpgradeable.isContract(msg.sender) || tx.origin != msg.sender) { // isContract check fails if this function is called during a contract's constructor. // We don't adjust the address for calls coming from L1 contracts since their addresses get remapped // If the caller is an EOA, we adjust the address. 
// This is needed because unsigned messages to the L2 (such as retryables) // have the L1 sender address mapped. - // Here we preemptively reverse the mapping for EOAs so deposits work as expected - sender = AddressAliasHelper.undoL1ToL2Alias(sender); + dest = AddressAliasHelper.applyL1ToL2Alias(msg.sender); } return _deliverMessage( L1MessageType_ethDeposit, - sender, // arb-os will add the alias to this value - abi.encodePacked(msg.value) + msg.sender, + abi.encodePacked(dest, msg.value) ); } @@ -344,8 +353,12 @@ contract Inbox is DelegateCallAware, PausableUpgradeable, IInbox { bytes calldata data ) external payable virtual override whenNotPaused onlyAllowed returns (uint256) { // ensure the user's deposit alone will make submission succeed - if (msg.value < maxSubmissionCost + l2CallValue) - revert InsufficientValue(maxSubmissionCost + l2CallValue, msg.value); + if (msg.value < (maxSubmissionCost + l2CallValue + gasLimit * maxFeePerGas)) { + revert InsufficientValue( + maxSubmissionCost + l2CallValue + gasLimit * maxFeePerGas, + msg.value + ); + } // if a refund address is a contract, we apply the alias to it // so that it can access its funds on the L2 @@ -359,7 +372,7 @@ contract Inbox is DelegateCallAware, PausableUpgradeable, IInbox { } return - unsafeCreateRetryableTicket( + unsafeCreateRetryableTicketInternal( to, l2CallValue, maxSubmissionCost, @@ -388,7 +401,7 @@ contract Inbox is DelegateCallAware, PausableUpgradeable, IInbox { * @param data ABI encoded data of L2 message * @return unique id for retryable transaction (keccak256(requestID, uint(0) ) */ - function unsafeCreateRetryableTicket( + function unsafeCreateRetryableTicketInternal( address to, uint256 l2CallValue, uint256 maxSubmissionCost, @@ -397,7 +410,7 @@ contract Inbox is DelegateCallAware, PausableUpgradeable, IInbox { uint256 gasLimit, uint256 maxFeePerGas, bytes calldata data - ) public payable virtual override whenNotPaused onlyAllowed returns (uint256) { + ) internal virtual 
whenNotPaused onlyAllowed returns (uint256) { // gas price and limit of 1 should never be a valid input, so instead they are used as // magic values to trigger a revert in eth calls that surface data without requiring a tx trace if (gasLimit == 1 || maxFeePerGas == 1) @@ -437,6 +450,19 @@ contract Inbox is DelegateCallAware, PausableUpgradeable, IInbox { ); } + function unsafeCreateRetryableTicket( + address, + uint256, + uint256, + address, + address, + uint256, + uint256, + bytes calldata + ) public payable override returns (uint256) { + revert("UNSAFE_RETRYABLES_TEMPORARILY_DISABLED"); + } + function _deliverMessage( uint8 _kind, address _sender, @@ -444,7 +470,11 @@ contract Inbox is DelegateCallAware, PausableUpgradeable, IInbox { ) internal returns (uint256) { if (_messageData.length > MAX_DATA_SIZE) revert DataTooLarge(_messageData.length, MAX_DATA_SIZE); - uint256 msgNum = deliverToBridge(_kind, _sender, keccak256(_messageData)); + uint256 msgNum = deliverToBridge( + _kind, + AddressAliasHelper.applyL1ToL2Alias(_sender), + keccak256(_messageData) + ); emit InboxMessageDelivered(msgNum, _messageData); return msgNum; } diff --git a/contracts/src/bridge/Outbox.sol b/contracts/src/bridge/Outbox.sol index 06c5c6ff5d..9dee5fab0f 100644 --- a/contracts/src/bridge/Outbox.sol +++ b/contracts/src/bridge/Outbox.sol @@ -41,8 +41,8 @@ contract Outbox is DelegateCallAware, IOutbox { uint128 public constant OUTBOX_VERSION = 2; - function initialize(address _rollup, IBridge _bridge) external onlyDelegated { - if (rollup != address(0)) revert AlreadyInit(); + function initialize(IBridge _bridge) external onlyDelegated { + if (address(bridge) != address(0)) revert AlreadyInit(); // address zero is returned if no context is set, but the values used in storage // are non-zero to save users some gas (as storage refunds are usually maxed out) // EIP-1153 would help here @@ -53,8 +53,8 @@ contract Outbox is DelegateCallAware, IOutbox { outputId: OUTPUTID_DEFAULT_CONTEXT, sender: 
SENDER_DEFAULT_CONTEXT }); - rollup = _rollup; bridge = _bridge; + rollup = address(_bridge.rollup()); } function updateSendRoot(bytes32 root, bytes32 l2BlockHash) external override { diff --git a/contracts/src/bridge/SequencerInbox.sol b/contracts/src/bridge/SequencerInbox.sol index 1c1e022886..dac329be22 100644 --- a/contracts/src/bridge/SequencerInbox.sol +++ b/contracts/src/bridge/SequencerInbox.sol @@ -5,10 +5,12 @@ pragma solidity ^0.8.0; import "./IBridge.sol"; +import "./IInbox.sol"; import "./ISequencerInbox.sol"; import "../rollup/IRollupLogic.sol"; import "./Messages.sol"; +import {L1MessageType_batchPostingReport} from "../libraries/MessageTypes.sol"; import {GasRefundEnabled, IGasRefunder} from "../libraries/IGasRefunder.sol"; import "../libraries/DelegateCallAware.sol"; import {MAX_DATA_SIZE} from "../libraries/Constants.sol"; @@ -21,10 +23,9 @@ import {MAX_DATA_SIZE} from "../libraries/Constants.sol"; * sequencer within a time limit they can be force included into the rollup inbox by anyone. */ contract SequencerInbox is DelegateCallAware, GasRefundEnabled, ISequencerInbox { - bytes32[] public override inboxAccs; uint256 public totalDelayedMessagesRead; - IBridge public delayedBridge; + IBridge public bridge; /// @dev The size of the batch header uint256 public constant HEADER_LENGTH = 40; @@ -32,27 +33,29 @@ contract SequencerInbox is DelegateCallAware, GasRefundEnabled, ISequencerInbox /// the sequencer inbox has authenticated the data. Currently not used. 
bytes1 public constant DATA_AUTHENTICATED_FLAG = 0x40; - address public rollup; + IOwnable public rollup; mapping(address => bool) public isBatchPoster; ISequencerInbox.MaxTimeVariation public maxTimeVariation; - mapping(bytes32 => bool) public isValidKeysetHash; - mapping(bytes32 => uint256) public keysetHashCreationBlock; + struct DasKeySetInfo { + bool isValidKeyset; + uint64 creationBlock; + } + mapping(bytes32 => DasKeySetInfo) public dasKeySetInfo; modifier onlyRollupOwner() { - if (msg.sender != IRollupUserAbs(rollup).owner()) revert NotOwner(msg.sender, rollup); + if (msg.sender != rollup.owner()) revert NotOwner(msg.sender, address(rollup)); _; } function initialize( - IBridge delayedBridge_, - address rollup_, + IBridge bridge_, ISequencerInbox.MaxTimeVariation calldata maxTimeVariation_ ) external onlyDelegated { - if (delayedBridge != IBridge(address(0))) revert AlreadyInit(); - if (delayedBridge_ == IBridge(address(0))) revert HadZeroInit(); - delayedBridge = delayedBridge_; - rollup = rollup_; + if (bridge != IBridge(address(0))) revert AlreadyInit(); + if (bridge_ == IBridge(address(0))) revert HadZeroInit(); + bridge = bridge_; + rollup = bridge_.rollup(); maxTimeVariation = maxTimeVariation_; } @@ -107,22 +110,24 @@ contract SequencerInbox is DelegateCallAware, GasRefundEnabled, ISequencerInbox // Verify that message hash represents the last message sequence of delayed message to be included bytes32 prevDelayedAcc = 0; if (_totalDelayedMessagesRead > 1) { - prevDelayedAcc = delayedBridge.inboxAccs(_totalDelayedMessagesRead - 2); + prevDelayedAcc = bridge.delayedInboxAccs(_totalDelayedMessagesRead - 2); } if ( - delayedBridge.inboxAccs(_totalDelayedMessagesRead - 1) != + bridge.delayedInboxAccs(_totalDelayedMessagesRead - 1) != Messages.accumulateInboxMessage(prevDelayedAcc, messageHash) ) revert IncorrectMessagePreimage(); (bytes32 dataHash, TimeBounds memory timeBounds) = formEmptyDataHash( _totalDelayedMessagesRead ); - (bytes32 beforeAcc, 
bytes32 delayedAcc, bytes32 afterAcc) = addSequencerL2BatchImpl( - dataHash, - _totalDelayedMessagesRead - ); + ( + uint256 seqMessageIndex, + bytes32 beforeAcc, + bytes32 delayedAcc, + bytes32 afterAcc + ) = addSequencerL2BatchImpl(dataHash, _totalDelayedMessagesRead, 0); emit SequencerBatchDelivered( - inboxAccs.length - 1, + seqMessageIndex, beforeAcc, afterAcc, delayedAcc, @@ -141,17 +146,20 @@ contract SequencerInbox is DelegateCallAware, GasRefundEnabled, ISequencerInbox // solhint-disable-next-line avoid-tx-origin if (msg.sender != tx.origin) revert NotOrigin(); if (!isBatchPoster[msg.sender]) revert NotBatchPoster(); - if (inboxAccs.length != sequenceNumber) revert BadSequencerNumber(); (bytes32 dataHash, TimeBounds memory timeBounds) = formDataHash( data, afterDelayedMessagesRead ); - (bytes32 beforeAcc, bytes32 delayedAcc, bytes32 afterAcc) = addSequencerL2BatchImpl( - dataHash, - afterDelayedMessagesRead - ); + ( + uint256 seqMessageIndex, + bytes32 beforeAcc, + bytes32 delayedAcc, + bytes32 afterAcc + ) = addSequencerL2BatchImpl(dataHash, afterDelayedMessagesRead, data.length); + if (seqMessageIndex != sequenceNumber) + revert BadSequencerNumber(seqMessageIndex, sequenceNumber); emit SequencerBatchDelivered( - inboxAccs.length - 1, + sequenceNumber, beforeAcc, afterAcc, delayedAcc, @@ -167,17 +175,22 @@ contract SequencerInbox is DelegateCallAware, GasRefundEnabled, ISequencerInbox uint256 afterDelayedMessagesRead, IGasRefunder gasRefunder ) external override refundsGas(gasRefunder) { - if (!isBatchPoster[msg.sender] && msg.sender != rollup) revert NotBatchPoster(); - if (inboxAccs.length != sequenceNumber) revert BadSequencerNumber(); + if (!isBatchPoster[msg.sender] && msg.sender != address(rollup)) revert NotBatchPoster(); (bytes32 dataHash, TimeBounds memory timeBounds) = formDataHash( data, afterDelayedMessagesRead ); - (bytes32 beforeAcc, bytes32 delayedAcc, bytes32 afterAcc) = addSequencerL2BatchImpl( - dataHash, - afterDelayedMessagesRead - ); + 
// we set the calldata length posted to 0 here since the caller isn't the origin + // of the tx, so they might have not paid tx input cost for the calldata + ( + uint256 seqMessageIndex, + bytes32 beforeAcc, + bytes32 delayedAcc, + bytes32 afterAcc + ) = addSequencerL2BatchImpl(dataHash, afterDelayedMessagesRead, 0); + if (seqMessageIndex != sequenceNumber) + revert BadSequencerNumber(seqMessageIndex, sequenceNumber); emit SequencerBatchDelivered( sequenceNumber, beforeAcc, @@ -190,15 +203,20 @@ contract SequencerInbox is DelegateCallAware, GasRefundEnabled, ISequencerInbox emit SequencerBatchData(sequenceNumber, data); } - function dasKeysetHashFromBatchData(bytes memory data) internal pure returns (bytes32) { - if (data.length < 33 || data[0] & 0x80 == 0) { - return bytes32(0); + modifier validateBatchData(bytes calldata data) { + uint256 fullDataLen = HEADER_LENGTH + data.length; + if (fullDataLen > MAX_DATA_SIZE) revert DataTooLarge(fullDataLen, MAX_DATA_SIZE); + if (data.length > 0 && (data[0] & DATA_AUTHENTICATED_FLAG) == DATA_AUTHENTICATED_FLAG) { + revert DataNotAuthenticated(); } - bytes32 temp; - assembly { - temp := mload(add(data, 33)) + // the first byte is used to identify the type of batch data + // das batches expect to have the type byte set, followed by the keyset (so they should have at least 33 bytes) + if (data.length >= 33 && data[0] & 0x80 != 0) { + // we skip the first byte, then read the next 32 bytes for the keyset + bytes32 dasKeysetHash = bytes32(data[1:33]); + if (!dasKeySetInfo[dasKeysetHash].isValidKeyset) revert NoSuchKeyset(dasKeysetHash); } - return temp; + _; } function packHeader(uint256 afterDelayedMessagesRead) @@ -222,29 +240,12 @@ contract SequencerInbox is DelegateCallAware, GasRefundEnabled, ISequencerInbox function formDataHash(bytes calldata data, uint256 afterDelayedMessagesRead) internal view + validateBatchData(data) returns (bytes32, TimeBounds memory) { - bytes32 dasKeysetHash = dasKeysetHashFromBatchData(data); - if 
(dasKeysetHash != bytes32(0)) { - if (!isValidKeysetHash[dasKeysetHash]) revert NoSuchKeyset(dasKeysetHash); - } - uint256 fullDataLen = HEADER_LENGTH + data.length; - if (fullDataLen < HEADER_LENGTH) revert DataLengthOverflow(); - if (fullDataLen > MAX_DATA_SIZE) revert DataTooLarge(fullDataLen, MAX_DATA_SIZE); - bytes memory fullData = new bytes(fullDataLen); (bytes memory header, TimeBounds memory timeBounds) = packHeader(afterDelayedMessagesRead); - - for (uint256 i = 0; i < HEADER_LENGTH; i++) { - fullData[i] = header[i]; - } - if (data.length > 0 && (data[0] & DATA_AUTHENTICATED_FLAG) == DATA_AUTHENTICATED_FLAG) { - revert DataNotAuthenticated(); - } - // copy data into fullData at offset of HEADER_LENGTH (the extra 32 offset is because solidity puts the array len first) - assembly { - calldatacopy(add(fullData, add(HEADER_LENGTH, 32)), data.offset, data.length) - } - return (keccak256(fullData), timeBounds); + bytes32 dataHash = keccak256(bytes.concat(header, data)); + return (dataHash, timeBounds); } function formEmptyDataHash(uint256 afterDelayedMessagesRead) @@ -256,31 +257,55 @@ contract SequencerInbox is DelegateCallAware, GasRefundEnabled, ISequencerInbox return (keccak256(header), timeBounds); } - function addSequencerL2BatchImpl(bytes32 dataHash, uint256 afterDelayedMessagesRead) + function addSequencerL2BatchImpl( + bytes32 dataHash, + uint256 afterDelayedMessagesRead, + uint256 calldataLengthPosted + ) internal returns ( + uint256 seqMessageIndex, bytes32 beforeAcc, bytes32 delayedAcc, bytes32 acc ) { if (afterDelayedMessagesRead < totalDelayedMessagesRead) revert DelayedBackwards(); - if (afterDelayedMessagesRead > delayedBridge.messageCount()) revert DelayedTooFar(); + if (afterDelayedMessagesRead > bridge.delayedMessageCount()) revert DelayedTooFar(); - if (inboxAccs.length > 0) { - beforeAcc = inboxAccs[inboxAccs.length - 1]; - } - if (afterDelayedMessagesRead > 0) { - delayedAcc = delayedBridge.inboxAccs(afterDelayedMessagesRead - 1); - } + 
(seqMessageIndex, beforeAcc, delayedAcc, acc) = bridge.enqueueSequencerMessage( + dataHash, + afterDelayedMessagesRead + ); - acc = keccak256(abi.encodePacked(beforeAcc, dataHash, delayedAcc)); - inboxAccs.push(acc); totalDelayedMessagesRead = afterDelayedMessagesRead; + + if (calldataLengthPosted > 0) { + // this msg isn't included in the current sequencer batch, but instead added to + // the delayed messages queue that is yet to be included + address batchPoster = msg.sender; + bytes memory spendingReportMsg = abi.encodePacked( + block.timestamp, + batchPoster, + dataHash, + seqMessageIndex, + block.basefee + ); + uint256 msgNum = bridge.submitBatchSpendingReport( + batchPoster, + keccak256(spendingReportMsg) + ); + // this is the same event used by Inbox.sol after including a message to the delayed message accumulator + emit InboxMessageDelivered(msgNum, spendingReportMsg); + } + } + + function inboxAccs(uint256 index) external view override returns (bytes32) { + return bridge.sequencerInboxAccs(index); } function batchCount() external view override returns (uint256) { - return inboxAccs.length; + return bridge.sequencerMessageCount(); } /** @@ -289,6 +314,7 @@ contract SequencerInbox is DelegateCallAware, GasRefundEnabled, ISequencerInbox */ function setMaxTimeVariation(ISequencerInbox.MaxTimeVariation memory maxTimeVariation_) external + override onlyRollupOwner { maxTimeVariation = maxTimeVariation_; @@ -300,7 +326,7 @@ contract SequencerInbox is DelegateCallAware, GasRefundEnabled, ISequencerInbox * @param addr the address * @param isBatchPoster_ if the specified address should be authorized as a batch poster */ - function setIsBatchPoster(address addr, bool isBatchPoster_) external onlyRollupOwner { + function setIsBatchPoster(address addr, bool isBatchPoster_) external override onlyRollupOwner { isBatchPoster[addr] = isBatchPoster_; emit OwnerFunctionCalled(1); } @@ -309,11 +335,13 @@ contract SequencerInbox is DelegateCallAware, GasRefundEnabled, 
ISequencerInbox * @notice Makes Data Availability Service keyset valid * @param keysetBytes bytes of the serialized keyset */ - function setValidKeyset(bytes calldata keysetBytes) external onlyRollupOwner { + function setValidKeyset(bytes calldata keysetBytes) external override onlyRollupOwner { bytes32 ksHash = keccak256(keysetBytes); - if (isValidKeysetHash[ksHash]) revert AlreadyValidDASKeyset(ksHash); - isValidKeysetHash[ksHash] = true; - keysetHashCreationBlock[ksHash] = block.number; + if (dasKeySetInfo[ksHash].isValidKeyset) revert AlreadyValidDASKeyset(ksHash); + dasKeySetInfo[ksHash] = DasKeySetInfo({ + isValidKeyset: true, + creationBlock: uint64(block.number) + }); emit SetValidKeyset(ksHash, keysetBytes); emit OwnerFunctionCalled(2); } @@ -322,16 +350,24 @@ contract SequencerInbox is DelegateCallAware, GasRefundEnabled, ISequencerInbox * @notice Invalidates a Data Availability Service keyset * @param ksHash hash of the keyset */ - function invalidateKeysetHash(bytes32 ksHash) external onlyRollupOwner { - if (!isValidKeysetHash[ksHash]) revert NoSuchKeyset(ksHash); - isValidKeysetHash[ksHash] = false; + function invalidateKeysetHash(bytes32 ksHash) external override onlyRollupOwner { + if (!dasKeySetInfo[ksHash].isValidKeyset) revert NoSuchKeyset(ksHash); + // we don't delete the block creation value since its used to fetch the SetValidKeyset + // event efficiently. The event provides the hash preimage of the key. + // this is still needed when syncing the chain after a keyset is invalidated. 
+ dasKeySetInfo[ksHash].isValidKeyset = false; emit InvalidateKeyset(ksHash); emit OwnerFunctionCalled(3); } + function isValidKeysetHash(bytes32 ksHash) external view returns (bool) { + return dasKeySetInfo[ksHash].isValidKeyset; + } + + /// @notice the creation block is intended to still be available after a keyset is deleted function getKeysetCreationBlock(bytes32 ksHash) external view returns (uint256) { - uint256 bnum = keysetHashCreationBlock[ksHash]; - if (bnum == 0) revert NoSuchKeyset(ksHash); - return bnum; + DasKeySetInfo memory ksInfo = dasKeySetInfo[ksHash]; + if (ksInfo.creationBlock == 0) revert NoSuchKeyset(ksHash); + return uint256(ksInfo.creationBlock); } } diff --git a/contracts/src/challenge/ChallengeLib.sol b/contracts/src/challenge/ChallengeLib.sol index f1b660c5b3..e225ea1fe4 100644 --- a/contracts/src/challenge/ChallengeLib.sol +++ b/contracts/src/challenge/ChallengeLib.sol @@ -61,14 +61,12 @@ library ChallengeLib { ValueArray memory valuesArray = ValueArray({inner: startingValues}); ValueStack memory values = ValueStack({proved: valuesArray, remainingHash: 0}); ValueStack memory internalStack; - PcStack memory blocks; StackFrameWindow memory frameStack; Machine memory mach = Machine({ status: MachineStatus.RUNNING, valueStack: values, internalStack: internalStack, - blockStack: blocks, frameStack: frameStack, globalStateHash: globalStateHash, moduleIdx: 0, diff --git a/contracts/src/challenge/ChallengeManager.sol b/contracts/src/challenge/ChallengeManager.sol index cc2ef89508..e89214ad37 100644 --- a/contracts/src/challenge/ChallengeManager.sol +++ b/contracts/src/challenge/ChallengeManager.sol @@ -33,7 +33,7 @@ contract ChallengeManager is DelegateCallAware, IChallengeManager { IChallengeResultReceiver public resultReceiver; ISequencerInbox public sequencerInbox; - IBridge public delayedBridge; + IBridge public bridge; IOneStepProofEntry public osp; function challengeInfo(uint64 challengeIndex) @@ -99,14 +99,14 @@ contract ChallengeManager 
is DelegateCallAware, IChallengeManager { function initialize( IChallengeResultReceiver resultReceiver_, ISequencerInbox sequencerInbox_, - IBridge delayedBridge_, + IBridge bridge_, IOneStepProofEntry osp_ ) external override onlyDelegated { require(address(resultReceiver) == address(0), "ALREADY_INIT"); require(address(resultReceiver_) != address(0), "NO_RESULT_RECEIVER"); resultReceiver = resultReceiver_; sequencerInbox = sequencerInbox_; - delayedBridge = delayedBridge_; + bridge = bridge_; osp = osp_; } @@ -254,11 +254,7 @@ contract ChallengeManager is DelegateCallAware, IChallengeManager { } bytes32 afterHash = osp.proveOneStep( - ExecutionContext({ - maxInboxMessagesRead: challenge.maxInboxMessages, - sequencerInbox: sequencerInbox, - delayedBridge: delayedBridge - }), + ExecutionContext({maxInboxMessagesRead: challenge.maxInboxMessages, bridge: bridge}), challengeStart, selection.oldSegments[selection.challengePosition], proof diff --git a/contracts/src/challenge/IChallengeManager.sol b/contracts/src/challenge/IChallengeManager.sol index efa62fcf45..0f033cb4b5 100644 --- a/contracts/src/challenge/IChallengeManager.sol +++ b/contracts/src/challenge/IChallengeManager.sol @@ -43,7 +43,7 @@ interface IChallengeManager { function initialize( IChallengeResultReceiver resultReceiver_, ISequencerInbox sequencerInbox_, - IBridge delayedBridge_, + IBridge bridge_, IOneStepProofEntry osp_ ) external; diff --git a/contracts/src/libraries/Error.sol b/contracts/src/libraries/Error.sol index 1a2d19a112..20619d825a 100644 --- a/contracts/src/libraries/Error.sol +++ b/contracts/src/libraries/Error.sol @@ -36,3 +36,9 @@ error NotContract(address addr); /// @param actualLength The length of the merkle proof provided /// @param maxProofLength The max length a merkle proof can have error MerkleProofTooLong(uint256 actualLength, uint256 maxProofLength); + +/// @dev Thrown when an un-authorized address tries to access an admin function +/// @param sender The un-authorized sender 
+/// @param rollup The rollup, which would be authorized +/// @param owner The rollup's owner, which would be authorized +error NotRollupOrOwner(address sender, address rollup, address owner); diff --git a/contracts/src/libraries/MessageTypes.sol b/contracts/src/libraries/MessageTypes.sol index 770a8135ea..093cb332a2 100644 --- a/contracts/src/libraries/MessageTypes.sol +++ b/contracts/src/libraries/MessageTypes.sol @@ -8,6 +8,7 @@ uint8 constant L2_MSG = 3; uint8 constant L1MessageType_L2FundedByL1 = 7; uint8 constant L1MessageType_submitRetryableTx = 9; uint8 constant L1MessageType_ethDeposit = 12; +uint8 constant L1MessageType_batchPostingReport = 13; uint8 constant L2MessageType_unsignedEOATx = 0; uint8 constant L2MessageType_unsignedContractTx = 1; diff --git a/contracts/src/mocks/BridgeStub.sol b/contracts/src/mocks/BridgeStub.sol index be3fd65435..a4cc93cf7e 100644 --- a/contracts/src/mocks/BridgeStub.sol +++ b/contracts/src/mocks/BridgeStub.sol @@ -14,19 +14,28 @@ contract BridgeStub is IBridge { bool allowed; } - mapping(address => InOutInfo) private allowedInboxesMap; + mapping(address => InOutInfo) private allowedDelayedInboxesMap; //mapping(address => InOutInfo) private allowedOutboxesMap; - address[] public allowedInboxList; + address[] public allowedDelayedInboxList; address[] public allowedOutboxList; address public override activeOutbox; // Accumulator for delayed inbox; tail represents hash of the current state; each element represents the inclusion of a new message. 
- bytes32[] public override inboxAccs; + bytes32[] public override delayedInboxAccs; - function allowedInboxes(address inbox) external view override returns (bool) { - return allowedInboxesMap[inbox].allowed; + bytes32[] public override sequencerInboxAccs; + + address public sequencerInbox; + + function setSequencerInbox(address _sequencerInbox) external override { + sequencerInbox = _sequencerInbox; + emit SequencerInboxUpdated(_sequencerInbox); + } + + function allowedDelayedInboxes(address inbox) external view override returns (bool) { + return allowedDelayedInboxesMap[inbox].allowed; } function allowedOutboxes(address) external pure override returns (bool) { @@ -38,9 +47,9 @@ contract BridgeStub is IBridge { address sender, bytes32 messageDataHash ) external payable override returns (uint256) { - require(allowedInboxesMap[msg.sender].allowed, "NOT_FROM_INBOX"); + require(allowedDelayedInboxesMap[msg.sender].allowed, "NOT_FROM_INBOX"); return - addMessageToAccumulator( + addMessageToDelayedAccumulator( kind, sender, block.number, @@ -50,7 +59,34 @@ contract BridgeStub is IBridge { ); } - function addMessageToAccumulator( + function enqueueSequencerMessage(bytes32 dataHash, uint256 afterDelayedMessagesRead) + external + returns ( + uint256 seqMessageIndex, + bytes32 beforeAcc, + bytes32 delayedAcc, + bytes32 acc + ) + { + seqMessageIndex = sequencerInboxAccs.length; + if (sequencerInboxAccs.length > 0) { + beforeAcc = sequencerInboxAccs[sequencerInboxAccs.length - 1]; + } + if (afterDelayedMessagesRead > 0) { + delayedAcc = delayedInboxAccs[afterDelayedMessagesRead - 1]; + } + acc = keccak256(abi.encodePacked(beforeAcc, dataHash, delayedAcc)); + sequencerInboxAccs.push(acc); + } + + function submitBatchSpendingReport(address batchPoster, bytes32 dataHash) + external + returns (uint256) + { + // TODO: implement stub + } + + function addMessageToDelayedAccumulator( uint8, address, uint256, @@ -58,7 +94,7 @@ contract BridgeStub is IBridge { uint256, bytes32 
messageDataHash ) internal returns (uint256) { - uint256 count = inboxAccs.length; + uint256 count = delayedInboxAccs.length; bytes32 messageHash = Messages.messageHash( 0, address(uint160(0)), @@ -70,9 +106,9 @@ contract BridgeStub is IBridge { ); bytes32 prevAcc = 0; if (count > 0) { - prevAcc = inboxAccs[count - 1]; + prevAcc = delayedInboxAccs[count - 1]; } - inboxAccs.push(Messages.accumulateInboxMessage(prevAcc, messageHash)); + delayedInboxAccs.push(Messages.accumulateInboxMessage(prevAcc, messageHash)); return count; } @@ -84,21 +120,23 @@ contract BridgeStub is IBridge { revert("NOT_IMPLEMENTED"); } - function setInbox(address inbox, bool enabled) external override { - InOutInfo storage info = allowedInboxesMap[inbox]; + function setDelayedInbox(address inbox, bool enabled) external override { + InOutInfo storage info = allowedDelayedInboxesMap[inbox]; bool alreadyEnabled = info.allowed; emit InboxToggle(inbox, enabled); if ((alreadyEnabled && enabled) || (!alreadyEnabled && !enabled)) { return; } if (enabled) { - allowedInboxesMap[inbox] = InOutInfo(allowedInboxList.length, true); - allowedInboxList.push(inbox); + allowedDelayedInboxesMap[inbox] = InOutInfo(allowedDelayedInboxList.length, true); + allowedDelayedInboxList.push(inbox); } else { - allowedInboxList[info.index] = allowedInboxList[allowedInboxList.length - 1]; - allowedInboxesMap[allowedInboxList[info.index]].index = info.index; - allowedInboxList.pop(); - delete allowedInboxesMap[inbox]; + allowedDelayedInboxList[info.index] = allowedDelayedInboxList[ + allowedDelayedInboxList.length - 1 + ]; + allowedDelayedInboxesMap[allowedDelayedInboxList[info.index]].index = info.index; + allowedDelayedInboxList.pop(); + delete allowedDelayedInboxesMap[inbox]; } } @@ -109,7 +147,15 @@ contract BridgeStub is IBridge { revert("NOT_IMPLEMENTED"); } - function messageCount() external view override returns (uint256) { - return inboxAccs.length; + function delayedMessageCount() external view override returns 
(uint256) { + return delayedInboxAccs.length; + } + + function sequencerMessageCount() external view override returns (uint256) { + return sequencerInboxAccs.length; + } + + function rollup() external pure override returns (IOwnable) { + revert("NOT_IMPLEMENTED"); } } diff --git a/contracts/src/mocks/SequencerInboxStub.sol b/contracts/src/mocks/SequencerInboxStub.sol index f11e3a1dd0..4252be97ae 100644 --- a/contracts/src/mocks/SequencerInboxStub.sol +++ b/contracts/src/mocks/SequencerInboxStub.sol @@ -8,24 +8,26 @@ import "../bridge/SequencerInbox.sol"; contract SequencerInboxStub is SequencerInbox { constructor( - IBridge delayedBridge_, + IBridge bridge_, address sequencer_, ISequencerInbox.MaxTimeVariation memory maxTimeVariation_ ) { - delayedBridge = delayedBridge_; - rollup = msg.sender; + bridge = bridge_; + rollup = IOwnable(msg.sender); maxTimeVariation = maxTimeVariation_; isBatchPoster[sequencer_] = true; } function addInitMessage() external { (bytes32 dataHash, TimeBounds memory timeBounds) = formEmptyDataHash(0); - (bytes32 beforeAcc, bytes32 delayedAcc, bytes32 afterAcc) = addSequencerL2BatchImpl( - dataHash, - 0 - ); + ( + uint256 sequencerMessageCount, + bytes32 beforeAcc, + bytes32 delayedAcc, + bytes32 afterAcc + ) = addSequencerL2BatchImpl(dataHash, 0, 0); emit SequencerBatchDelivered( - inboxAccs.length - 1, + sequencerMessageCount, beforeAcc, afterAcc, delayedAcc, diff --git a/contracts/src/mocks/Simple.sol b/contracts/src/mocks/Simple.sol index 28c2905906..ba75f814bb 100644 --- a/contracts/src/mocks/Simple.sol +++ b/contracts/src/mocks/Simple.sol @@ -4,10 +4,13 @@ pragma solidity ^0.8.0; +import "../precompiles/ArbRetryableTx.sol"; + contract Simple { uint64 public counter; event CounterEvent(uint64 count); + event RedeemedEvent(address caller, address redeemer); event NullEvent(); function increment() external { @@ -19,6 +22,11 @@ contract Simple { emit CounterEvent(counter); } + function incrementRedeem() external { + counter++; + emit 
RedeemedEvent(msg.sender, ArbRetryableTx(address(110)).getCurrentRedeemer()); + } + function emitNullEvent() external { emit NullEvent(); } diff --git a/contracts/src/node-interface/NodeInterface.sol b/contracts/src/node-interface/NodeInterface.sol index 71ea41536a..4dc2c355b9 100644 --- a/contracts/src/node-interface/NodeInterface.sol +++ b/contracts/src/node-interface/NodeInterface.sol @@ -11,7 +11,8 @@ pragma solidity >=0.4.21 <0.9.0; */ interface NodeInterface { /** - * @notice Estimate the cost of putting a message in the L2 inbox that is reexecuted + * @notice Estimate the cost of putting a message in the L2 inbox that is reexecuted. + * Use eth_estimateGas to call. * @param sender sender of the L1 and L2 transaction * @param deposit amount to deposit to sender in L2 * @param to destination L2 contract address @@ -31,7 +32,8 @@ interface NodeInterface { ) external; /** - * @notice Constructs an outbox proof of an l2->l1 send's existence in the outbox accumulator + * @notice Constructs an outbox proof of an l2->l1 send's existence in the outbox accumulator. + * Use eth_call to call. * @param size the number of elements in the accumulator * @param leaf the position of the send in the accumulator * @return send the l2->l1 send's hash @@ -48,8 +50,9 @@ interface NodeInterface { ); /** - * @notice Finds the L1 batch containing a requested L2 block, reverting if none does - * Throws if block doesn't exist, or if block number is 0 + * @notice Finds the L1 batch containing a requested L2 block, reverting if none does. + * Use eth_call to call. + * Throws if block doesn't exist, or if block number is 0. 
Use eth_call * @param blockNum The L2 block being queried * @return batch The L1 block containing the requested L2 block */ @@ -59,9 +62,33 @@ interface NodeInterface { * @notice Gets the number of L1 confirmations of the sequencer batch producing the requested L2 block * This gets the number of L1 confirmations for the input message producing the L2 block, * which happens well before the L1 rollup contract confirms the L2 block. - * Throws if block doesnt exist in the L2 chain. + * Throws if block doesnt exist in the L2 chain. Use eth_call to call. * @param blockHash The hash of the L2 block being queried * @return confirmations The number of L1 confirmations the sequencer batch has. Returns 0 if block not yet included in an L1 batch. */ function getL1Confirmations(bytes32 blockHash) external view returns (uint64 confirmations); + + /** + * @notice Same as native gas estimation, but with additional info on the l1 costs. Use eth_call to call. + * @param data the tx's calldata. Everything else like "From" and "Gas" are copied over + * @param to the tx's "To" (ignored when contractCreation is true) + * @param contractCreation whether "To" is omitted + * @return gasEstimate an estimate of the total amount of gas needed for this tx + * @return gasEstimateForL1 an estimate of the amount of gas needed for the l1 component of this tx + * @return baseFee the l2 base fee + * @return l1BaseFeeEstimate ArbOS's l1 estimate of the l1 base fee + */ + function gasEstimateComponents( + address to, + bool contractCreation, + bytes calldata data + ) + external + payable + returns ( + uint64 gasEstimate, + uint64 gasEstimateForL1, + uint256 baseFee, + uint256 l1BaseFeeEstimate + ); } diff --git a/contracts/src/osp/IOneStepProver.sol b/contracts/src/osp/IOneStepProver.sol index 834af62c5d..e26ff60429 100644 --- a/contracts/src/osp/IOneStepProver.sol +++ b/contracts/src/osp/IOneStepProver.sol @@ -12,8 +12,7 @@ import "../bridge/IBridge.sol"; struct ExecutionContext { uint256 
maxInboxMessagesRead; - ISequencerInbox sequencerInbox; - IBridge delayedBridge; + IBridge bridge; } abstract contract IOneStepProver { diff --git a/contracts/src/osp/OneStepProver0.sol b/contracts/src/osp/OneStepProver0.sol index 4b50313cdb..2767a8e980 100644 --- a/contracts/src/osp/OneStepProver0.sol +++ b/contracts/src/osp/OneStepProver0.sol @@ -12,7 +12,6 @@ import "./IOneStepProver.sol"; contract OneStepProver0 is IOneStepProver { using MerkleProofLib for MerkleProof; - using PcStackLib for PcStack; using StackFrameLib for StackFrameWindow; using ValueLib for Value; using ValueStackLib for ValueStack; @@ -51,8 +50,6 @@ contract OneStepProver0 is IOneStepProver { ty = ValueType.F32; } else if (opcode == Instructions.F64_CONST) { ty = ValueType.F64; - } else if (opcode == Instructions.PUSH_STACK_BOUNDARY) { - ty = ValueType.STACK_BOUNDARY; } else { revert("CONST_PUSH_INVALID_OPCODE"); } @@ -86,39 +83,6 @@ contract OneStepProver0 is IOneStepProver { } } - function executeBlock( - Machine memory mach, - Module memory, - Instruction calldata inst, - bytes calldata - ) internal pure { - uint32 targetPc = uint32(inst.argumentData); - require(targetPc == inst.argumentData, "BAD_BLOCK_PC"); - mach.blockStack.push(targetPc); - } - - function executeBranch( - Machine memory mach, - Module memory, - Instruction calldata, - bytes calldata - ) internal pure { - mach.functionPc = mach.blockStack.pop(); - } - - function executeBranchIf( - Machine memory mach, - Module memory, - Instruction calldata, - bytes calldata - ) internal pure { - uint32 cond = mach.valueStack.pop().assumeI32(); - if (cond != 0) { - // Jump to target - mach.functionPc = mach.blockStack.pop(); - } - } - function executeReturn( Machine memory mach, Module memory, @@ -419,27 +383,6 @@ contract OneStepProver0 is IOneStepProver { ); } - function executeEndBlock( - Machine memory mach, - Module memory, - Instruction calldata, - bytes calldata - ) internal pure { - mach.blockStack.pop(); - } - - function 
executeEndBlockIf( - Machine memory mach, - Module memory, - Instruction calldata, - bytes calldata - ) internal pure { - uint32 cond = mach.valueStack.peek().assumeI32(); - if (cond != 0) { - mach.blockStack.pop(); - } - } - function executeInitFrame( Machine memory mach, Module memory, @@ -476,20 +419,6 @@ contract OneStepProver0 is IOneStepProver { } } - function executeIsStackBoundary( - Machine memory mach, - Module memory, - Instruction calldata, - bytes calldata - ) internal pure { - Value memory val = mach.valueStack.pop(); - uint32 newContents = 0; - if (val.valueType == ValueType.STACK_BOUNDARY) { - newContents = 1; - } - mach.valueStack.push(ValueLib.newI32(newContents)); - } - function executeDup( Machine memory mach, Module memory, @@ -519,12 +448,6 @@ contract OneStepProver0 is IOneStepProver { impl = executeUnreachable; } else if (opcode == Instructions.NOP) { impl = executeNop; - } else if (opcode == Instructions.BLOCK) { - impl = executeBlock; - } else if (opcode == Instructions.BRANCH) { - impl = executeBranch; - } else if (opcode == Instructions.BRANCH_IF) { - impl = executeBranchIf; } else if (opcode == Instructions.RETURN) { impl = executeReturn; } else if (opcode == Instructions.CALL) { @@ -535,10 +458,6 @@ contract OneStepProver0 is IOneStepProver { impl = executeCallerModuleInternalCall; } else if (opcode == Instructions.CALL_INDIRECT) { impl = executeCallIndirect; - } else if (opcode == Instructions.END_BLOCK) { - impl = executeEndBlock; - } else if (opcode == Instructions.END_BLOCK_IF) { - impl = executeEndBlockIf; } else if (opcode == Instructions.ARBITRARY_JUMP) { impl = executeArbitraryJump; } else if (opcode == Instructions.ARBITRARY_JUMP_IF) { @@ -557,18 +476,13 @@ contract OneStepProver0 is IOneStepProver { impl = executeDrop; } else if (opcode == Instructions.SELECT) { impl = executeSelect; - } else if ( - (opcode >= Instructions.I32_CONST && opcode <= Instructions.F64_CONST) || - opcode == Instructions.PUSH_STACK_BOUNDARY - ) { + } 
else if (opcode >= Instructions.I32_CONST && opcode <= Instructions.F64_CONST) { impl = executeConstPush; } else if ( opcode == Instructions.MOVE_FROM_STACK_TO_INTERNAL || opcode == Instructions.MOVE_FROM_INTERNAL_TO_STACK ) { impl = executeMoveInternal; - } else if (opcode == Instructions.IS_STACK_BOUNDARY) { - impl = executeIsStackBoundary; } else if (opcode == Instructions.DUP) { impl = executeDup; } else { diff --git a/contracts/src/osp/OneStepProverHostIo.sol b/contracts/src/osp/OneStepProverHostIo.sol index 8d8b52f11b..bbedf000e3 100644 --- a/contracts/src/osp/OneStepProverHostIo.sol +++ b/contracts/src/osp/OneStepProverHostIo.sol @@ -10,7 +10,6 @@ import "../state/Deserialize.sol"; import "./IOneStepProver.sol"; import "../bridge/Messages.sol"; import "../bridge/IBridge.sol"; -import "../bridge/ISequencerInbox.sol"; contract OneStepProverHostIo is IOneStepProver { using GlobalStateLib for GlobalState; @@ -165,13 +164,13 @@ contract OneStepProverHostIo is IOneStepProver { bytes32 delayedAcc; if (msgIndex > 0) { - beforeAcc = execCtx.sequencerInbox.inboxAccs(msgIndex - 1); + beforeAcc = execCtx.bridge.sequencerInboxAccs(msgIndex - 1); } if (afterDelayedMsg > 0) { - delayedAcc = execCtx.delayedBridge.inboxAccs(afterDelayedMsg - 1); + delayedAcc = execCtx.bridge.delayedInboxAccs(afterDelayedMsg - 1); } bytes32 acc = keccak256(abi.encodePacked(beforeAcc, messageHash, delayedAcc)); - require(acc == execCtx.sequencerInbox.inboxAccs(msgIndex), "BAD_SEQINBOX_MESSAGE"); + require(acc == execCtx.bridge.sequencerInboxAccs(msgIndex), "BAD_SEQINBOX_MESSAGE"); return true; } @@ -185,7 +184,7 @@ contract OneStepProverHostIo is IOneStepProver { bytes32 beforeAcc; if (msgIndex > 0) { - beforeAcc = execCtx.delayedBridge.inboxAccs(msgIndex - 1); + beforeAcc = execCtx.bridge.delayedInboxAccs(msgIndex - 1); } bytes32 messageDataHash = keccak256(message[DELAYED_HEADER_LEN:]); @@ -198,7 +197,7 @@ contract OneStepProverHostIo is IOneStepProver { ); bytes32 acc = 
Messages.accumulateInboxMessage(beforeAcc, messageHash); - require(acc == execCtx.delayedBridge.inboxAccs(msgIndex), "BAD_DELAYED_MESSAGE"); + require(acc == execCtx.bridge.delayedInboxAccs(msgIndex), "BAD_DELAYED_MESSAGE"); return true; } diff --git a/contracts/src/osp/OneStepProverMath.sol b/contracts/src/osp/OneStepProverMath.sol index 498d0ca66d..1a23347463 100644 --- a/contracts/src/osp/OneStepProverMath.sol +++ b/contracts/src/osp/OneStepProverMath.sol @@ -210,38 +210,38 @@ contract OneStepProverMath is IOneStepProver { uint64 a, uint64 b, uint16 opcodeOffset - ) internal pure returns (uint64) { + ) internal pure returns (uint64, bool) { unchecked { if (opcodeOffset == 0) { // add - return a + b; + return (a + b, false); } else if (opcodeOffset == 1) { // sub - return a - b; + return (a - b, false); } else if (opcodeOffset == 2) { // mul - return a * b; + return (a * b, false); } else if (opcodeOffset == 4) { // div_u if (b == 0) { - return 0; + return (0, true); } - return a / b; + return (a / b, false); } else if (opcodeOffset == 6) { // rem_u if (b == 0) { - return 0; + return (0, true); } - return a % b; + return (a % b, false); } else if (opcodeOffset == 7) { // and - return a & b; + return (a & b, false); } else if (opcodeOffset == 8) { // or - return a | b; + return (a | b, false); } else if (opcodeOffset == 9) { // xor - return a ^ b; + return (a ^ b, false); } else { revert("INVALID_GENERIC_BIN_OP"); } @@ -263,18 +263,18 @@ contract OneStepProverMath is IOneStepProver { unchecked { if (opcodeOffset == 3) { // div_s - if (b == 0) { - res = 0; - } else { - res = uint32(int32(a) / int32(b)); + if (b == 0 || (int32(a) == -2147483648 && int32(b) == -1)) { + mach.status = MachineStatus.ERRORED; + return; } + res = uint32(int32(a) / int32(b)); } else if (opcodeOffset == 5) { // rem_s if (b == 0) { - res = 0; - } else { - res = uint32(int32(a) % int32(b)); + mach.status = MachineStatus.ERRORED; + return; } + res = uint32(int32(a) % int32(b)); } else if 
(opcodeOffset == 10) { // shl res = a << (b % 32); @@ -291,7 +291,12 @@ contract OneStepProverMath is IOneStepProver { // rotr res = rotr32(a, b); } else { - res = uint32(genericBinOp(a, b, opcodeOffset)); + (uint64 computed, bool err) = genericBinOp(a, b, opcodeOffset); + if (err) { + mach.status = MachineStatus.ERRORED; + return; + } + res = uint32(computed); } } @@ -313,18 +318,18 @@ contract OneStepProverMath is IOneStepProver { unchecked { if (opcodeOffset == 3) { // div_s - if (b == 0) { - res = 0; - } else { - res = uint64(int64(a) / int64(b)); + if (b == 0 || (int64(a) == -9223372036854775808 && int64(b) == -1)) { + mach.status = MachineStatus.ERRORED; + return; } + res = uint64(int64(a) / int64(b)); } else if (opcodeOffset == 5) { // rem_s if (b == 0) { - res = 0; - } else { - res = uint64(int64(a) % int64(b)); + mach.status = MachineStatus.ERRORED; + return; } + res = uint64(int64(a) % int64(b)); } else if (opcodeOffset == 10) { // shl res = a << (b % 64); @@ -341,7 +346,12 @@ contract OneStepProverMath is IOneStepProver { // rotr res = rotr64(a, b); } else { - res = genericBinOp(a, b, opcodeOffset); + bool err; + (res, err) = genericBinOp(a, b, opcodeOffset); + if (err) { + mach.status = MachineStatus.ERRORED; + return; + } } } diff --git a/contracts/src/osp/OneStepProverMemory.sol b/contracts/src/osp/OneStepProverMemory.sol index 7d2254d1b0..0135ef67d5 100644 --- a/contracts/src/osp/OneStepProverMemory.sol +++ b/contracts/src/osp/OneStepProverMemory.sol @@ -271,10 +271,9 @@ contract OneStepProverMemory is IOneStepProver { uint32 oldPages = uint32(mod.moduleMemory.size / PAGE_SIZE); uint32 growingPages = mach.valueStack.pop().assumeI32(); // Safe as the input integers are too small to overflow a uint256 - uint256 newSize = (uint256(oldPages) + uint256(growingPages)) * PAGE_SIZE; - // Note: we require the size remain *below* 2^32, meaning the actual limit is 2^32-PAGE_SIZE - if (newSize < (1 << 32)) { - mod.moduleMemory.size = uint64(newSize); + uint256 
newSize = uint256(oldPages) + uint256(growingPages); + if (newSize <= mod.moduleMemory.maxSize) { + mod.moduleMemory.size = uint64(newSize * PAGE_SIZE); mach.valueStack.push(ValueLib.newI32(oldPages)); } else { mach.valueStack.push(ValueLib.newI32(~uint32(0))); diff --git a/contracts/src/precompiles/ArbAggregator.sol b/contracts/src/precompiles/ArbAggregator.sol index 53958a6be2..4c01f00b6e 100644 --- a/contracts/src/precompiles/ArbAggregator.sol +++ b/contracts/src/precompiles/ArbAggregator.sol @@ -7,45 +7,35 @@ pragma solidity >=0.4.21 <0.9.0; /// @title Provides aggregators and their users methods for configuring how they participate in L1 aggregation. /// @notice Precompiled contract that exists in every Arbitrum chain at 0x000000000000000000000000000000000000006d interface ArbAggregator { - /// @notice Get the preferred aggregator for an address. - /// @param addr The address to fetch aggregator for - /// @return (preferredAggregatorAddress, isDefault) - /// isDefault is true if addr is set to prefer the default aggregator + /// @notice Deprecated, customization of preferred aggregator is no longer supported + /// @notice Get the address of an arbitrarily chosen batch poster. + /// @param addr ignored + /// @return (batchPosterAddress, true) function getPreferredAggregator(address addr) external view returns (address, bool); - /// @notice Set the caller's preferred aggregator. - /// @param prefAgg If prefAgg is zero, this sets the caller to prefer the default aggregator - function setPreferredAggregator(address prefAgg) external; - + /// @notice Deprecated, there is no longer a single preferred aggregator, use getBatchPosters instead /// @notice Get default aggregator. function getDefaultAggregator() external view returns (address); - /// @notice Set the preferred aggregator. 
- /// This reverts unless called by the aggregator, its fee collector, or a chain owner - /// @param newDefault New default aggregator - function setDefaultAggregator(address newDefault) external; - - /// @notice Get the aggregator's compression ratio - /// @param aggregator The aggregator to fetch the compression ratio for - /// @return The compression ratio, measured in basis points - function getCompressionRatio(address aggregator) external view returns (uint64); - - /// @notice Set the aggregator's compression ratio - /// This reverts unless called by the aggregator, its fee collector, or a chain owner - /// @param aggregator The aggregator to set the compression ratio for - /// @param ratio The compression ratio, measured in basis points - function setCompressionRatio(address aggregator, uint64 ratio) external; - - /// @notice Get the address where fees to aggregator are sent. - /// @param aggregator The aggregator to get the fee collector for - /// @return The fee collectors address. This will often but not always be the same as the aggregator's address. - function getFeeCollector(address aggregator) external view returns (address); - - /// @notice Set the address where fees to aggregator are sent. - /// This reverts unless called by the aggregator, its fee collector, or a chain owner - /// @param aggregator The aggregator to set the fee collector for + /// @notice Get a list of all current batch posters + /// @return Batch poster addresses + function getBatchPosters() external view returns (address[] memory); + + /// @notice Adds newBatchPoster as a batch poster + /// This reverts unless called by a chain owner + /// @param newBatchPoster New batch poster + function addBatchPoster(address newBatchPoster) external; + + /// @notice Get the address where fees to batchPoster are sent. + /// @param batchPoster The batch poster to get the fee collector for + /// @return The fee collectors address. 
This will sometimes but not always be the same as the batch poster's address. + function getFeeCollector(address batchPoster) external view returns (address); + + /// @notice Set the address where fees to batchPoster are sent. + /// This reverts unless called by the batch poster, its fee collector, or a chain owner + /// @param batchPoster The batch poster to set the fee collector for /// @param newFeeCollector The new fee collector to set - function setFeeCollector(address aggregator, address newFeeCollector) external; + function setFeeCollector(address batchPoster, address newFeeCollector) external; /// @notice Deprecated, always returns zero /// @notice Get the tx base fee (in approximate L1 gas) for aggregator diff --git a/contracts/src/precompiles/ArbBLS.sol b/contracts/src/precompiles/ArbBLS.sol index 7415903cdc..c85d0c8d3c 100644 --- a/contracts/src/precompiles/ArbBLS.sol +++ b/contracts/src/precompiles/ArbBLS.sol @@ -4,50 +4,8 @@ pragma solidity >=0.4.21 <0.9.0; -/// @title Provides a registry of BLS public keys for accounts. +/// @title Disabled precompile, formerly used to register BLS public keys. /// @notice Precompiled contract that exists in every Arbitrum chain at 0x0000000000000000000000000000000000000067. 
interface ArbBLS { - /// @notice Deprecated -- equivalent to registerAltBN128 - function register( - uint256 x0, - uint256 x1, - uint256 y0, - uint256 y1 - ) external; // DEPRECATED - /// @notice Deprecated -- equivalent to getAltBN128 - function getPublicKey(address addr) - external - view - returns ( - uint256, - uint256, - uint256, - uint256 - ); // DEPRECATED - - /// @notice Associate an AltBN128 public key with the caller's address - function registerAltBN128( - uint256 x0, - uint256 x1, - uint256 y0, - uint256 y1 - ) external; - - /// @notice Get the AltBN128 public key associated with an address (revert if there isn't one) - function getAltBN128(address addr) - external - view - returns ( - uint256, - uint256, - uint256, - uint256 - ); - - /// @notice Associate a BLS 12-381 public key with the caller's address - function registerBLS12381(bytes calldata key) external; - - /// @notice Get the BLS 12-381 public key associated with an address (revert if there isn't one) - function getBLS12381(address addr) external view returns (bytes memory); } diff --git a/contracts/src/precompiles/ArbGasInfo.sol b/contracts/src/precompiles/ArbGasInfo.sol index d4c1dfa65e..cd6f223118 100644 --- a/contracts/src/precompiles/ArbGasInfo.sol +++ b/contracts/src/precompiles/ArbGasInfo.sol @@ -75,7 +75,7 @@ interface ArbGasInfo { uint256 ); - /// @notice Get the gas accounting parameters + /// @notice Get the gas accounting parameters. `gasPoolMax` is always zero, as the exponential pricing model has no such notion. 
/// @return (speedLimitPerSecond, gasPoolMax, maxTxGasLimit) function getGasAccountingParams() external @@ -89,21 +89,6 @@ interface ArbGasInfo { /// @notice Get the minimum gas price needed for a tx to succeed function getMinimumGasPrice() external view returns (uint256); - /// @notice Get the number of seconds worth of the speed limit the gas pool contains - function getGasPoolSeconds() external view returns (uint64); - - /// @notice Get the target fullness in bips the pricing model will try to keep the pool at - function getGasPoolTarget() external view returns (uint64); - - /// @notice Get the extent in bips to which the pricing model favors filling the pool over increasing speeds - function getGasPoolWeight() external view returns (uint64); - - /// @notice Get ArbOS's estimate of the amount of gas being burnt per second - function getRateEstimate() external view returns (uint64); - - /// @notice Get how slowly ArbOS updates its estimate the amount of gas being burnt per second - function getRateEstimateInertia() external view returns (uint64); - /// @notice Get ArbOS's estimate of the L1 basefee in wei function getL1BaseFeeEstimate() external view returns (uint256); @@ -116,9 +101,6 @@ interface ArbGasInfo { /// @notice Get L1 gas fees paid by the current transaction function getCurrentTxL1GasFees() external view returns (uint256); - /// @notice Get the amount of gas remaining in the gas pool - function getGasPool() external view returns (int64); - /// @notice Get the backlogged amount of gas burnt in excess of the speed limit function getGasBacklog() external view returns (uint64); diff --git a/contracts/src/precompiles/ArbOwner.sol b/contracts/src/precompiles/ArbOwner.sol index a5f26b2754..03a4325c1d 100644 --- a/contracts/src/precompiles/ArbOwner.sol +++ b/contracts/src/precompiles/ArbOwner.sol @@ -24,9 +24,6 @@ interface ArbOwner { /// @notice Retrieves the list of chain owners function getAllChainOwners() external view returns (address[] memory); - /// 
@notice Set the L1 basefee estimate directly, bypassing the autoregression - function setL1BaseFeeEstimate(uint256 priceInWei) external; - /// @notice Set how slowly ArbOS updates its estimate of the L1 basefee function setL1BaseFeeEstimateInertia(uint64 inertia) external; @@ -39,18 +36,6 @@ interface ArbOwner { /// @notice Set the computational speed limit for the chain function setSpeedLimit(uint64 limit) external; - /// @notice Set the number of seconds worth of the speed limit the gas pool contains - function setGasPoolSeconds(uint64 factor) external; - - /// @notice Set the target fullness in bips the pricing model will try to keep the pool at - function setGasPoolTarget(uint64 target) external; - - /// @notice Set the extent in bips to which the pricing model favors filling the pool over increasing speeds - function setGasPoolWeight(uint64 weight) external; - - /// @notice Set how slowly ArbOS updates its estimate the amount of gas being burnt per second - function setRateEstimateInertia(uint64 inertia) external; - /// @notice Set the maximum size a tx (and block) can be function setMaxTxGasLimit(uint64 limit) external; @@ -69,6 +54,18 @@ interface ArbOwner { /// @notice Upgrades ArbOS to the requested version at the requested timestamp function scheduleArbOSUpgrade(uint64 newVersion, uint64 timestamp) external; + /// @notice Sets equilibration units parameter for L1 price adjustment algorithm + function setL1PricingEquilibrationUnits(uint256 equilibrationUnits) external; + + /// @notice Sets inertia parameter for L1 price adjustment algorithm + function setL1PricingInertia(uint64 inertia) external; + + /// @notice Sets reward recipient address for L1 price adjustment algorithm + function setL1PricingRewardRecipient(address recipient) external; + + /// @notice Sets reward amount for L1 price adjustment algorithm, in wei per unit + function setL1PricingRewardRate(uint64 weiPerUnit) external; + // Emitted when a successful call is made to this precompile event 
OwnerActs(bytes4 indexed method, address indexed owner, bytes data); } diff --git a/contracts/src/precompiles/ArbRetryableTx.sol b/contracts/src/precompiles/ArbRetryableTx.sol index fc7a6199ee..3de17ea12c 100644 --- a/contracts/src/precompiles/ArbRetryableTx.sol +++ b/contracts/src/precompiles/ArbRetryableTx.sol @@ -55,6 +55,13 @@ interface ArbRetryableTx { */ function cancel(bytes32 ticketId) external; + /** + * @notice Gets the redeemer of the current retryable redeem attempt. + * Returns the zero address if the current transaction is not a retryable redeem attempt. + * If this is an auto-redeem, returns the fee refund address of the retryable. + */ + function getCurrentRedeemer() external view returns (address); + /** * @notice Do not call. This method represents a retryable submission to aid explorers. * Calling it will always revert. @@ -80,7 +87,9 @@ interface ArbRetryableTx { bytes32 indexed retryTxHash, uint64 indexed sequenceNum, uint64 donatedGas, - address gasDonor + address gasDonor, + uint256 maxRefund, + uint256 submissionFeeRefund ); event Canceled(bytes32 indexed ticketId); diff --git a/contracts/src/precompiles/ArbosActs.sol b/contracts/src/precompiles/ArbosActs.sol index c4258fab88..7ca71b6518 100644 --- a/contracts/src/precompiles/ArbosActs.sol +++ b/contracts/src/precompiles/ArbosActs.sol @@ -26,16 +26,23 @@ interface ArbosActs { /** * @notice ArbOS "calls" this when starting a block * @param l1BaseFee the L1 BaseFee - * @param l2BaseFeeLastBlock the L2 BaseFee in the last block's header * @param l1BlockNumber the L1 block number * @param timePassed number of seconds since the last block */ function startBlock( uint256 l1BaseFee, - uint256 l2BaseFeeLastBlock, uint64 l1BlockNumber, + uint64 l2BlockNumber, uint64 timePassed ) external; + function batchPostingReport( + uint256 batchTimestamp, + address batchPosterAddress, + uint64 batchNumber, + uint64 batchDataGas, + uint256 l1BaseFeeWei + ) external; + error CallerNotArbOS(); } diff --git 
a/contracts/src/rollup/BridgeCreator.sol b/contracts/src/rollup/BridgeCreator.sol index e684320280..4c2c7ba7d7 100644 --- a/contracts/src/rollup/BridgeCreator.sol +++ b/contracts/src/rollup/BridgeCreator.sol @@ -9,40 +9,40 @@ import "../bridge/SequencerInbox.sol"; import "../bridge/ISequencerInbox.sol"; import "../bridge/Inbox.sol"; import "../bridge/Outbox.sol"; -import "./RollupEventBridge.sol"; +import "./RollupEventInbox.sol"; import "../bridge/IBridge.sol"; import "@openzeppelin/contracts/access/Ownable.sol"; import "@openzeppelin/contracts/proxy/transparent/ProxyAdmin.sol"; contract BridgeCreator is Ownable { - Bridge public delayedBridgeTemplate; + Bridge public bridgeTemplate; SequencerInbox public sequencerInboxTemplate; Inbox public inboxTemplate; - RollupEventBridge public rollupEventBridgeTemplate; + RollupEventInbox public rollupEventInboxTemplate; Outbox public outboxTemplate; event TemplatesUpdated(); constructor() Ownable() { - delayedBridgeTemplate = new Bridge(); + bridgeTemplate = new Bridge(); sequencerInboxTemplate = new SequencerInbox(); inboxTemplate = new Inbox(); - rollupEventBridgeTemplate = new RollupEventBridge(); + rollupEventInboxTemplate = new RollupEventInbox(); outboxTemplate = new Outbox(); } function updateTemplates( - address _delayedBridgeTemplate, + address _bridgeTemplate, address _sequencerInboxTemplate, address _inboxTemplate, - address _rollupEventBridgeTemplate, + address _rollupEventInboxTemplate, address _outboxTemplate ) external onlyOwner { - delayedBridgeTemplate = Bridge(_delayedBridgeTemplate); + bridgeTemplate = Bridge(_bridgeTemplate); sequencerInboxTemplate = SequencerInbox(_sequencerInboxTemplate); inboxTemplate = Inbox(_inboxTemplate); - rollupEventBridgeTemplate = RollupEventBridge(_rollupEventBridgeTemplate); + rollupEventInboxTemplate = RollupEventInbox(_rollupEventInboxTemplate); outboxTemplate = Outbox(_outboxTemplate); emit TemplatesUpdated(); @@ -50,10 +50,10 @@ contract BridgeCreator is Ownable { struct 
CreateBridgeFrame { ProxyAdmin admin; - Bridge delayedBridge; + Bridge bridge; SequencerInbox sequencerInbox; Inbox inbox; - RollupEventBridge rollupEventBridge; + RollupEventInbox rollupEventInbox; Outbox outbox; } @@ -67,16 +67,14 @@ contract BridgeCreator is Ownable { Bridge, SequencerInbox, Inbox, - RollupEventBridge, + RollupEventInbox, Outbox ) { CreateBridgeFrame memory frame; { - frame.delayedBridge = Bridge( - address( - new TransparentUpgradeableProxy(address(delayedBridgeTemplate), adminProxy, "") - ) + frame.bridge = Bridge( + address(new TransparentUpgradeableProxy(address(bridgeTemplate), adminProxy, "")) ); frame.sequencerInbox = SequencerInbox( address( @@ -86,10 +84,10 @@ contract BridgeCreator is Ownable { frame.inbox = Inbox( address(new TransparentUpgradeableProxy(address(inboxTemplate), adminProxy, "")) ); - frame.rollupEventBridge = RollupEventBridge( + frame.rollupEventInbox = RollupEventInbox( address( new TransparentUpgradeableProxy( - address(rollupEventBridgeTemplate), + address(rollupEventInboxTemplate), adminProxy, "" ) @@ -100,20 +98,17 @@ contract BridgeCreator is Ownable { ); } - frame.delayedBridge.initialize(); - frame.sequencerInbox.initialize(IBridge(frame.delayedBridge), rollup, maxTimeVariation); - frame.inbox.initialize(IBridge(frame.delayedBridge)); - frame.rollupEventBridge.initialize(address(frame.delayedBridge), rollup); - frame.outbox.initialize(rollup, IBridge(frame.delayedBridge)); - - frame.delayedBridge.setInbox(address(frame.inbox), true); - frame.delayedBridge.transferOwnership(rollup); + frame.bridge.initialize(IOwnable(rollup)); + frame.sequencerInbox.initialize(IBridge(frame.bridge), maxTimeVariation); + frame.inbox.initialize(IBridge(frame.bridge), ISequencerInbox(frame.sequencerInbox)); + frame.rollupEventInbox.initialize(IBridge(frame.bridge)); + frame.outbox.initialize(IBridge(frame.bridge)); return ( - frame.delayedBridge, + frame.bridge, frame.sequencerInbox, frame.inbox, - frame.rollupEventBridge, + 
frame.rollupEventInbox, frame.outbox ); } diff --git a/contracts/src/rollup/IRollupCore.sol b/contracts/src/rollup/IRollupCore.sol index 8617aa4f77..62a3298f5d 100644 --- a/contracts/src/rollup/IRollupCore.sol +++ b/contracts/src/rollup/IRollupCore.sol @@ -59,13 +59,13 @@ interface IRollupCore { function wasmModuleRoot() external view returns (bytes32); - function delayedBridge() external view returns (IBridge); + function bridge() external view returns (IBridge); - function sequencerBridge() external view returns (ISequencerInbox); + function sequencerInbox() external view returns (ISequencerInbox); function outbox() external view returns (IOutbox); - function rollupEventBridge() external view returns (IRollupEventBridge); + function rollupEventInbox() external view returns (IRollupEventInbox); function challengeManager() external view returns (IChallengeManager); diff --git a/contracts/src/rollup/IRollupEventBridge.sol b/contracts/src/rollup/IRollupEventInbox.sol similarity index 79% rename from contracts/src/rollup/IRollupEventBridge.sol rename to contracts/src/rollup/IRollupEventInbox.sol index 2af93e5b28..c2982de828 100644 --- a/contracts/src/rollup/IRollupEventBridge.sol +++ b/contracts/src/rollup/IRollupEventInbox.sol @@ -6,10 +6,10 @@ pragma solidity ^0.8.0; import "../bridge/IBridge.sol"; -interface IRollupEventBridge { +interface IRollupEventInbox { function bridge() external view returns (IBridge); - function initialize(address _bridge, address _rollup) external; + function initialize(IBridge _bridge) external; function rollup() external view returns (address); diff --git a/contracts/src/rollup/IRollupLogic.sol b/contracts/src/rollup/IRollupLogic.sol index 83c8f3aef4..8b8bfab2f4 100644 --- a/contracts/src/rollup/IRollupLogic.sol +++ b/contracts/src/rollup/IRollupLogic.sol @@ -8,8 +8,9 @@ import "./RollupLib.sol"; import "./IRollupCore.sol"; import "../bridge/ISequencerInbox.sol"; import "../bridge/IOutbox.sol"; +import "../bridge/IOwnable.sol"; 
-interface IRollupUserAbs is IRollupCore { +interface IRollupUserAbs is IRollupCore, IOwnable { /// @dev the user logic just validated configuration and shouldn't write to state during init /// this allows the admin logic to ensure consistency on parameters. function initialize(address stakeToken) external view; @@ -54,8 +55,6 @@ interface IRollupUserAbs is IRollupCore { function withdrawStakerFunds() external returns (uint256); - function owner() external view returns (address); - function createChallenge( address[2] calldata stakers, uint64[2] calldata nodeNums, @@ -120,7 +119,7 @@ interface IRollupAdmin { * @param _inbox Inbox contract to add or remove * @param _enabled New status of inbox */ - function setInbox(address _inbox, bool _enabled) external; + function setDelayedInbox(address _inbox, bool _enabled) external; /** * @notice Pause interaction with the rollup contract @@ -210,4 +209,10 @@ interface IRollupAdmin { * @param newWasmModuleRoot new module root */ function setWasmModuleRoot(bytes32 newWasmModuleRoot) external; + + /** + * @notice set a new sequencer inbox contract + * @param _sequencerInbox new address of sequencer inbox + */ + function setSequencerInbox(address _sequencerInbox) external; } diff --git a/contracts/src/rollup/RollupAdminLogic.sol b/contracts/src/rollup/RollupAdminLogic.sol index e2b40c61ec..d266775802 100644 --- a/contracts/src/rollup/RollupAdminLogic.sol +++ b/contracts/src/rollup/RollupAdminLogic.sol @@ -21,16 +21,26 @@ contract RollupAdminLogic is RollupCore, IRollupAdmin, SecondaryLogicUUPSUpgrade onlyProxy initializer { - delayedBridge = connectedContracts.delayedBridge; - sequencerBridge = connectedContracts.sequencerInbox; + rollupDeploymentBlock = block.number; + bridge = connectedContracts.bridge; + sequencerInbox = connectedContracts.sequencerInbox; + connectedContracts.bridge.setDelayedInbox(address(connectedContracts.inbox), true); + 
connectedContracts.bridge.setSequencerInbox(address(connectedContracts.sequencerInbox)); + + inbox = connectedContracts.inbox; outbox = connectedContracts.outbox; - delayedBridge.setOutbox(address(connectedContracts.outbox), true); - rollupEventBridge = connectedContracts.rollupEventBridge; - delayedBridge.setInbox(address(connectedContracts.rollupEventBridge), true); + connectedContracts.bridge.setOutbox(address(connectedContracts.outbox), true); + rollupEventInbox = connectedContracts.rollupEventInbox; + connectedContracts.bridge.setDelayedInbox( + address(connectedContracts.rollupEventInbox), + true + ); - rollupEventBridge.rollupInitialized(config.chainId); - sequencerBridge.addSequencerL2Batch(0, "", 1, IGasRefunder(address(0))); + connectedContracts.rollupEventInbox.rollupInitialized(config.chainId); + connectedContracts.sequencerInbox.addSequencerL2Batch(0, "", 1, IGasRefunder(address(0))); + validatorUtils = connectedContracts.validatorUtils; + validatorWalletCreator = connectedContracts.validatorWalletCreator; challengeManager = connectedContracts.challengeManager; Node memory node = createInitialNode(); @@ -84,7 +94,7 @@ contract RollupAdminLogic is RollupCore, IRollupAdmin, SecondaryLogicUUPSUpgrade */ function setOutbox(IOutbox _outbox) external override { outbox = _outbox; - delayedBridge.setOutbox(address(_outbox), true); + bridge.setOutbox(address(_outbox), true); emit OwnerFunctionCalled(0); } @@ -94,7 +104,7 @@ contract RollupAdminLogic is RollupCore, IRollupAdmin, SecondaryLogicUUPSUpgrade */ function removeOldOutbox(address _outbox) external override { require(_outbox != address(outbox), "CUR_OUTBOX"); - delayedBridge.setOutbox(_outbox, false); + bridge.setOutbox(_outbox, false); emit OwnerFunctionCalled(1); } @@ -103,8 +113,8 @@ contract RollupAdminLogic is RollupCore, IRollupAdmin, SecondaryLogicUUPSUpgrade * @param _inbox Inbox contract to add or remove * @param _enabled New status of inbox */ - function setInbox(address _inbox, bool _enabled) 
external override { - delayedBridge.setInbox(address(_inbox), _enabled); + function setDelayedInbox(address _inbox, bool _enabled) external override { + bridge.setDelayedInbox(address(_inbox), _enabled); emit OwnerFunctionCalled(2); } @@ -296,4 +306,13 @@ contract RollupAdminLogic is RollupCore, IRollupAdmin, SecondaryLogicUUPSUpgrade wasmModuleRoot = newWasmModuleRoot; emit OwnerFunctionCalled(26); } + + /** + * @notice set a new sequencer inbox contract + * @param _sequencerInbox new address of sequencer inbox + */ + function setSequencerInbox(address _sequencerInbox) external override { + bridge.setSequencerInbox(_sequencerInbox); + emit OwnerFunctionCalled(27); + } } diff --git a/contracts/src/rollup/RollupCore.sol b/contracts/src/rollup/RollupCore.sol index f460950406..c8ea7080c7 100644 --- a/contracts/src/rollup/RollupCore.sol +++ b/contracts/src/rollup/RollupCore.sol @@ -9,7 +9,7 @@ import "@openzeppelin/contracts-upgradeable/security/PausableUpgradeable.sol"; import "./Node.sol"; import "./IRollupCore.sol"; import "./RollupLib.sol"; -import "./IRollupEventBridge.sol"; +import "./IRollupEventInbox.sol"; import "./IRollupCore.sol"; import "../challenge/IChallengeManager.sol"; @@ -31,11 +31,17 @@ abstract contract RollupCore is IRollupCore, PausableUpgradeable { uint256 public baseStake; bytes32 public wasmModuleRoot; - IBridge public delayedBridge; - ISequencerInbox public sequencerBridge; + IInbox public inbox; + IBridge public bridge; IOutbox public outbox; - IRollupEventBridge public rollupEventBridge; + ISequencerInbox public sequencerInbox; + IRollupEventInbox public rollupEventInbox; IChallengeManager public override challengeManager; + + // misc useful contracts when interacting with the rollup + address public validatorUtils; + address public validatorWalletCreator; + // when a staker loses a challenge, half of their funds get escrowed in this address address public loserStakeEscrow; address public stakeToken; @@ -63,6 +69,7 @@ abstract contract 
RollupCore is IRollupCore, PausableUpgradeable { mapping(address => uint256) private _withdrawableFunds; uint256 public totalWithdrawableFunds; + uint256 public rollupDeploymentBlock; // The node number of the initial node uint64 internal constant GENESIS_NODE = 0; @@ -529,7 +536,7 @@ abstract contract RollupCore is IRollupCore, PausableUpgradeable { { // validate data memoryFrame.prevNode = getNode(prevNodeNum); - memoryFrame.currentInboxSize = sequencerBridge.batchCount(); + memoryFrame.currentInboxSize = bridge.sequencerMessageCount(); // Make sure the previous state is correct against the node being built on require( @@ -560,7 +567,7 @@ abstract contract RollupCore is IRollupCore, PausableUpgradeable { require(afterInboxCount <= memoryFrame.currentInboxSize, "INBOX_PAST_END"); // This gives replay protection against the state of the inbox if (afterInboxCount > 0) { - memoryFrame.sequencerBatchAcc = sequencerBridge.inboxAccs(afterInboxCount - 1); + memoryFrame.sequencerBatchAcc = bridge.sequencerInboxAccs(afterInboxCount - 1); } } @@ -582,7 +589,8 @@ abstract contract RollupCore is IRollupCore, PausableUpgradeable { memoryFrame.hasSibling, memoryFrame.lastHash, memoryFrame.executionHash, - memoryFrame.sequencerBatchAcc + memoryFrame.sequencerBatchAcc, + wasmModuleRoot ); require(newNodeHash == expectedNodeHash, "UNEXPECTED_NODE_HASH"); diff --git a/contracts/src/rollup/RollupCreator.sol b/contracts/src/rollup/RollupCreator.sol index 29b3343653..6b6026dcb7 100644 --- a/contracts/src/rollup/RollupCreator.sol +++ b/contracts/src/rollup/RollupCreator.sol @@ -18,7 +18,7 @@ contract RollupCreator is Ownable { address inboxAddress, address adminProxy, address sequencerInbox, - address delayedBridge + address bridge ); event TemplatesUpdated(); @@ -28,6 +28,9 @@ contract RollupCreator is Ownable { IRollupAdmin public rollupAdminLogic; IRollupUser public rollupUserLogic; + address public validatorUtils; + address public validatorWalletCreator; + constructor() Ownable() 
{} function setTemplates( @@ -35,22 +38,26 @@ contract RollupCreator is Ownable { IOneStepProofEntry _osp, IChallengeManager _challengeManagerLogic, IRollupAdmin _rollupAdminLogic, - IRollupUser _rollupUserLogic + IRollupUser _rollupUserLogic, + address _validatorUtils, + address _validatorWalletCreator ) external onlyOwner { bridgeCreator = _bridgeCreator; osp = _osp; challengeManagerTemplate = _challengeManagerLogic; rollupAdminLogic = _rollupAdminLogic; rollupUserLogic = _rollupUserLogic; + validatorUtils = _validatorUtils; + validatorWalletCreator = _validatorWalletCreator; emit TemplatesUpdated(); } struct CreateRollupFrame { ProxyAdmin admin; - IBridge delayedBridge; + IBridge bridge; ISequencerInbox sequencerInbox; IInbox inbox; - IRollupEventBridge rollupEventBridge; + IRollupEventInbox rollupEventInbox; IOutbox outbox; ArbitrumProxy rollup; } @@ -68,10 +75,10 @@ contract RollupCreator is Ownable { frame.admin = new ProxyAdmin(); ( - frame.delayedBridge, + frame.bridge, frame.sequencerInbox, frame.inbox, - frame.rollupEventBridge, + frame.rollupEventInbox, frame.outbox ) = bridgeCreator.createBridge( address(frame.admin), @@ -93,20 +100,23 @@ contract RollupCreator is Ownable { challengeManager.initialize( IChallengeResultReceiver(expectedRollupAddr), frame.sequencerInbox, - frame.delayedBridge, + frame.bridge, osp ); frame.rollup = new ArbitrumProxy( config, ContractDependencies({ - delayedBridge: frame.delayedBridge, + bridge: frame.bridge, sequencerInbox: frame.sequencerInbox, + inbox: frame.inbox, outbox: frame.outbox, - rollupEventBridge: frame.rollupEventBridge, + rollupEventInbox: frame.rollupEventInbox, challengeManager: challengeManager, rollupAdminLogic: rollupAdminLogic, - rollupUserLogic: rollupUserLogic + rollupUserLogic: rollupUserLogic, + validatorUtils: validatorUtils, + validatorWalletCreator: validatorWalletCreator }) ); require(address(frame.rollup) == expectedRollupAddr, "WRONG_ROLLUP_ADDR"); @@ -116,7 +126,7 @@ contract RollupCreator is 
Ownable { address(frame.inbox), address(frame.admin), address(frame.sequencerInbox), - address(frame.delayedBridge) + address(frame.bridge) ); return address(frame.rollup); } diff --git a/contracts/src/rollup/RollupEventBridge.sol b/contracts/src/rollup/RollupEventInbox.sol similarity index 64% rename from contracts/src/rollup/RollupEventBridge.sol rename to contracts/src/rollup/RollupEventInbox.sol index c541e8156e..2008de3d93 100644 --- a/contracts/src/rollup/RollupEventBridge.sol +++ b/contracts/src/rollup/RollupEventInbox.sol @@ -4,36 +4,36 @@ pragma solidity ^0.8.0; -import "./IRollupEventBridge.sol"; +import "./IRollupEventInbox.sol"; import "../bridge/IBridge.sol"; -import "../bridge/IMessageProvider.sol"; +import "../bridge/IDelayedMessageProvider.sol"; import "../libraries/DelegateCallAware.sol"; import {INITIALIZATION_MSG_TYPE} from "../libraries/MessageTypes.sol"; /** * @title The inbox for rollup protocol events */ -contract RollupEventBridge is IRollupEventBridge, IMessageProvider, DelegateCallAware { +contract RollupEventInbox is IRollupEventInbox, IDelayedMessageProvider, DelegateCallAware { uint8 internal constant CREATE_NODE_EVENT = 0; uint8 internal constant CONFIRM_NODE_EVENT = 1; uint8 internal constant REJECT_NODE_EVENT = 2; uint8 internal constant STAKE_CREATED_EVENT = 3; - IBridge public bridge; - address public rollup; + IBridge public override bridge; + address public override rollup; modifier onlyRollup() { require(msg.sender == rollup, "ONLY_ROLLUP"); _; } - function initialize(address _bridge, address _rollup) external onlyDelegated { - require(rollup == address(0), "ALREADY_INIT"); - bridge = IBridge(_bridge); - rollup = _rollup; + function initialize(IBridge _bridge) external override onlyDelegated { + require(address(bridge) == address(0), "ALREADY_INIT"); + bridge = _bridge; + rollup = address(_bridge.rollup()); } - function rollupInitialized(uint256 chainId) external onlyRollup { + function rollupInitialized(uint256 chainId) 
external override onlyRollup { bytes memory initMsg = abi.encodePacked(chainId); uint256 num = bridge.enqueueDelayedMessage( INITIALIZATION_MSG_TYPE, diff --git a/contracts/src/rollup/RollupLib.sol b/contracts/src/rollup/RollupLib.sol index 3d12b7f03a..93f51f91cf 100644 --- a/contracts/src/rollup/RollupLib.sol +++ b/contracts/src/rollup/RollupLib.sol @@ -11,7 +11,8 @@ import "../bridge/ISequencerInbox.sol"; import "../bridge/IBridge.sol"; import "../bridge/IOutbox.sol"; -import "./IRollupEventBridge.sol"; +import "../bridge/IInbox.sol"; +import "./IRollupEventInbox.sol"; import "./IRollupLogic.sol"; struct Config { @@ -27,13 +28,17 @@ struct Config { } struct ContractDependencies { - IBridge delayedBridge; + IBridge bridge; ISequencerInbox sequencerInbox; + IInbox inbox; IOutbox outbox; - IRollupEventBridge rollupEventBridge; + IRollupEventInbox rollupEventInbox; IChallengeManager challengeManager; IRollupAdmin rollupAdminLogic; IRollupUser rollupUserLogic; + // misc contracts that are useful when interacting with the rollup + address validatorUtils; + address validatorWalletCreator; } library RollupLib { @@ -127,9 +132,19 @@ library RollupLib { bool hasSibling, bytes32 lastHash, bytes32 assertionExecHash, - bytes32 inboxAcc + bytes32 inboxAcc, + bytes32 wasmModuleRoot ) internal pure returns (bytes32) { uint8 hasSiblingInt = hasSibling ? 
1 : 0; - return keccak256(abi.encodePacked(hasSiblingInt, lastHash, assertionExecHash, inboxAcc)); + return + keccak256( + abi.encodePacked( + hasSiblingInt, + lastHash, + assertionExecHash, + inboxAcc, + wasmModuleRoot + ) + ); } } diff --git a/contracts/src/state/Deserialize.sol b/contracts/src/state/Deserialize.sol index 96f2da7159..8c98baa168 100644 --- a/contracts/src/state/Deserialize.sol +++ b/contracts/src/state/Deserialize.sol @@ -6,7 +6,6 @@ pragma solidity ^0.8.0; import "./Value.sol"; import "./ValueStack.sol"; -import "./PcStack.sol"; import "./Machine.sol"; import "./Instructions.sol"; import "./StackFrame.sol"; @@ -120,23 +119,6 @@ library Deserialize { stack = ValueStack({proved: ValueArray(proved), remainingHash: remainingHash}); } - function pcStack(bytes calldata proof, uint256 startOffset) - internal - pure - returns (PcStack memory stack, uint256 offset) - { - offset = startOffset; - bytes32 remainingHash; - (remainingHash, offset) = b32(proof, offset); - uint256 provedLength; - (provedLength, offset) = u256(proof, offset); - uint32[] memory proved = new uint32[](provedLength); - for (uint256 i = 0; i < proved.length; i++) { - (proved[i], offset) = u32(proof, offset); - } - stack = PcStack({proved: PcArray(proved), remainingHash: remainingHash}); - } - function instruction(bytes calldata proof, uint256 startOffset) internal pure @@ -199,10 +181,12 @@ library Deserialize { { offset = startOffset; uint64 size; + uint64 maxSize; bytes32 root; (size, offset) = u64(proof, offset); + (maxSize, offset) = u64(proof, offset); (root, offset) = b32(proof, offset); - mem = ModuleMemory({size: size, merkleRoot: root}); + mem = ModuleMemory({size: size, maxSize: maxSize, merkleRoot: root}); } function module(bytes calldata proof, uint256 startOffset) @@ -274,7 +258,6 @@ library Deserialize { } ValueStack memory values; ValueStack memory internalStack; - PcStack memory blocks; bytes32 globalStateHash; uint32 moduleIdx; uint32 functionIdx; @@ -283,7 +266,6 @@ 
library Deserialize { bytes32 modulesRoot; (values, offset) = valueStack(proof, offset); (internalStack, offset) = valueStack(proof, offset); - (blocks, offset) = pcStack(proof, offset); (frameStack, offset) = stackFrameWindow(proof, offset); (globalStateHash, offset) = b32(proof, offset); (moduleIdx, offset) = u32(proof, offset); @@ -294,7 +276,6 @@ library Deserialize { status: status, valueStack: values, internalStack: internalStack, - blockStack: blocks, frameStack: frameStack, globalStateHash: globalStateHash, moduleIdx: moduleIdx, diff --git a/contracts/src/state/Instructions.sol b/contracts/src/state/Instructions.sol index 625da095c6..196899c93f 100644 --- a/contracts/src/state/Instructions.sol +++ b/contracts/src/state/Instructions.sol @@ -12,9 +12,6 @@ struct Instruction { library Instructions { uint16 internal constant UNREACHABLE = 0x00; uint16 internal constant NOP = 0x01; - uint16 internal constant BLOCK = 0x02; - uint16 internal constant BRANCH = 0x0C; - uint16 internal constant BRANCH_IF = 0x0D; uint16 internal constant RETURN = 0x0F; uint16 internal constant CALL = 0x10; uint16 internal constant CALL_INDIRECT = 0x11; @@ -129,14 +126,11 @@ library Instructions { uint16 internal constant I64_EXTEND_16S = 0xC3; uint16 internal constant I64_EXTEND_32S = 0xC4; - uint16 internal constant END_BLOCK = 0x8000; - uint16 internal constant END_BLOCK_IF = 0x8001; uint16 internal constant INIT_FRAME = 0x8002; - uint16 internal constant ARBITRARY_JUMP_IF = 0x8003; - uint16 internal constant PUSH_STACK_BOUNDARY = 0x8004; + uint16 internal constant ARBITRARY_JUMP = 0x8003; + uint16 internal constant ARBITRARY_JUMP_IF = 0x8004; uint16 internal constant MOVE_FROM_STACK_TO_INTERNAL = 0x8005; uint16 internal constant MOVE_FROM_INTERNAL_TO_STACK = 0x8006; - uint16 internal constant IS_STACK_BOUNDARY = 0x8007; uint16 internal constant DUP = 0x8008; uint16 internal constant CROSS_MODULE_CALL = 0x8009; uint16 internal constant CALLER_MODULE_INTERNAL_CALL = 0x800A; @@ -150,8 
+144,6 @@ library Instructions { uint16 internal constant READ_INBOX_MESSAGE = 0x8021; uint16 internal constant HALT_AND_SET_FINISHED = 0x8022; - uint16 internal constant ARBITRARY_JUMP = 0x8023; - uint256 internal constant INBOX_INDEX_SEQUENCER = 0; uint256 internal constant INBOX_INDEX_DELAYED = 1; diff --git a/contracts/src/state/Machine.sol b/contracts/src/state/Machine.sol index 1e5a1b4749..a7a5e9273d 100644 --- a/contracts/src/state/Machine.sol +++ b/contracts/src/state/Machine.sol @@ -5,7 +5,6 @@ pragma solidity ^0.8.0; import "./ValueStack.sol"; -import "./PcStack.sol"; import "./Instructions.sol"; import "./StackFrame.sol"; @@ -20,7 +19,6 @@ struct Machine { MachineStatus status; ValueStack valueStack; ValueStack internalStack; - PcStack blockStack; StackFrameWindow frameStack; bytes32 globalStateHash; uint32 moduleIdx; @@ -30,7 +28,6 @@ struct Machine { } library MachineLib { - using PcStackLib for PcStack; using StackFrameLib for StackFrameWindow; using ValueStackLib for ValueStack; @@ -43,7 +40,6 @@ library MachineLib { "Machine running:", mach.valueStack.hash(), mach.internalStack.hash(), - mach.blockStack.hash(), mach.frameStack.hash(), mach.globalStateHash, mach.moduleIdx, diff --git a/contracts/src/state/ModuleMemory.sol b/contracts/src/state/ModuleMemory.sol index 362da7fdcf..c1f0adb103 100644 --- a/contracts/src/state/ModuleMemory.sol +++ b/contracts/src/state/ModuleMemory.sol @@ -9,6 +9,7 @@ import "./Deserialize.sol"; struct ModuleMemory { uint64 size; + uint64 maxSize; bytes32 merkleRoot; } @@ -16,7 +17,7 @@ library ModuleMemoryLib { using MerkleProofLib for MerkleProof; function hash(ModuleMemory memory mem) internal pure returns (bytes32) { - return keccak256(abi.encodePacked("Memory:", mem.size, mem.merkleRoot)); + return keccak256(abi.encodePacked("Memory:", mem.size, mem.maxSize, mem.merkleRoot)); } function proveLeaf( diff --git a/contracts/src/state/PcStack.sol b/contracts/src/state/PcStack.sol deleted file mode 100644 index 
f8d75a385b..0000000000 --- a/contracts/src/state/PcStack.sol +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2021-2022, Offchain Labs, Inc. -// For license information, see https://github.com/nitro/blob/master/LICENSE -// SPDX-License-Identifier: BUSL-1.1 - -pragma solidity ^0.8.0; - -import "./PcArray.sol"; - -struct PcStack { - PcArray proved; - bytes32 remainingHash; -} - -library PcStackLib { - using PcArrayLib for PcArray; - - function hash(PcStack memory stack) internal pure returns (bytes32 h) { - h = stack.remainingHash; - uint256 len = stack.proved.length(); - for (uint256 i = 0; i < len; i++) { - h = keccak256(abi.encodePacked("Program counter stack:", stack.proved.get(i), h)); - } - } - - function pop(PcStack memory stack) internal pure returns (uint32) { - return stack.proved.pop(); - } - - function push(PcStack memory stack, uint32 val) internal pure { - return stack.proved.push(val); - } -} diff --git a/contracts/src/state/Value.sol b/contracts/src/state/Value.sol index 3784eab669..6e0a837b2b 100644 --- a/contracts/src/state/Value.sol +++ b/contracts/src/state/Value.sol @@ -11,8 +11,7 @@ enum ValueType { F64, REF_NULL, FUNC_REF, - INTERNAL_REF, - STACK_BOUNDARY + INTERNAL_REF } struct Value { @@ -26,7 +25,7 @@ library ValueLib { } function maxValueType() internal pure returns (ValueType) { - return ValueType.STACK_BOUNDARY; + return ValueType.INTERNAL_REF; } function assumeI32(Value memory val) internal pure returns (uint32) { diff --git a/contracts/src/test-helpers/BridgeTester.sol b/contracts/src/test-helpers/BridgeTester.sol index 03802763c5..922e055ce9 100644 --- a/contracts/src/test-helpers/BridgeTester.sol +++ b/contracts/src/test-helpers/BridgeTester.sol @@ -4,7 +4,7 @@ pragma solidity ^0.8.4; -import "@openzeppelin/contracts-upgradeable/access/OwnableUpgradeable.sol"; +import "@openzeppelin/contracts-upgradeable/proxy/utils/Initializable.sol"; import "@openzeppelin/contracts-upgradeable/utils/AddressUpgradeable.sol"; import 
"../bridge/IBridge.sol"; @@ -18,7 +18,7 @@ import "../libraries/DelegateCallAware.sol"; * Since the escrow is held here, this contract also contains a list of allowed * outboxes that can make calls from here and withdraw this escrow. */ -contract BridgeTester is OwnableUpgradeable, DelegateCallAware, IBridge { +contract BridgeTester is Initializable, DelegateCallAware, IBridge { using AddressUpgradeable for address; struct InOutInfo { @@ -29,19 +29,39 @@ contract BridgeTester is OwnableUpgradeable, DelegateCallAware, IBridge { mapping(address => InOutInfo) private allowedInboxesMap; mapping(address => InOutInfo) private allowedOutboxesMap; - address[] public allowedInboxList; + address[] public allowedDelayedInboxList; address[] public allowedOutboxList; address private _activeOutbox; + IOwnable public rollup; + address public sequencerInbox; + + modifier onlyRollupOrOwner() { + if (msg.sender != address(rollup)) { + address rollupOwner = rollup.owner(); + if (msg.sender != rollupOwner) { + revert NotRollupOrOwner(msg.sender, address(rollup), rollupOwner); + } + } + _; + } + + function setSequencerInbox(address _sequencerInbox) external override onlyRollupOrOwner { + sequencerInbox = _sequencerInbox; + emit SequencerInboxUpdated(_sequencerInbox); + } + /// @dev Accumulator for delayed inbox messages; tail represents hash of the current state; each element represents the inclusion of a new message. 
- bytes32[] public override inboxAccs; + bytes32[] public override delayedInboxAccs; + + bytes32[] public override sequencerInboxAccs; address private constant EMPTY_ACTIVEOUTBOX = address(type(uint160).max); - function initialize() external initializer { + function initialize(IOwnable rollup_) external initializer { _activeOutbox = EMPTY_ACTIVEOUTBOX; - __Ownable_init(); + rollup = rollup_; } function activeOutbox() public view returns (address) { @@ -49,7 +69,7 @@ contract BridgeTester is OwnableUpgradeable, DelegateCallAware, IBridge { return _activeOutbox; } - function allowedInboxes(address inbox) external view override returns (bool) { + function allowedDelayedInboxes(address inbox) external view override returns (bool) { return allowedInboxesMap[inbox].allowed; } @@ -57,6 +77,25 @@ contract BridgeTester is OwnableUpgradeable, DelegateCallAware, IBridge { return allowedOutboxesMap[outbox].allowed; } + function enqueueSequencerMessage(bytes32 dataHash, uint256 afterDelayedMessagesRead) + external + returns ( + uint256 seqMessageIndex, + bytes32 beforeAcc, + bytes32 delayedAcc, + bytes32 acc + ) + { + // TODO: implement stub logic + } + + function submitBatchSpendingReport(address batchPoster, bytes32 dataHash) + external + returns (uint256) + { + // TODO: implement stub + } + /** * @dev Enqueue a message in the delayed inbox accumulator. 
* These messages are later sequenced in the SequencerInbox, either by the sequencer as @@ -67,9 +106,9 @@ contract BridgeTester is OwnableUpgradeable, DelegateCallAware, IBridge { address sender, bytes32 messageDataHash ) external payable override returns (uint256) { - if (!allowedInboxesMap[msg.sender].allowed) revert NotInbox(msg.sender); + if (!allowedInboxesMap[msg.sender].allowed) revert NotDelayedInbox(msg.sender); return - addMessageToAccumulator( + addMessageToDelayedAccumulator( kind, sender, uint64(block.number), @@ -79,7 +118,7 @@ contract BridgeTester is OwnableUpgradeable, DelegateCallAware, IBridge { ); } - function addMessageToAccumulator( + function addMessageToDelayedAccumulator( uint8 kind, address sender, uint64 blockNumber, @@ -87,7 +126,7 @@ contract BridgeTester is OwnableUpgradeable, DelegateCallAware, IBridge { uint256 baseFeeL1, bytes32 messageDataHash ) internal returns (uint256) { - uint256 count = inboxAccs.length; + uint256 count = delayedInboxAccs.length; bytes32 messageHash = Messages.messageHash( kind, sender, @@ -99,9 +138,9 @@ contract BridgeTester is OwnableUpgradeable, DelegateCallAware, IBridge { ); bytes32 prevAcc = 0; if (count > 0) { - prevAcc = inboxAccs[count - 1]; + prevAcc = delayedInboxAccs[count - 1]; } - inboxAccs.push(Messages.accumulateInboxMessage(prevAcc, messageHash)); + delayedInboxAccs.push(Messages.accumulateInboxMessage(prevAcc, messageHash)); emit MessageDelivered( count, prevAcc, @@ -134,7 +173,7 @@ contract BridgeTester is OwnableUpgradeable, DelegateCallAware, IBridge { emit BridgeCallTriggered(msg.sender, to, value, data); } - function setInbox(address inbox, bool enabled) external override onlyOwner { + function setDelayedInbox(address inbox, bool enabled) external override onlyRollupOrOwner { InOutInfo storage info = allowedInboxesMap[inbox]; bool alreadyEnabled = info.allowed; emit InboxToggle(inbox, enabled); @@ -142,17 +181,19 @@ contract BridgeTester is OwnableUpgradeable, DelegateCallAware, IBridge 
{ return; } if (enabled) { - allowedInboxesMap[inbox] = InOutInfo(allowedInboxList.length, true); - allowedInboxList.push(inbox); + allowedInboxesMap[inbox] = InOutInfo(allowedDelayedInboxList.length, true); + allowedDelayedInboxList.push(inbox); } else { - allowedInboxList[info.index] = allowedInboxList[allowedInboxList.length - 1]; - allowedInboxesMap[allowedInboxList[info.index]].index = info.index; - allowedInboxList.pop(); + allowedDelayedInboxList[info.index] = allowedDelayedInboxList[ + allowedDelayedInboxList.length - 1 + ]; + allowedInboxesMap[allowedDelayedInboxList[info.index]].index = info.index; + allowedDelayedInboxList.pop(); delete allowedInboxesMap[inbox]; } } - function setOutbox(address outbox, bool enabled) external override onlyOwner { + function setOutbox(address outbox, bool enabled) external override onlyRollupOrOwner { InOutInfo storage info = allowedOutboxesMap[outbox]; bool alreadyEnabled = info.allowed; emit OutboxToggle(outbox, enabled); @@ -170,8 +211,12 @@ contract BridgeTester is OwnableUpgradeable, DelegateCallAware, IBridge { } } - function messageCount() external view override returns (uint256) { - return inboxAccs.length; + function delayedMessageCount() external view override returns (uint256) { + return delayedInboxAccs.length; + } + + function sequencerMessageCount() external view override returns (uint256) { + return sequencerInboxAccs.length; } receive() external payable {} diff --git a/contracts/src/test-helpers/OutboxWithoutOptTester.sol b/contracts/src/test-helpers/OutboxWithoutOptTester.sol index 589424ee24..44e841b833 100644 --- a/contracts/src/test-helpers/OutboxWithoutOptTester.sol +++ b/contracts/src/test-helpers/OutboxWithoutOptTester.sol @@ -29,10 +29,10 @@ contract OutboxWithoutOptTester is DelegateCallAware, IOutbox { L2ToL1Context internal context; uint128 public constant OUTBOX_VERSION = 2; - function initialize(address _rollup, IBridge _bridge) external { - if (rollup != address(0)) revert AlreadyInit(); - 
rollup = _rollup; + function initialize(IBridge _bridge) external { + if (address(bridge) != address(0)) revert AlreadyInit(); bridge = _bridge; + rollup = address(_bridge.rollup()); } function updateSendRoot(bytes32 root, bytes32 l2BlockHash) external override { diff --git a/contracts/test/contract/arbRollup.spec.ts b/contracts/test/contract/arbRollup.spec.ts index a70665254e..90ed419d07 100644 --- a/contracts/test/contract/arbRollup.spec.ts +++ b/contracts/test/contract/arbRollup.spec.ts @@ -161,7 +161,9 @@ const setup = async () => { oneStepProofEntry.address, challengeManagerTemplate.address, rollupAdminLogicTemplate.address, - rollupUserLogicTemplate.address + rollupUserLogicTemplate.address, + ethers.constants.AddressZero, + ethers.constants.AddressZero ); const nonce = await rollupCreator.signer.provider!.getTransactionCount(rollupCreator.address); @@ -211,10 +213,10 @@ const setup = async () => { rollupAdminLogicTemplate, rollupUserLogicTemplate, blockChallengeFactory: challengeManagerTemplateFac, - rollupEventBridge: await rollupAdmin.rollupEventBridge(), + rollupEventBridge: await rollupAdmin.rollupEventInbox(), outbox: await rollupAdmin.outbox(), sequencerInbox: rollupCreatedEvent.sequencerInbox, - delayedBridge: rollupCreatedEvent.delayedBridge, + delayedBridge: rollupCreatedEvent.bridge, }; }; @@ -350,12 +352,15 @@ describe("ArbRollup", () => { await expect( rollupAdmin.initialize(await getDefaultConfig(), { challengeManager: constants.AddressZero, - delayedBridge: constants.AddressZero, + bridge: constants.AddressZero, + inbox: constants.AddressZero, outbox: constants.AddressZero, rollupAdminLogic: constants.AddressZero, - rollupEventBridge: constants.AddressZero, + rollupEventInbox: constants.AddressZero, rollupUserLogic: constants.AddressZero, sequencerInbox: constants.AddressZero, + validatorUtils: constants.AddressZero, + validatorWalletCreator: constants.AddressZero, }) ).to.be.revertedWith("Initializable: contract is already initialized"); }); 
diff --git a/contracts/test/contract/common/rolluplib.ts b/contracts/test/contract/common/rolluplib.ts index 62db042a37..9ff085c9fa 100644 --- a/contracts/test/contract/common/rolluplib.ts +++ b/contracts/test/contract/common/rolluplib.ts @@ -30,11 +30,12 @@ export function nodeHash( hasSibling: boolean, lastHash: BytesLike, assertionExecHash: BytesLike, - inboxAcc: BytesLike + inboxAcc: BytesLike, + wasmModuleRoot: BytesLike ): BytesLike { return ethers.utils.solidityKeccak256( - ["bool", "bytes32", "bytes32", "bytes32"], - [hasSibling, lastHash, assertionExecHash, inboxAcc] + ["bool", "bytes32", "bytes32", "bytes32", "bytes32"], + [hasSibling, lastHash, assertionExecHash, inboxAcc, wasmModuleRoot] ); } @@ -153,11 +154,13 @@ export class RollupContract { const inboxPosition = BigNumber.from(assertion.afterState.globalState.u64Vals[0]).toNumber(); const afterInboxAcc = inboxPosition > 0 ? await sequencerInbox.inboxAccs(inboxPosition - 1) : constants.HashZero; + const wasmModuleRoot = await this.rollup.wasmModuleRoot(); const newNodeHash = nodeHash( !!siblingNode, (siblingNode || parentNode).nodeHash, assertionExecutionHash(assertion), - afterInboxAcc + afterInboxAcc, + wasmModuleRoot ); const tx = stakeToAdd ? await this.rollup.newStakeOnNewNode(assertion, newNodeHash, parentNode.inboxMaxCount, { @@ -242,11 +245,13 @@ export async function forceCreateNode( const inboxPosition = BigNumber.from(assertion.afterState.globalState.u64Vals[0]).toNumber(); const afterInboxAcc = inboxPosition > 0 ? 
await sequencerInbox.inboxAccs(inboxPosition - 1) : constants.HashZero; + const wasmModuleRoot = await rollupAdmin.wasmModuleRoot(); const newNodeHash = nodeHash( !!siblingNode, (siblingNode || parentNode).nodeHash, assertionExecutionHash(assertion), - afterInboxAcc + afterInboxAcc, + wasmModuleRoot ); const tx = await rollupAdmin.forceCreateNode( parentNode.nodeNum, diff --git a/contracts/test/contract/outboxOptimisation.spec.ts b/contracts/test/contract/outboxOptimisation.spec.ts index fadd634fd2..9e7ed82516 100644 --- a/contracts/test/contract/outboxOptimisation.spec.ts +++ b/contracts/test/contract/outboxOptimisation.spec.ts @@ -7,7 +7,7 @@ import { TransparentUpgradeableProxy__factory } from "../../build/types/factorie async function sendEth(send_account: string, to_address: string, send_token_amount: BigNumber) { const nonce = await ethers.provider.getTransactionCount(send_account, "latest"); const gas_price = await ethers.provider.getGasPrice(); - + const tx = { from: send_account, to: to_address, @@ -25,7 +25,7 @@ async function setSendRoot(cases: any, outbox: Contract, signer: Signer) { const length = cases.length; for(let i = 0; i < length; i++) { await outbox.connect(signer).updateSendRoot(cases[i].root, cases[i].l2blockhash) - } + } } const deployBehindProxy = async ( @@ -51,7 +51,7 @@ describe("Outbox", async function () { const sentEthAmount = ethers.utils.parseEther("10"); let accounts: Signer[]; let rollup: Signer; - + before(async function () { accounts = await ethers.getSigners(); const OutboxWithOpt = await ethers.getContractFactory("Outbox"); @@ -60,10 +60,10 @@ describe("Outbox", async function () { outboxWithOpt = await deployBehindProxy(accounts[0], OutboxWithOpt, await accounts[1].getAddress()) rollup = accounts[3] outboxWithoutOpt = await OutboxWithoutOpt.deploy(); - bridge = await Bridge.deploy(); - await bridge.initialize(); - await outboxWithOpt.initialize(await rollup.getAddress(), bridge.address); - await 
outboxWithoutOpt.initialize(await rollup.getAddress(), bridge.address); + bridge = (await Bridge.deploy()).connect(rollup); + await bridge.initialize(await rollup.getAddress()); + await outboxWithOpt.initialize(bridge.address); + await outboxWithoutOpt.initialize(bridge.address); await bridge.setOutbox(outboxWithOpt.address, true); await bridge.setOutbox(outboxWithoutOpt.address, true); await setSendRoot(cases, outboxWithOpt, rollup); @@ -71,13 +71,13 @@ describe("Outbox", async function () { await sendEth(await accounts[0].getAddress(), bridge.address, sentEthAmount); }) - it("First call to initial some storage", async function () { - await sendEth(await accounts[0].getAddress(), cases[0].to, sentEthAmount); + it("First call to initial some storage", async function () { + await sendEth(await accounts[0].getAddress(), cases[0].to, sentEthAmount); expect(await outboxWithOpt.executeTransaction(cases[0].proof, cases[0].index, cases[0].l2Sender, cases[0].to, cases[0].l2Block, cases[0].l1Block, cases[0].l2Timestamp, cases[0].value, cases[0].data)).to.emit(outboxWithOpt, "BridgeCallTriggered") expect(await outboxWithoutOpt.executeTransaction(cases[0].proof, cases[0].index, cases[0].l2Sender, cases[0].to, cases[0].l2Block, cases[0].l1Block, cases[0].l2Timestamp, cases[0].value, cases[0].data)).to.emit(outboxWithoutOpt, "BridgeCallTriggered") //await outboxWithOpt.executeTransaction(cases[0].proof,cases[0].index,cases[0].l2Sender,cases[0].to,cases[0].l2Block,cases[0].l1Block,cases[0].l2Timestamp,cases[0].value,cases[0].data); }); - + it("Call twice without storage initail cost", async function () { await sendEth(await accounts[0].getAddress(), cases[1].to, sentEthAmount); expect(await outboxWithOpt.executeTransaction(cases[1].proof, cases[1].index, cases[1].l2Sender, cases[1].to, cases[1].l2Block, cases[1].l1Block, cases[1].l2Timestamp, cases[1].value, cases[1].data)).to.emit(outboxWithOpt, "BridgeCallTriggered") @@ -89,5 +89,5 @@ describe("Outbox", async function () { 
expect(await outboxWithOpt.executeTransaction(cases[2].proof, cases[2].index, cases[2].l2Sender, cases[2].to, cases[2].l2Block, cases[2].l1Block, cases[2].l2Timestamp, cases[2].value, cases[2].data)).to.emit(outboxWithOpt, "BridgeCallTriggered") expect(await outboxWithoutOpt.executeTransaction(cases[2].proof, cases[2].index, cases[2].l2Sender, cases[2].to, cases[2].l2Block, cases[2].l1Block, cases[2].l2Timestamp, cases[2].value, cases[2].data)).to.emit(outboxWithoutOpt, "BridgeCallTriggered") }); - + }); \ No newline at end of file diff --git a/contracts/test/contract/sequencerInboxForceInclude.spec.ts b/contracts/test/contract/sequencerInboxForceInclude.spec.ts index cc74f294dd..74489800d8 100644 --- a/contracts/test/contract/sequencerInboxForceInclude.spec.ts +++ b/contracts/test/contract/sequencerInboxForceInclude.spec.ts @@ -30,7 +30,7 @@ import { SequencerInbox__factory, TransparentUpgradeableProxy__factory, } from '../../build/types' -import { initializeAccounts } from './utils' +import { applyAlias, initializeAccounts } from './utils' import { Event } from '@ethersproject/contracts' import { Interface } from '@ethersproject/abi' import { @@ -84,16 +84,16 @@ describe('SequencerInboxForceInclude', async () => { amount: BigNumber, data: string, ) => { - const countBefore = (await bridge.functions.messageCount())[0].toNumber() + const countBefore = (await bridge.functions.delayedMessageCount())[0].toNumber() const sendUnsignedTx = await inbox .connect(sender) .sendUnsignedTransaction(l2Gas, l2GasPrice, nonce, destAddr, amount, data) const sendUnsignedTxReceipt = await sendUnsignedTx.wait() - const countAfter = (await bridge.functions.messageCount())[0].toNumber() + const countAfter = (await bridge.functions.delayedMessageCount())[0].toNumber() expect(countAfter, 'Unexpected inbox count').to.eq(countBefore + 1) - const senderAddr = await sender.getAddress() + const senderAddr = applyAlias(await sender.getAddress()) const messageDeliveredEvent = 
getMessageDeliveredEvents( sendUnsignedTxReceipt, @@ -102,7 +102,7 @@ describe('SequencerInboxForceInclude', async () => { const blockL1 = await sender.provider!.getBlock(l1BlockNumber) const baseFeeL1 = blockL1.baseFeePerGas!.toNumber() const l1BlockTimestamp = blockL1.timestamp - const delayedAcc = await bridge.inboxAccs(countBefore) + const delayedAcc = await bridge.delayedInboxAccs(countBefore) // need to hex pad the address const messageDataHash = ethers.utils.solidityKeccak256( @@ -138,7 +138,7 @@ describe('SequencerInboxForceInclude', async () => { expect(prevAccumulator, 'Incorrect prev accumulator').to.eq( countBefore === 0 ? ethers.utils.hexZeroPad('0x', 32) - : await bridge.inboxAccs(countBefore - 1), + : await bridge.delayedInboxAccs(countBefore - 1), ) const nextAcc = ( @@ -248,16 +248,16 @@ describe('SequencerInboxForceInclude', async () => { ) const bridge = await bridgeFac.attach(bridgeProxy.address).connect(user) + const bridgeAdmin = await bridgeFac.attach(bridgeProxy.address).connect(dummyRollup) const sequencerInbox = await sequencerInboxFac .attach(sequencerInboxProxy.address) .connect(user) const inbox = await inboxFac.attach(inboxProxy.address).connect(user) - await bridge.initialize() + await bridge.initialize(await dummyRollup.getAddress()) await sequencerInbox.initialize( bridgeProxy.address, - await dummyRollup.getAddress(), { delayBlocks: maxDelayBlocks, delaySeconds: maxDelayTime, @@ -265,9 +265,10 @@ describe('SequencerInboxForceInclude', async () => { futureSeconds: 3000, }, ) - await inbox.initialize(bridgeProxy.address) + await inbox.initialize(bridgeProxy.address, sequencerInbox.address) - await bridge.setInbox(inbox.address, true) + await bridgeAdmin.setDelayedInbox(inbox.address, true) + await bridgeAdmin.setSequencerInbox(sequencerInbox.address) const messageTester = (await ( await ethers.getContractFactory('MessageTester') diff --git a/contracts/test/contract/utils.ts b/contracts/test/contract/utils.ts index 
43548c6c19..6636685b95 100644 --- a/contracts/test/contract/utils.ts +++ b/contracts/test/contract/utils.ts @@ -1,5 +1,21 @@ import { ethers } from 'hardhat' import { Signer } from '@ethersproject/abstract-signer' +import { getAddress } from '@ethersproject/address' + +const ADDRESS_ALIAS_OFFSET = BigInt("0x1111000000000000000000000000000000001111"); +const ADDRESS_BIT_LENGTH = 160; +const ADDRESS_NIBBLE_LENGTH = ADDRESS_BIT_LENGTH / 4; + +export const applyAlias = (addr: string) => { + // we use BigInts in here to allow for proper overflow behaviour + // BigInt.asUintN calculates the correct positive modulus + return getAddress( + "0x" + + BigInt.asUintN(ADDRESS_BIT_LENGTH, BigInt(addr) + ADDRESS_ALIAS_OFFSET) + .toString(16) + .padStart(ADDRESS_NIBBLE_LENGTH, "0") + ); +}; export async function initializeAccounts(): Promise { const [account0] = await ethers.getSigners() diff --git a/contracts/test/prover/one-step-proof.ts b/contracts/test/prover/one-step-proof.ts index eb4d3d1703..56264490b0 100644 --- a/contracts/test/prover/one-step-proof.ts +++ b/contracts/test/prover/one-step-proof.ts @@ -15,13 +15,14 @@ async function sendTestMessages() { const path = msgRoot + "msg" + String(msgNum) + ".bin"; const buf = fs.readFileSync(path); await inbox.sendL2MessageFromOrigin(buf, gasOpts); - await seqInbox.addSequencerL2BatchFromOrigin(msgNum, buf, 0, ethers.constants.AddressZero, gasOpts); + // Don't use the FromOrigin variant as the stub will fail to create a batch posting report + await seqInbox.addSequencerL2Batch(msgNum, buf, 0, ethers.constants.AddressZero, gasOpts); } } describe("OneStepProof", function () { - const root = "./test/prover/proofs/"; - const dir = fs.readdirSync(root); + const arbProofsRoot = "./test/prover/proofs/"; + const specProofsRoot = "./test/prover/spec-proofs/"; before(async function () { await run("deploy", { "tags": "OneStepProofEntry" }); @@ -30,12 +31,22 @@ describe("OneStepProof", function () { await sendTestMessages(); }) - it("should 
deploy test harness", function() {}) - - for (let file of dir) { + const proofs = []; + for (let file of fs.readdirSync(arbProofsRoot)) { if (!file.endsWith(".json")) continue; + proofs.push([arbProofsRoot + file, file]); + } + if (fs.existsSync(specProofsRoot)) { + for (let file of fs.readdirSync(specProofsRoot)) { + if (!file.endsWith(".json")) continue; + proofs.push([specProofsRoot + file, file]); + } + } + + it("should deploy test harness with " + proofs.length + " proofs", function() {}) + + for (const [path, file] of proofs) { it("Should pass " + file + " proofs", async function () { - let path = root + file; let proofs = JSON.parse(fs.readFileSync(path).toString('utf8')); const osp = await ethers.getContract("OneStepProofEntry"); const seqInbox = await ethers.getContract("SequencerInboxStub"); @@ -48,9 +59,9 @@ describe("OneStepProof", function () { const proof = proofs[i]; isdone.push(false); const inboxLimit = 1000000; - const promise = osp.proveOneStep([inboxLimit, seqInbox.address, bridge.address], i, [...Buffer.from(proof.before, "hex")], [...Buffer.from(proof.proof, "hex")]) + const promise = osp.proveOneStep([inboxLimit, bridge.address], i, [...Buffer.from(proof.before, "hex")], [...Buffer.from(proof.proof, "hex")]) .catch((err: any) => { - console.error("Error executing proof " + i); + console.error("Error executing proof " + i, err.reason); throw err; }) .then((after: any) => assert.equal(after, "0x" + proof.after, "After state doesn't match after proof " + i)) @@ -65,6 +76,9 @@ describe("OneStepProof", function () { let stillWaiting = [] do { + if (promises.length == 0) { + break; + } const finished: any = await Promise.race(promises.map((p, k) => p.then((_: any) => k))); if (finished == promises.length - 1) { promises.pop() diff --git a/das/aggregator.go b/das/aggregator.go index f3dd2b777a..ff55f1582a 100644 --- a/das/aggregator.go +++ b/das/aggregator.go @@ -51,7 +51,7 @@ type Aggregator struct { config AggregatorConfig services 
[]ServiceDetails - /// calculated fields + // calculated fields requiredServicesForStore int maxAllowedServiceStoreFailures int keysetHash [32]byte diff --git a/docs/arbos/ArbOS.md b/docs/arbos/ArbOS.md index c59ac5f93f..032b9cd9a3 100644 --- a/docs/arbos/ArbOS.md +++ b/docs/arbos/ArbOS.md @@ -106,11 +106,11 @@ This component maintains the last 256 L1 block hashes in a circular buffer. This ### [`l1PricingState`][l1PricingState_link] -In addition to supporting the [`ArbAggregator precompile`](Precompiles.md#ArbAggregator), the L1 pricing state provides tools for determining the L1 component of a transaction's gas costs. Aggregators, whose compressed batches are the messages ArbOS uses to build L2 blocks, inform ArbOS of their compression ratios so that L2 fees can be fairly allocated between the network fee account and the aggregator posting a given transaction. +In addition to supporting the [`ArbAggregator precompile`](Precompiles.md#ArbAggregator), the L1 pricing state provides tools for determining the L1 component of a transaction's gas costs. This part of the state tracks both the total amount of funds collected from transactions in L1 gas fees, as well as the funds spent by batch posters to post data batches on L1. + +Based on this information, ArbOS maintains an L1 data fee, also tracked as part of this state, which determines how much transactions will be charged for L1 fees. ArbOS dynamically adjusts this value so that fees collected are approximately equal to batch posting costs, over time. -Theoretically an aggregator can lie about its compression ratio to slightly inflate the fees their users (and only their users) pay, but a malicious aggregator already has the ability to extract MEV from them so no trust assumptions change. Lying about the ratio being higher than it is is self defeating since it burns money, as is choosing to not compress their users' transactions. 
-The L1 pricing state also keeps a running estimate of the L1 gas price, which updates as ArbOS processes delayed messages. [l1PricingState_link]: https://github.com/OffchainLabs/nitro/blob/fa36a0f138b8a7e684194f9840315d80c390f324/arbos/l1pricing/l1pricing.go#L16 diff --git a/docs/arbos/Gas.md b/docs/arbos/Gas.md index 436e83ec1e..d3c15ca5cb 100644 --- a/docs/arbos/Gas.md +++ b/docs/arbos/Gas.md @@ -11,12 +11,7 @@ Though subject to change when batch-compression pricing is fully implemented, [t [drop_l1_link]: https://github.com/OffchainLabs/nitro/blob/2ba6d1aa45abcc46c28f3d4f560691ce5a396af8/arbos/l1pricing/l1pricing.go#L232 ## Tips in L2 -While tips are not advised for those using the sequencer, which prioritizes transactions on a first-come first-served basis, 3rd-party aggregators may choose to order txes based on tips. A user specifies a tip by setting a gas price in excess of the basefee and will [pay that difference][pay_difference_link] on the amount of gas the tx uses. - -A poster receives the tip only when the user has set them as their [preferred aggregator](Precompiles.md#ArbAggregator). Otherwise the tip [goes to the network fee collector][goes_to_network_link]. This disincentives unpreferred aggregators from racing to post txes with large tips. - -[pay_difference_link]: https://github.com/OffchainLabs/go-ethereum/blob/edf6a19157606070b6a6660c8decc513e2408cb7/core/state_transition.go#L358 -[goes_to_network_link]: https://github.com/OffchainLabs/nitro/blob/c93c806a5cfe99f92a534d3c952a83c3c8b3088c/arbos/tx_processor.go#L262 +The sequencer prioritizes transactions on a first-come first-served basis. Because tips do not make sense in this model, they are ignored. Arbitrum users always just pay the basefee regardless of the tip they choose. ## Gas Estimating Retryables When a transaction schedules another, the subsequent tx's execution [will be included][estimation_inclusion_link] when estimating gas via the node's RPC. 
A tx's gas estimate, then, can only be found if all the txes succeed at a given gas limit. This is especially important when working with retryables and scheduling redeem attempts. diff --git a/docs/arbos/L1 Pricing.md b/docs/arbos/L1 Pricing.md new file mode 100644 index 0000000000..6066d2f93f --- /dev/null +++ b/docs/arbos/L1 Pricing.md @@ -0,0 +1,49 @@ +# L1 Pricing + +ArbOS dynamically prices L1 gas, with the price adjusting to ensure that the amount collected in L1 gas fees is as close as possible to the costs that must be covered, over time. + +## L1 fee collection + +A transaction is charged for L1 gas if and only if it arrived as part of a sequencer batch. This means that someone would have paid for L1 gas to post the transaction on the L1 chain. + +The total fee charged to a transaction is the product of the transaction's estimated size, and the current L1 Gas Basefee. + +The estimated size is measured in L1 gas and is calculated as follows: first, compress the transaction's data using the brotli-zero algorithm, then multiply the size of the result by 16. (16 is because L1 charges 16 gas per byte. L1 charges less for bytes that are zero, but that doesn't make sense here.) Brotli-zero is used in order to reward users for posting transactions that are compressible. Ideally we would like to reward for posting transactions that contribute to the compressibility (using the brotli compressor) of the entire batch, but that is a difficult notion to define and in any case would be too expensive to compute at L2. Brotli-zero is an approximation that is cheap enough to compute. + +L1 gas fee funds that are collected from transactions are transferred to a special [`L1PricerFundsPool`][L1PricerFundsPool_link] account, so that account's balance represents the amount of funds that have been collected and are available to pay for costs. 
+ +The L1 pricer also records the total number of "data units" (the sum of the estimated sizes, after multiplying by 16) that have been received. + +[L1PricerFundsPool_link]: https://github.com/OffchainLabs/nitro/blob/3f4939df1990320310e7f39e8abb32d5c4d8045f/arbos/l1pricing/l1pricing.go#L46 + +## L1 costs + +There are two types of L1 costs: batch posting costs, and rewards. + +Batch posting costs reflect the actual cost a batch poster pays to post batch data on L1. Whenever a batch is posted, the L1 contract that records the batch will send a special "batch posting report" message to L2 ArbOS, reporting who paid for the batch and what the L1 basefee was at the time. This message is placed in the chain's delayed inbox, so it will be delivered to L2 ArbOS after some delay. + +When a batch posting report message arrives at L2, ArbOS computes the cost of the referenced batch by multiplying the reported basefee by the batch's data cost. (ArbOS retrieves the batch's data from its inbox state, and computes the L1 gas that the batch would have used by counting the number of zero bytes and non-zero bytes in the batch.) The resulting cost is recorded by the pricer as funds due to the party who is reported to have submitted the batch. + +The second type of L1 cost is an optional (per chain) per-unit reward for handling transaction calldata. In general the reward might be paid to the sequencer, or to members of the Data Availability Committee in an AnyTrust chain, or to anyone else who incurs per-calldata-byte costs on behalf of the chain. The reward is a fixed number of wei per data unit, and is paid to a single address. + +The L1 pricer keeps track of the funds due to the reward address, based on the number of data units handled so far. This amount is updated whenever a batch posting report arrives at L2. + +## Allocating funds and paying what is owed + +When a batch posting report is processed at L2, the pricer allocates some of the collected funds to pay for costs incurred.
To allocate funds, the pricer considers three timestamps: + +* `currentTime` is the current time, when the batch posting report message arrives at L2 +* `updateTime` is the time at which the reported batch was submitted (which will typically be around 20 minutes before currentTime) +* `lastUpdateTime` is the time at which the previous reported batch was submitted + +The pricer computes an allocation fraction `F = (updateTime-lastUpdateTime) / (currentTime-lastUpdateTime)` and allocates a fraction `F` of funds in the `L1PricerFundsPool` to the current report. The intuition is that the pricer knows how many funds have been collected between `lastUpdateTime` and `currentTime`, and we want to figure out how many of those funds to allocate to the interval between `lastUpdateTime` and `updateTime`. The given formula is the correct allocation, if we assume that funds arrived at a uniform rate during the interval between `lastUpdateTime` and `currentTime`. The pricer similarly allocates a portion of the total data units to the current report. + +Now the pricer pays out the allocated funds to cover the rewards due and the amounts due to batch posters, reducing the balance due to each party as a result. If the allocated funds aren't sufficient to cover everything that is due, some amount due will remain. If all of the amount due can be covered with the allocated funds, any remaining allocated funds are returned to the `L1PricerFundsPool`. + +## Adjusting the L1 Gas Basefee + +After allocating funds and paying what is owed, the L1 Pricer adjusts the L1 Gas Basefee. The goal of this process is to find a value that will cause the amount collected to equal the amount owed over time. + +The algorithm first computes the surplus (funds in the `L1PricerFundsPool`, minus total funds due), which might be negative. If the surplus is positive, the L1 Gas Basefee is reduced, so that the amount collected over a fixed future interval will be reduced by exactly the surplus. 
If the surplus is negative, the Basefee is increased so that the shortfall will be eliminated over the same fixed future interval. + +A second term is added to the L1 Gas Basefee, based on the derivative of the surplus (surplus at present, minus the surplus after the previous batch posting report was processed). This term, which is multiplied by a smoothing factor to reduce fluctuations, will reduce the Basefee if the surplus is increasing, and increase the Basefee if the surplus is shrinking. diff --git a/go-ethereum b/go-ethereum index d6e1b6a8bc..ce2d49b651 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit d6e1b6a8bc97ec9dcddd1c53ff3b4086426feea1 +Subproject commit ce2d49b6514987175cf689dd462ccc6c154c7880 diff --git a/nodeInterface/NodeInterface.go b/nodeInterface/NodeInterface.go index b507ad4418..d5fa7ace7e 100644 --- a/nodeInterface/NodeInterface.go +++ b/nodeInterface/NodeInterface.go @@ -10,12 +10,17 @@ import ( "math/big" "sort" + "github.com/ethereum/go-ethereum/arbitrum" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/rpc" "github.com/offchainlabs/nitro/arbnode" + "github.com/offchainlabs/nitro/arbos" + "github.com/offchainlabs/nitro/arbos/l1pricing" "github.com/offchainlabs/nitro/arbos/retryables" "github.com/offchainlabs/nitro/arbos/util" "github.com/offchainlabs/nitro/arbutil" @@ -34,6 +39,7 @@ type NodeInterface struct { Address addr backend core.NodeInterfaceBackendAPI context context.Context + header *types.Header sourceMessage types.Message returnMessage struct { message *types.Message @@ -124,7 +130,7 @@ func (n NodeInterface) EstimateRetryableTicket( pRetryTo = &to } - l1BaseFee, _ := c.State.L1PricingState().L1BaseFeeEstimateWei() + l1BaseFee, _ :=
c.State.L1PricingState().PricePerUnit() maxSubmissionFee := retryables.RetryableSubmissionFee(len(data), l1BaseFee) submitTx := &types.ArbitrumSubmitRetryableTx{ @@ -413,6 +419,77 @@ func (n NodeInterface) ConstructOutboxProof(c ctx, evm mech, size, leaf uint64) return send, root, hashes32, nil } +func (n NodeInterface) GasEstimateComponents( + c ctx, evm mech, value huge, to addr, contractCreation bool, data []byte, +) (uint64, uint64, huge, huge, error) { + node, err := arbNodeFromNodeInterfaceBackend(n.backend) + if err != nil { + return 0, 0, nil, nil, err + } + + if to == types.NodeInterfaceAddress || to == types.NodeInterfaceDebugAddress { + return 0, 0, nil, nil, errors.New("cannot estimate virtual contract") + } + + msg := n.sourceMessage + context := n.context + chainid := evm.ChainConfig().ChainID + backend := node.Backend.APIBackend() + gasCap := backend.RPCGasCap() + block := rpc.BlockNumberOrHashWithHash(n.header.Hash(), false) + + from := msg.From() + gas := msg.Gas() + nonce := msg.Nonce() + maxFeePerGas := msg.GasFeeCap() + maxPriorityFeePerGas := msg.GasTipCap() + + args := arbitrum.TransactionArgs{ + ChainID: (*hexutil.Big)(chainid), + From: &from, + Gas: (*hexutil.Uint64)(&gas), + MaxFeePerGas: (*hexutil.Big)(maxFeePerGas), + MaxPriorityFeePerGas: (*hexutil.Big)(maxPriorityFeePerGas), + Value: (*hexutil.Big)(value), + Nonce: (*hexutil.Uint64)(&nonce), + Data: (*hexutil.Bytes)(&data), + } + if !contractCreation { + args.To = &to + } + + totalRaw, err := arbitrum.EstimateGas(context, backend, args, block, gasCap) + if err != nil { + return 0, 0, nil, nil, err + } + total := uint64(totalRaw) + + pricing := c.State.L1PricingState() + + msg, err = args.ToMessage(gasCap, n.header, evm.StateDB.(*state.StateDB)) + if err != nil { + return 0, 0, nil, nil, err + } + feeForL1, _ := pricing.PosterDataCost(msg, l1pricing.BatchPosterAddress) + + baseFee, err := c.State.L2PricingState().BaseFeeWei() + if err != nil { + return 0, 0, nil, nil, err + } + 
l1BaseFeeEstimate, err := pricing.L1BaseFeeEstimate() + if err != nil { + return 0, 0, nil, nil, err + } + + // Compute the fee paid for L1 in L2 terms + // See in GasChargingHook that this does not induce truncation error + // + feeForL1 = arbmath.BigMulByBips(feeForL1, arbos.GasEstimationL1PricePadding) + gasForL1 := arbmath.BigDiv(feeForL1, baseFee).Uint64() + + return total, gasForL1, baseFee, l1BaseFeeEstimate, nil +} + func findBatchContainingBlock(node *arbnode.Node, genesis uint64, block uint64) (uint64, error) { if block <= genesis { return 0, fmt.Errorf("%wblock %v is part of genesis", blockInGenesis, block) diff --git a/nodeInterface/NodeInterfaceDebug.go b/nodeInterface/NodeInterfaceDebug.go index c5bdc1b6e9..b64f10420c 100644 --- a/nodeInterface/NodeInterfaceDebug.go +++ b/nodeInterface/NodeInterfaceDebug.go @@ -17,6 +17,7 @@ type NodeInterfaceDebug struct { Address addr backend core.NodeInterfaceBackendAPI context context.Context + header *types.Header sourceMessage types.Message returnMessage struct { message *types.Message diff --git a/nodeInterface/virtual-contracts.go b/nodeInterface/virtual-contracts.go index f7b130aecf..932606e34b 100644 --- a/nodeInterface/virtual-contracts.go +++ b/nodeInterface/virtual-contracts.go @@ -66,6 +66,7 @@ func init() { duplicate := *nodeInterfaceImpl duplicate.backend = backend duplicate.context = ctx + duplicate.header = header duplicate.sourceMessage = msg duplicate.returnMessage.message = returnMessage duplicate.returnMessage.changed = &swapMessages @@ -75,6 +76,7 @@ func init() { duplicate := *nodeInterfaceDebugImpl duplicate.backend = backend duplicate.context = ctx + duplicate.header = header duplicate.sourceMessage = msg duplicate.returnMessage.message = returnMessage duplicate.returnMessage.changed = &swapMessages @@ -125,17 +127,17 @@ func init() { log.Error("failed to open ArbOS state", "err", err) return } - poster, _ := state.L1PricingState().ReimbursableAggregatorForSender(msg.From()) - if poster == 
nil || header.BaseFee.Sign() == 0 { + if header.BaseFee.Sign() == 0 { // if gas is free or there's no reimbursable poster, the user won't pay for L1 data costs return } - posterCost, _ := state.L1PricingState().PosterDataCost(msg, msg.From(), *poster) + + posterCost, _ := state.L1PricingState().PosterDataCost(msg, header.Coinbase) posterCostInL2Gas := arbmath.BigToUintSaturating(arbmath.BigDiv(posterCost, header.BaseFee)) *gascap = arbmath.SaturatingUAdd(*gascap, posterCostInL2Gas) } - core.GetArbOSComputeRate = func(statedb *state.StateDB) (float64, error) { + core.GetArbOSSpeedLimitPerSecond = func(statedb *state.StateDB) (uint64, error) { arbosVersion := arbosState.ArbOSVersion(statedb) if arbosVersion == 0 { return 0.0, errors.New("ArbOS not installed") @@ -151,12 +153,7 @@ func init() { log.Error("failed to get the speed limit", "err", err) return 0.0, err } - rateEstimate, err := pricing.RateEstimate() - if err != nil { - log.Error("failed to get the rate estimate", "err", err) - return 0.0, err - } - return float64(rateEstimate) / float64(speedLimit), nil + return speedLimit, nil } arbSys, err := precompilesgen.ArbSysMetaData.GetAbi() diff --git a/precompiles/ArbAggregator.go b/precompiles/ArbAggregator.go index f13fd84e19..224a104bc7 100644 --- a/precompiles/ArbAggregator.go +++ b/precompiles/ArbAggregator.go @@ -5,12 +5,8 @@ package precompiles import ( "errors" + "github.com/offchainlabs/nitro/arbos/l1pricing" "math/big" - - "github.com/ethereum/go-ethereum/common" - - "github.com/offchainlabs/nitro/arbos/arbosState" - "github.com/offchainlabs/nitro/util/arbmath" ) // Provides aggregators and their users methods for configuring how they participate in L1 aggregation. 
@@ -20,87 +16,74 @@ type ArbAggregator struct { Address addr // 0x6d } -// Gets an account's preferred aggregator +var ErrNotOwner = errors.New("must be called by chain owner") + +// [Deprecated] func (con ArbAggregator) GetPreferredAggregator(c ctx, evm mech, address addr) (prefAgg addr, isDefault bool, err error) { - l1p := c.State.L1PricingState() - maybePrefAgg, err := l1p.UserSpecifiedAggregator(address) - if err != nil { - return common.Address{}, false, err - } - if maybePrefAgg != nil { - return *maybePrefAgg, false, nil - } - maybeReimbursableAgg, err := l1p.ReimbursableAggregatorForSender(address) - if err != nil || maybeReimbursableAgg == nil { - return common.Address{}, false, err - } - return *maybeReimbursableAgg, true, nil + return l1pricing.BatchPosterAddress, true, err } -// Sets the caller's preferred aggregator to that provided -func (con ArbAggregator) SetPreferredAggregator(c ctx, evm mech, prefAgg addr) error { - var maybePrefAgg *common.Address - if prefAgg != (common.Address{}) { - maybePrefAgg = &prefAgg - } - return c.State.L1PricingState().SetUserSpecifiedAggregator(c.caller, maybePrefAgg) +// [Deprecated] +func (con ArbAggregator) GetDefaultAggregator(c ctx, evm mech) (addr, error) { + return l1pricing.BatchPosterAddress, nil } -// Gets the chain's default aggregator -func (con ArbAggregator) GetDefaultAggregator(c ctx, evm mech) (addr, error) { - return c.State.L1PricingState().DefaultAggregator() +// Get the addresses of all current batch posters +func (con ArbAggregator) GetBatchPosters(c ctx, evm mech) ([]addr, error) { + return c.State.L1PricingState().BatchPosterTable().AllPosters(65536) } -// Sets the chain's default aggregator (caller must be the current default aggregator, its fee collector, or an owner) -func (con ArbAggregator) SetDefaultAggregator(c ctx, evm mech, newDefault addr) error { - l1State := c.State.L1PricingState() - defaultAgg, err := l1State.DefaultAggregator() +func (con ArbAggregator) AddBatchPoster(c ctx, evm 
mech, newBatchPoster addr) error { + isOwner, err := c.State.ChainOwners().IsMember(c.caller) if err != nil { return err } - allowed, err := accountIsAggregatorOrCollectorOrOwner(c.caller, defaultAgg, c.State) + if !isOwner { + return ErrNotOwner + } + batchPosterTable := c.State.L1PricingState().BatchPosterTable() + isBatchPoster, err := batchPosterTable.ContainsPoster(newBatchPoster) if err != nil { return err } - if !allowed { - return errors.New("Only the current default (or its fee collector / chain owner) may change the default") + if !isBatchPoster { + _, err = batchPosterTable.AddPoster(newBatchPoster, newBatchPoster) + if err != nil { + return err + } } - return l1State.SetDefaultAggregator(newDefault) + return nil } -// Get the aggregator's compression ratio, measured in basis points -func (con ArbAggregator) GetCompressionRatio(c ctx, evm mech, aggregator addr) (uint64, error) { - ratio, err := c.State.L1PricingState().AggregatorCompressionRatio(aggregator) - return uint64(ratio), err +// Gets a batch poster's fee collector +func (con ArbAggregator) GetFeeCollector(c ctx, evm mech, batchPoster addr) (addr, error) { + posterInfo, err := c.State.L1PricingState().BatchPosterTable().OpenPoster(batchPoster, false) + if err != nil { + return addr{}, err + } + return posterInfo.PayTo() } -// Set the aggregator's compression ratio, measured in basis points -func (con ArbAggregator) SetCompressionRatio(c ctx, evm mech, aggregator addr, newRatio uint64) error { - allowed, err := accountIsAggregatorOrCollectorOrOwner(c.caller, aggregator, c.State) +// Sets a batch poster's fee collector (caller must be the batch poster, its fee collector, or an owner) +func (con ArbAggregator) SetFeeCollector(c ctx, evm mech, batchPoster addr, newFeeCollector addr) error { + posterInfo, err := c.State.L1PricingState().BatchPosterTable().OpenPoster(batchPoster, false) if err != nil { return err } - if !allowed { - return errors.New("Only an aggregator (or its fee collector / chain 
owner) may change its compression ratio") - } - return c.State.L1PricingState().SetAggregatorCompressionRatio(aggregator, arbmath.Bips(newRatio)) -} - -// Gets an aggregator's fee collector -func (con ArbAggregator) GetFeeCollector(c ctx, evm mech, aggregator addr) (addr, error) { - return c.State.L1PricingState().AggregatorFeeCollector(aggregator) -} - -// Sets an aggregator's fee collector (caller must be the aggregator, its fee collector, or an owner) -func (con ArbAggregator) SetFeeCollector(c ctx, evm mech, aggregator addr, newFeeCollector addr) error { - allowed, err := accountIsAggregatorOrCollectorOrOwner(c.caller, aggregator, c.State) + oldFeeCollector, err := posterInfo.PayTo() if err != nil { return err } - if !allowed { - return errors.New("Only an aggregator (or its fee collector / chain owner) may change its fee collector") + if c.caller != batchPoster && c.caller != oldFeeCollector { + isOwner, err := c.State.ChainOwners().IsMember(c.caller) + if err != nil { + return err + } + if !isOwner { + return errors.New("Only a batch poster (or its fee collector / chain owner) may change its fee collector") + } } - return c.State.L1PricingState().SetAggregatorFeeCollector(aggregator, newFeeCollector) + return posterInfo.SetPayTo(newFeeCollector) } // Gets an aggregator's current fixed fee to submit a tx @@ -114,15 +97,3 @@ func (con ArbAggregator) SetTxBaseFee(c ctx, evm mech, aggregator addr, feeInL1G // This is deprecated and is now a no-op. 
return nil } - -func accountIsAggregatorOrCollectorOrOwner(account, aggregator addr, state *arbosState.ArbosState) (bool, error) { - if account == aggregator { - return true, nil - } - l1State := state.L1PricingState() - collector, err := l1State.AggregatorFeeCollector(aggregator) - if account == collector || err != nil { - return true, err - } - return state.ChainOwners().IsMember(account) -} diff --git a/precompiles/ArbAggregator_test.go b/precompiles/ArbAggregator_test.go index 156131a0cb..ce1cebde5d 100644 --- a/precompiles/ArbAggregator_test.go +++ b/precompiles/ArbAggregator_test.go @@ -12,76 +12,30 @@ import ( "github.com/offchainlabs/nitro/arbos/l1pricing" ) -func TestDefaultAggregator(t *testing.T) { +func TestArbAggregatorBatchPosters(t *testing.T) { evm := newMockEVMForTesting() context := testContext(common.Address{}, evm) addr := common.BytesToAddress(crypto.Keccak256([]byte{})[:20]) - // initial default aggregator should be zero address - def, err := ArbAggregator{}.GetDefaultAggregator(context, evm) + // initially should have one batch poster + bps, err := ArbAggregator{}.GetBatchPosters(context, evm) Require(t, err) - if def != (l1pricing.SequencerAddress) { + if len(bps) != 1 { Fail(t) } - // set default aggregator to addr + // add addr as a batch poster Require(t, ArbDebug{}.BecomeChainOwner(context, evm)) - Require(t, ArbAggregator{}.SetDefaultAggregator(context, evm, addr)) + Require(t, ArbAggregator{}.AddBatchPoster(context, evm, addr)) - // default aggregator should now be addr - res, err := ArbAggregator{}.GetDefaultAggregator(context, evm) + // there should now be two batch posters, and addr should be one of them + bps, err = ArbAggregator{}.GetBatchPosters(context, evm) Require(t, err) - if res != addr { + if len(bps) != 2 { Fail(t) } -} - -func TestPreferredAggregator(t *testing.T) { - evm := newMockEVMForTesting() - agg := ArbAggregator{} - - userAddr := common.BytesToAddress(crypto.Keccak256([]byte{0})[:20]) - defaultAggAddr := 
common.BytesToAddress(crypto.Keccak256([]byte{1})[:20]) - prefAggAddr := common.BytesToAddress(crypto.Keccak256([]byte{2})[:20]) - - callerCtx := testContext(common.Address{}, evm) - userCtx := testContext(userAddr, evm) - - // initial preferred aggregator should be the default of zero address - res, isDefault, err := ArbAggregator{}.GetPreferredAggregator(callerCtx, evm, userAddr) - Require(t, err) - if !isDefault { - Fail(t) - } - if res != (l1pricing.SequencerAddress) { - Fail(t) - } - - // set default aggregator - Require(t, ArbDebug{}.BecomeChainOwner(callerCtx, evm)) - Require(t, agg.SetDefaultAggregator(callerCtx, evm, defaultAggAddr)) - - // preferred aggregator should be the new default address - res, isDefault, err = agg.GetPreferredAggregator(callerCtx, evm, userAddr) - Require(t, err) - if !isDefault { - Fail(t) - } - if res != defaultAggAddr { - Fail(t) - } - - // set preferred aggregator - Require(t, agg.SetPreferredAggregator(userCtx, evm, prefAggAddr)) - - // preferred aggregator should now be prefAggAddr - res, isDefault, err = agg.GetPreferredAggregator(callerCtx, evm, userAddr) - Require(t, err) - if isDefault { - Fail(t) - } - if res != prefAggAddr { + if bps[0] != addr && bps[1] != addr { Fail(t) } } @@ -90,7 +44,7 @@ func TestFeeCollector(t *testing.T) { evm := newMockEVMForTesting() agg := ArbAggregator{} - aggAddr := common.BytesToAddress(crypto.Keccak256([]byte{0})[:20]) + aggAddr := l1pricing.BatchPosterAddress collectorAddr := common.BytesToAddress(crypto.Keccak256([]byte{1})[:20]) impostorAddr := common.BytesToAddress(crypto.Keccak256([]byte{2})[:20]) diff --git a/precompiles/ArbBLS.go b/precompiles/ArbBLS.go index 760f5099c0..19e317d15d 100644 --- a/precompiles/ArbBLS.go +++ b/precompiles/ArbBLS.go @@ -3,49 +3,7 @@ package precompiles -import ( - "github.com/offchainlabs/nitro/blsSignatures" -) - // Provides a registry of BLS public keys for accounts. 
type ArbBLS struct { Address addr } - -// Deprecated -- equivalent to registerAltBN128 -func (con ArbBLS) Register(c ctx, evm mech, x0, x1, y0, y1 huge) error { - return con.RegisterAltBN128(c, evm, x0, x1, y0, y1) -} - -// Deprecated -- equivalent to getAltBN128 -func (con ArbBLS) GetPublicKey(c ctx, evm mech, address addr) (huge, huge, huge, huge, error) { - return con.GetAltBN128(c, evm, address) -} - -// Associate an AltBN128 public key with the caller's address -func (con ArbBLS) RegisterAltBN128(c ctx, evm mech, x0, x1, y0, y1 huge) error { - return c.State.BLSTable().RegisterLegacyPublicKey(c.caller, x0, x1, y0, y1) -} - -// Get the AltBN128 public key associated with an address (revert if there isn't one) -func (con ArbBLS) GetAltBN128(c ctx, evm mech, address addr) (huge, huge, huge, huge, error) { - return c.State.BLSTable().GetLegacyPublicKey(address) -} - -// Associate a BLS 12-381 public key with the caller's address -func (con ArbBLS) RegisterBLS12381(c ctx, evm mech, keyBuf []byte) error { - key, err := blsSignatures.PublicKeyFromBytes(keyBuf, false) - if err != nil { - return err - } - return c.State.BLSTable().RegisterBLS12381PublicKey(c.caller, key) -} - -// Get the BLS 12-381 public key associated with an address (revert if there isn't one) -func (con ArbBLS) GetBLS12381(c ctx, evm mech, address addr) ([]byte, error) { - pubKey, err := c.State.BLSTable().GetBLS12381PublicKey(address) - if err != nil { - return nil, err - } - return blsSignatures.PublicKeyToBytes(pubKey), nil -} diff --git a/precompiles/ArbBLS_test.go b/precompiles/ArbBLS_test.go deleted file mode 100644 index 0f1b40ba2a..0000000000 --- a/precompiles/ArbBLS_test.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2021-2022, Offchain Labs, Inc. 
-// For license information, see https://github.com/nitro/blob/master/LICENSE - -package precompiles - -import ( - "testing" - - "github.com/offchainlabs/nitro/blsSignatures" - - "github.com/ethereum/go-ethereum/common" -) - -func TestArbBLS_12381(t *testing.T) { - evm := newMockEVMForTesting() - abls := ArbBLS{} - addr1 := common.BytesToAddress([]byte{24}) - addr2 := common.BytesToAddress([]byte{42}) - context1 := testContext(addr1, evm) - context2 := testContext(addr2, evm) - - _, err := abls.GetBLS12381(context1, evm, addr2) - if err == nil { - Fail(t) - } - - pubKey2, privKey, err := blsSignatures.GenerateKeys() - Require(t, err) - err = abls.RegisterBLS12381(context2, evm, blsSignatures.PublicKeyToBytes(pubKey2)) - Require(t, err) - - recoveredPubKeyBytes2, err := abls.GetBLS12381(context1, evm, addr2) - Require(t, err) - recoveredPubKey2, err := blsSignatures.PublicKeyFromBytes(recoveredPubKeyBytes2, false) - Require(t, err) - - msg := []byte{3, 1, 4, 1, 5, 9, 2, 6} - sig, err := blsSignatures.SignMessage(privKey, msg) - Require(t, err) - success, err := blsSignatures.VerifySignature(sig, msg, recoveredPubKey2) - Require(t, err) - if !success { - Fail(t, "verification did not succeed") - } -} diff --git a/precompiles/ArbGasInfo.go b/precompiles/ArbGasInfo.go index efa1e626ae..3401ed9f2e 100644 --- a/precompiles/ArbGasInfo.go +++ b/precompiles/ArbGasInfo.go @@ -6,8 +6,6 @@ package precompiles import ( "math/big" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbos/l1pricing" "github.com/offchainlabs/nitro/arbos/storage" @@ -28,22 +26,17 @@ func (con ArbGasInfo) GetPricesInWeiWithAggregator( evm mech, aggregator addr, ) (huge, huge, huge, huge, huge, huge, error) { - l1GasPrice, err := c.State.L1PricingState().L1BaseFeeEstimateWei() + l1GasPrice, err := c.State.L1PricingState().PricePerUnit() if err != nil { return nil, nil, nil, nil, nil, nil, err } l2GasPrice := evm.Context.BaseFee - ratio, 
err := c.State.L1PricingState().AggregatorCompressionRatio(aggregator) - if err != nil { - return nil, nil, nil, nil, nil, nil, err - } // aggregators compress calldata, so we must estimate accordingly weiForL1Calldata := arbmath.BigMulByUint(l1GasPrice, params.TxDataNonZeroGasEIP2028) - perL1CalldataUnit := arbmath.BigDivByUint(arbmath.BigMulByBips(weiForL1Calldata, ratio), 16) // the cost of a simple tx without calldata - perL2Tx := arbmath.BigMulByUint(perL1CalldataUnit, 16*l1pricing.TxFixedCost) + perL2Tx := arbmath.BigMulByUint(weiForL1Calldata, l1pricing.TxFixedCost) // nitro's compute-centric l2 gas pricing has no special compute component that rises independently perArbGasBase := l2GasPrice @@ -52,37 +45,25 @@ func (con ArbGasInfo) GetPricesInWeiWithAggregator( weiForL2Storage := arbmath.BigMul(l2GasPrice, storageArbGas) - return perL2Tx, perL1CalldataUnit, weiForL2Storage, perArbGasBase, perArbGasCongestion, perArbGasTotal, nil + return perL2Tx, weiForL1Calldata, weiForL2Storage, perArbGasBase, perArbGasCongestion, perArbGasTotal, nil } // Get prices in wei when using the caller's preferred aggregator func (con ArbGasInfo) GetPricesInWei(c ctx, evm mech) (huge, huge, huge, huge, huge, huge, error) { - maybeAggregator, err := c.State.L1PricingState().ReimbursableAggregatorForSender(c.caller) - if err != nil { - return nil, nil, nil, nil, nil, nil, err - } - if maybeAggregator == nil { - return con.GetPricesInWeiWithAggregator(c, evm, common.Address{}) - } - return con.GetPricesInWeiWithAggregator(c, evm, *maybeAggregator) + return con.GetPricesInWeiWithAggregator(c, evm, addr{}) } // Get prices in ArbGas when using the provided aggregator func (con ArbGasInfo) GetPricesInArbGasWithAggregator(c ctx, evm mech, aggregator addr) (huge, huge, huge, error) { - l1GasPrice, err := c.State.L1PricingState().L1BaseFeeEstimateWei() + l1GasPrice, err := c.State.L1PricingState().PricePerUnit() if err != nil { return nil, nil, nil, err } l2GasPrice := evm.Context.BaseFee 
- ratio, err := c.State.L1PricingState().AggregatorCompressionRatio(aggregator) - if err != nil { - return nil, nil, nil, err - } // aggregators compress calldata, so we must estimate accordingly weiForL1Calldata := arbmath.BigMulByUint(l1GasPrice, params.TxDataNonZeroGasEIP2028) - compressedCharge := arbmath.BigMulByBips(weiForL1Calldata, ratio) - gasForL1Calldata := arbmath.BigDiv(compressedCharge, l2GasPrice) + gasForL1Calldata := arbmath.BigDiv(weiForL1Calldata, l2GasPrice) perL2Tx := big.NewInt(l1pricing.TxFixedCost) return perL2Tx, gasForL1Calldata, storageArbGas, nil @@ -90,23 +71,15 @@ func (con ArbGasInfo) GetPricesInArbGasWithAggregator(c ctx, evm mech, aggregato // Get prices in ArbGas when using the caller's preferred aggregator func (con ArbGasInfo) GetPricesInArbGas(c ctx, evm mech) (huge, huge, huge, error) { - maybeAggregator, err := c.State.L1PricingState().ReimbursableAggregatorForSender(c.caller) - if err != nil { - return nil, nil, nil, err - } - if maybeAggregator == nil { - return con.GetPricesInArbGasWithAggregator(c, evm, common.Address{}) - } - return con.GetPricesInArbGasWithAggregator(c, evm, *maybeAggregator) + return con.GetPricesInArbGasWithAggregator(c, evm, addr{}) } // Get the rollup's speed limit, pool size, and tx gas limit func (con ArbGasInfo) GetGasAccountingParams(c ctx, evm mech) (huge, huge, huge, error) { l2pricing := c.State.L2PricingState() speedLimit, _ := l2pricing.SpeedLimitPerSecond() - gasPoolMax, _ := l2pricing.GasPoolMax() - maxTxGasLimit, err := l2pricing.MaxPerBlockGasLimit() - return arbmath.UintToBig(speedLimit), big.NewInt(gasPoolMax), arbmath.UintToBig(maxTxGasLimit), err + maxTxGasLimit, err := l2pricing.PerBlockGasLimit() + return arbmath.UintToBig(speedLimit), arbmath.UintToBig(maxTxGasLimit), arbmath.UintToBig(maxTxGasLimit), err } // Get the minimum gas price needed for a transaction to succeed @@ -114,46 +87,23 @@ func (con ArbGasInfo) GetMinimumGasPrice(c ctx, evm mech) (huge, error) { return 
c.State.L2PricingState().MinBaseFeeWei() } -// Get the number of seconds worth of the speed limit the gas pool contains -func (con ArbGasInfo) GetGasPoolSeconds(c ctx, evm mech) (uint64, error) { - return c.State.L2PricingState().GasPoolSeconds() -} - -// Get the target fullness in bips the pricing model will try to keep the pool at -func (con ArbGasInfo) GetGasPoolTarget(c ctx, evm mech) (uint64, error) { - target, err := c.State.L2PricingState().GasPoolTarget() - return uint64(target), err -} - -// Get the extent in bips to which the pricing model favors filling the pool over increasing speeds -func (con ArbGasInfo) GetGasPoolWeight(c ctx, evm mech) (uint64, error) { - weight, err := c.State.L2PricingState().GasPoolWeight() - return uint64(weight), err -} - -// Get ArbOS's estimate of the amount of gas being burnt per second -func (con ArbGasInfo) GetRateEstimate(c ctx, evm mech) (uint64, error) { - return c.State.L2PricingState().RateEstimate() -} - -// Get how slowly ArbOS updates its estimate the amount of gas being burnt per second -func (con ArbGasInfo) GetRateEstimateInertia(c ctx, evm mech) (uint64, error) { - return c.State.L2PricingState().RateEstimateInertia() -} - // Get the current estimate of the L1 basefee func (con ArbGasInfo) GetL1BaseFeeEstimate(c ctx, evm mech) (huge, error) { - return c.State.L1PricingState().L1BaseFeeEstimateWei() + return c.State.L1PricingState().PricePerUnit() } // Get how slowly ArbOS updates its estimate of the L1 basefee func (con ArbGasInfo) GetL1BaseFeeEstimateInertia(c ctx, evm mech) (uint64, error) { - return c.State.L1PricingState().L1BaseFeeEstimateInertia() + return c.State.L1PricingState().Inertia() } -// Deprecated -- Same as getL1BaseFeeEstimate() +// Get the current estimate of the L1 basefee func (con ArbGasInfo) GetL1GasPriceEstimate(c ctx, evm mech) (huge, error) { - return con.GetL1BaseFeeEstimate(c, evm) + ppu, err := c.State.L1PricingState().PricePerUnit() + if err != nil { + return nil, err + } + return 
arbmath.BigMulByUint(ppu, params.TxDataNonZeroGasEIP2028), nil } // Get the fee paid to the aggregator for posting this tx @@ -161,11 +111,6 @@ func (con ArbGasInfo) GetCurrentTxL1GasFees(c ctx, evm mech) (huge, error) { return c.txProcessor.PosterFee, nil } -// Get the amount of gas remaining in the gas pool -func (con ArbGasInfo) GetGasPool(c ctx, evm mech) (int64, error) { - return c.State.L2PricingState().GasPool_preExp() -} - // Get the backlogged amount of gas burnt in excess of the speed limit func (con ArbGasInfo) GetGasBacklog(c ctx, evm mech) (uint64, error) { return c.State.L2PricingState().GasBacklog() diff --git a/precompiles/ArbOwner.go b/precompiles/ArbOwner.go index 4c3d7f349a..230130520a 100644 --- a/precompiles/ArbOwner.go +++ b/precompiles/ArbOwner.go @@ -7,7 +7,6 @@ import ( "errors" "github.com/ethereum/go-ethereum/common" - "github.com/offchainlabs/nitro/util/arbmath" ) // This precompile provides owners with tools for managing the rollup. @@ -20,6 +19,10 @@ type ArbOwner struct { OwnerActsGasCost func(bytes4, addr, []byte) (uint64, error) } +var ( + ErrOutOfBounds = errors.New("value out of bounds") +) + // Add account as a chain owner func (con ArbOwner) AddChainOwner(c ctx, evm mech, newOwner addr) error { return c.State.ChainOwners().Add(newOwner) @@ -41,17 +44,12 @@ func (con ArbOwner) IsChainOwner(c ctx, evm mech, addr addr) (bool, error) { // Retrieves the list of chain owners func (con ArbOwner) GetAllChainOwners(c ctx, evm mech) ([]common.Address, error) { - return c.State.ChainOwners().AllMembers() -} - -// Sets the L1 basefee estimate directly, bypassing the autoregression -func (con ArbOwner) SetL1BaseFeeEstimate(c ctx, evm mech, priceInWei huge) error { - return c.State.L1PricingState().SetL1BaseFeeEstimateWei(priceInWei) + return c.State.ChainOwners().AllMembers(65536) } // Set how slowly ArbOS updates its estimate of the L1 basefee func (con ArbOwner) SetL1BaseFeeEstimateInertia(c ctx, evm mech, inertia uint64) error { - return 
c.State.L1PricingState().SetL1BaseFeeEstimateInertia(inertia) + return c.State.L1PricingState().SetInertia(inertia) } // Sets the L2 gas price directly, bypassing the pool calculus @@ -69,26 +67,6 @@ func (con ArbOwner) SetSpeedLimit(c ctx, evm mech, limit uint64) error { return c.State.L2PricingState().SetSpeedLimitPerSecond(limit) } -// Sets the number of seconds worth of the speed limit the gas pool contains -func (con ArbOwner) SetGasPoolSeconds(c ctx, evm mech, seconds uint64) error { - return c.State.L2PricingState().SetGasPoolSeconds(seconds) -} - -// Set the target fullness in bips the pricing model will try to keep the pool at -func (con ArbOwner) SetGasPoolTarget(c ctx, evm mech, target uint64) error { - return c.State.L2PricingState().SetGasPoolTarget(arbmath.SaturatingCastToBips(target)) -} - -// Set the extent in bips to which the pricing model favors filling the pool over increasing speeds -func (con ArbOwner) SetGasPoolWeight(c ctx, evm mech, weight uint64) error { - return c.State.L2PricingState().SetGasPoolWeight(arbmath.SaturatingCastToBips(weight)) -} - -// Set how slowly ArbOS updates its estimate the amount of gas being burnt per second -func (con ArbOwner) SetRateEstimateInertia(c ctx, evm mech, inertia uint64) error { - return c.State.L2PricingState().SetRateEstimateInertia(inertia) -} - // Sets the maximum size a tx (and block) can be func (con ArbOwner) SetMaxTxGasLimit(c ctx, evm mech, limit uint64) error { return c.State.L2PricingState().SetMaxPerBlockGasLimit(limit) @@ -118,3 +96,19 @@ func (con ArbOwner) SetNetworkFeeAccount(c ctx, evm mech, newNetworkFeeAccount a func (con ArbOwner) ScheduleArbOSUpgrade(c ctx, evm mech, newVersion uint64, timestamp uint64) error { return c.State.ScheduleArbOSUpgrade(newVersion, timestamp) } + +func (con ArbOwner) SetL1PricingEquilibrationUnits(c ctx, evm mech, equilibrationUnits huge) error { + return c.State.L1PricingState().SetEquilibrationUnits(equilibrationUnits) +} + +func (con ArbOwner) 
SetL1PricingInertia(c ctx, evm mech, inertia uint64) error { + return c.State.L1PricingState().SetInertia(inertia) +} + +func (con ArbOwner) SetL1PricingRewardRecipient(c ctx, evm mech, recipient addr) error { + return c.State.L1PricingState().SetPayRewardsTo(recipient) +} + +func (con ArbOwner) SetL1PricingRewardRate(c ctx, evm mech, weiPerUnit uint64) error { + return c.State.L1PricingState().SetPerUnitReward(weiPerUnit) +} diff --git a/precompiles/ArbOwnerPublic.go b/precompiles/ArbOwnerPublic.go index 6d1a4fa62a..a11e07c4de 100644 --- a/precompiles/ArbOwnerPublic.go +++ b/precompiles/ArbOwnerPublic.go @@ -16,7 +16,7 @@ type ArbOwnerPublic struct { // Retrieves the list of chain owners func (con ArbOwnerPublic) GetAllChainOwners(c ctx, evm mech) ([]common.Address, error) { - return c.State.ChainOwners().AllMembers() + return c.State.ChainOwners().AllMembers(65536) } // See if the user is a chain owner diff --git a/precompiles/ArbOwner_test.go b/precompiles/ArbOwner_test.go index eaf1eb72be..994e3b7955 100644 --- a/precompiles/ArbOwner_test.go +++ b/precompiles/ArbOwner_test.go @@ -32,8 +32,7 @@ func TestAddressSet(t *testing.T) { callCtx := testContext(caller, evm) // the zero address is an owner by default - ZeroAddressL2 := util.RemapL1Address(common.Address{}) - Require(t, prec.RemoveChainOwner(callCtx, evm, ZeroAddressL2)) + Require(t, prec.RemoveChainOwner(callCtx, evm, common.Address{})) Require(t, prec.AddChainOwner(callCtx, evm, addr1)) Require(t, prec.AddChainOwner(callCtx, evm, addr2)) diff --git a/precompiles/ArbRetryableTx.go b/precompiles/ArbRetryableTx.go index 13f3596951..ab6b8960b3 100644 --- a/precompiles/ArbRetryableTx.go +++ b/precompiles/ArbRetryableTx.go @@ -7,6 +7,7 @@ import ( "errors" "math/big" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/params" @@ -20,11 +21,11 @@ type ArbRetryableTx struct { Address addr TicketCreated func(ctx, mech, bytes32) error 
LifetimeExtended func(ctx, mech, bytes32, huge) error - RedeemScheduled func(ctx, mech, bytes32, bytes32, uint64, uint64, addr) error + RedeemScheduled func(ctx, mech, bytes32, bytes32, uint64, uint64, addr, huge, huge) error Canceled func(ctx, mech, bytes32) error TicketCreatedGasCost func(bytes32) (uint64, error) LifetimeExtendedGasCost func(bytes32, huge) (uint64, error) - RedeemScheduledGasCost func(bytes32, bytes32, uint64, uint64, addr) (uint64, error) + RedeemScheduledGasCost func(bytes32, bytes32, uint64, uint64, addr, huge, huge) (uint64, error) CanceledGasCost func(bytes32) (uint64, error) // deprecated event @@ -68,6 +69,8 @@ func (con ArbRetryableTx) Redeem(c ctx, evm mech, ticketId bytes32) (bytes32, er } nonce := nextNonce - 1 + maxRefund := new(big.Int).Exp(common.Big2, common.Big256, nil) + maxRefund.Sub(maxRefund, common.Big1) retryTxInner, err := retryable.MakeTx( evm.ChainConfig().ChainID, nonce, @@ -75,6 +78,8 @@ func (con ArbRetryableTx) Redeem(c ctx, evm mech, ticketId bytes32) (bytes32, er 0, // will fill this in below ticketId, c.caller, + maxRefund, + common.Big0, ) if err != nil { return hash{}, err @@ -82,7 +87,7 @@ func (con ArbRetryableTx) Redeem(c ctx, evm mech, ticketId bytes32) (bytes32, er // figure out how much gas the event issuance will cost, and reduce the donated gas amount in the event // by that much, so that we'll donate the correct amount of gas - eventCost, err := con.RedeemScheduledGasCost(hash{}, hash{}, 0, 0, addr{}) + eventCost, err := con.RedeemScheduledGasCost(hash{}, hash{}, 0, 0, addr{}, common.Big0, common.Big0) if err != nil { return hash{}, err } @@ -104,7 +109,7 @@ func (con ArbRetryableTx) Redeem(c ctx, evm mech, ticketId bytes32) (bytes32, er retryTx := types.NewTx(retryTxInner) retryTxHash := retryTx.Hash() - err = con.RedeemScheduled(c, evm, ticketId, retryTxHash, nonce, gasToDonate, c.caller) + err = con.RedeemScheduled(c, evm, ticketId, retryTxHash, nonce, gasToDonate, c.caller, maxRefund, common.Big0) if 
err != nil { return hash{}, err } @@ -118,7 +123,7 @@ func (con ArbRetryableTx) Redeem(c ctx, evm mech, ticketId bytes32) (bytes32, er // Add the gasToDonate back to the gas pool: the retryable attempt will then consume it. // This ensures that the gas pool has enough gas to run the retryable attempt. - return retryTxHash, c.State.L2PricingState().AddToGasPool(arbmath.SaturatingCast(gasToDonate), c.State.FormatVersion()) + return retryTxHash, c.State.L2PricingState().AddToGasPool(arbmath.SaturatingCast(gasToDonate)) } // Gets the default lifetime period a retryable has at creation @@ -213,6 +218,14 @@ func (con ArbRetryableTx) Cancel(c ctx, evm mech, ticketId bytes32) error { return con.Canceled(c, evm, ticketId) } +func (con ArbRetryableTx) GetCurrentRedeemer(c ctx, evm mech) (common.Address, error) { + if c.txProcessor.CurrentRefundTo != nil { + return *c.txProcessor.CurrentRefundTo, nil + } else { + return common.Address{}, nil + } +} + func (con ArbRetryableTx) SubmitRetryable( c ctx, evm mech, requestId bytes32, l1BaseFee, deposit, callvalue, gasFeeCap huge, gasLimit uint64, maxSubmissionFee huge, diff --git a/precompiles/ArbSys.go b/precompiles/ArbSys.go index 162a676bdf..bbfdfd42ce 100644 --- a/precompiles/ArbSys.go +++ b/precompiles/ArbSys.go @@ -7,8 +7,6 @@ import ( "errors" "math/big" - "github.com/offchainlabs/nitro/arbos/l2pricing" - "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/core/vm" @@ -32,48 +30,6 @@ type ArbSys struct { var InvalidBlockNum = errors.New("Invalid block number") -func (con *ArbSys) emitL2ToL1Tx( - c ctx, - evm mech, - destination addr, - hash huge, - position huge, - ethBlockNum huge, - callvalue huge, - data []byte, -) error { - if c.State.FormatVersion() >= l2pricing.FirstExponentialPricingVersion { - return con.L2ToL1Tx( - c, - evm, - c.caller, - destination, - hash, - position, - evm.Context.BlockNumber, - ethBlockNum, - evm.Context.Time, - callvalue, - 
data, - ) - } else { - return con.L2ToL1Transaction( - c, - evm, - c.caller, - destination, - hash, - position, - big.NewInt(0), - evm.Context.BlockNumber, - ethBlockNum, - evm.Context.Time, - callvalue, - data, - ) - } -} - // Gets the current L2 block number func (con *ArbSys) ArbBlockNumber(c ctx, evm mech) (huge, error) { return evm.Context.BlockNumber, nil @@ -198,13 +154,16 @@ func (con *ArbSys) SendTxToL1(c ctx, evm mech, value huge, destination addr, cal leafNum := big.NewInt(int64(size - 1)) - err = con.emitL2ToL1Tx( + err = con.L2ToL1Tx( c, evm, + c.caller, destination, sendHash.Big(), leafNum, + evm.Context.BlockNumber, bigL1BlockNum, + evm.Context.Time, value, calldataForL1, ) diff --git a/precompiles/ArbosActs.go b/precompiles/ArbosActs.go index 9bc84d94d6..4cd5baf7f9 100644 --- a/precompiles/ArbosActs.go +++ b/precompiles/ArbosActs.go @@ -11,6 +11,10 @@ type ArbosActs struct { CallerNotArbOSError func() error } -func (con ArbosActs) StartBlock(c ctx, evm mech, l1BaseFee, l2BaseFeeLastBlock huge, l1BlockNumber, timeLastBlock uint64) error { +func (con ArbosActs) StartBlock(c ctx, evm mech, l1BaseFee huge, l1BlockNumber, l2BlockNumber, timeLastBlock uint64) error { + return con.CallerNotArbOSError() +} + +func (con ArbosActs) BatchPostingReport(c ctx, evm mech, batchTimestamp huge, batchPosterAddress addr, batchNumber uint64, batchDataGas uint64, l1BaseFeeWei huge) error { return con.CallerNotArbOSError() } diff --git a/precompiles/precompile.go b/precompiles/precompile.go index 55855466d5..9a53c9e7ac 100644 --- a/precompiles/precompile.go +++ b/precompiles/precompile.go @@ -58,12 +58,14 @@ const ( ) type Precompile struct { - methods map[[4]byte]PrecompileMethod - events map[string]PrecompileEvent - errors map[string]PrecompileError - implementer reflect.Value - address common.Address - arbosVersion uint64 + methods map[[4]byte]PrecompileMethod + methodsByName map[string]PrecompileMethod + events map[string]PrecompileEvent + errors 
map[string]PrecompileError + name string + implementer reflect.Value + address common.Address + arbosVersion uint64 } type PrecompileMethod struct { @@ -154,6 +156,7 @@ func MakePrecompile(metadata *bind.MetaData, implementer interface{}) (addr, Pre } methods := make(map[[4]byte]PrecompileMethod) + methodsByName := make(map[string]PrecompileMethod) events := make(map[string]PrecompileEvent) errors := make(map[string]PrecompileError) @@ -218,13 +221,15 @@ func MakePrecompile(metadata *bind.MetaData, implementer interface{}) (addr, Pre ) } - methods[id] = PrecompileMethod{ + method := PrecompileMethod{ name, method, purity, handler, 0, } + methods[id] = method + methodsByName[name] = method } // provide the implementer mechanisms to emit logs for the solidity events @@ -487,8 +492,10 @@ func MakePrecompile(metadata *bind.MetaData, implementer interface{}) (addr, Pre return address, Precompile{ methods, + methodsByName, events, errors, + contract, reflect.ValueOf(implementer), address, 0, @@ -518,7 +525,6 @@ func Precompiles() map[addr]ArbosPrecompile { insert(MakePrecompile(templates.ArbGasInfoMetaData, &ArbGasInfo{Address: hex("6c")})) insert(MakePrecompile(templates.ArbAggregatorMetaData, &ArbAggregator{Address: hex("6d")})) insert(MakePrecompile(templates.ArbStatisticsMetaData, &ArbStatistics{Address: hex("6f")})) - insert(MakePrecompile(templates.ArbosActsMetaData, &ArbosActs{Address: types.ArbosAddress})) eventCtx := func(gasLimit uint64, err error) *Context { if err != nil { @@ -534,9 +540,9 @@ func Precompiles() map[addr]ArbosPrecompile { ArbRetryable := insert(MakePrecompile(templates.ArbRetryableTxMetaData, ArbRetryableImpl)) arbos.ArbRetryableTxAddress = ArbRetryable.address arbos.RedeemScheduledEventID = ArbRetryable.events["RedeemScheduled"].template.ID - emitReedeemScheduled := func(evm mech, gas, nonce uint64, ticketId, retryTxHash bytes32, donor addr) error { - context := eventCtx(ArbRetryableImpl.RedeemScheduledGasCost(hash{}, hash{}, 0, 0, addr{})) - 
return ArbRetryableImpl.RedeemScheduled(context, evm, ticketId, retryTxHash, nonce, gas, donor) + emitReedeemScheduled := func(evm mech, gas, nonce uint64, ticketId, retryTxHash bytes32, donor addr, maxRefund *big.Int, submissionFeeRefund *big.Int) error { + context := eventCtx(ArbRetryableImpl.RedeemScheduledGasCost(hash{}, hash{}, 0, 0, addr{}, common.Big0, common.Big0)) + return ArbRetryableImpl.RedeemScheduled(context, evm, ticketId, retryTxHash, nonce, gas, donor, maxRefund, submissionFeeRefund) } arbos.EmitReedeemScheduledEvent = emitReedeemScheduled arbos.EmitTicketCreatedEvent = func(evm mech, ticketId bytes32) error { @@ -559,6 +565,10 @@ func Precompiles() map[addr]ArbosPrecompile { insert(ownerOnly(ArbOwnerImpl.Address, ArbOwner, emitOwnerActs)) insert(debugOnly(MakePrecompile(templates.ArbDebugMetaData, &ArbDebug{Address: hex("ff")}))) + ArbosActs := insert(MakePrecompile(templates.ArbosActsMetaData, &ArbosActs{Address: types.ArbosAddress})) + arbos.InternalTxStartBlockMethodID = ArbosActs.GetMethodID("StartBlock") + arbos.InternalTxBatchPostingReportMethodID = ArbosActs.GetMethodID("BatchPostingReport") + return contracts } @@ -567,6 +577,14 @@ func (p Precompile) SwapImpl(impl interface{}) Precompile { return p } +func (p Precompile) GetMethodID(name string) bytes4 { + method, ok := p.methodsByName[name] + if !ok { + panic(fmt.Sprintf("Precompile %v does not have a method with the name %v", p.name, name)) + } + return *(*bytes4)(method.template.ID) +} + // call a precompile in typed form, deserializing its inputs and serializing its outputs func (p Precompile) Call( input []byte, diff --git a/precompiles/wrapper.go b/precompiles/wrapper.go index 69dc29f4d0..00e233ba18 100644 --- a/precompiles/wrapper.go +++ b/precompiles/wrapper.go @@ -92,7 +92,7 @@ func (wrapper *OwnerPrecompile) Call( return nil, burner.gasLeft, err } - if !isOwner && (state.FormatVersion() >= 2 || caller != arbosState.TestnetUpgrade2Owner) { + if !isOwner { return nil, 
burner.gasLeft, errors.New("unauthorized caller to access-controlled method") } diff --git a/system_tests/block_validator_test.go b/system_tests/block_validator_test.go index 58d52f4a00..74c362dd00 100644 --- a/system_tests/block_validator_test.go +++ b/system_tests/block_validator_test.go @@ -93,11 +93,25 @@ func testBlockValidatorSimple(t *testing.T, dasModeString string, expensiveTx bo Fail(t, "Unexpected balance:", l2balance) } - lastBlockHeader, err := l2clientB.HeaderByNumber(ctx, nil) + lastBlock, err := l2clientB.BlockByNumber(ctx, nil) Require(t, err) + for { + usefulBlock := false + for _, tx := range lastBlock.Transactions() { + if tx.Type() != types.ArbitrumInternalTxType { + usefulBlock = true + break + } + } + if usefulBlock { + break + } + lastBlock, err = l2clientB.BlockByHash(ctx, lastBlock.ParentHash()) + Require(t, err) + } testDeadLine, _ := t.Deadline() nodeA.StopAndWait() - if !nodeB.BlockValidator.WaitForBlock(lastBlockHeader.Number.Uint64(), time.Until(testDeadLine)-time.Second*10) { + if !nodeB.BlockValidator.WaitForBlock(lastBlock.NumberU64(), time.Until(testDeadLine)-time.Second*10) { Fail(t, "did not validate all blocks") } nodeB.StopAndWait() diff --git a/system_tests/common_test.go b/system_tests/common_test.go index 1ec7c14076..a1c500e630 100644 --- a/system_tests/common_test.go +++ b/system_tests/common_test.go @@ -160,7 +160,18 @@ func DeployOnTestL1(t *testing.T, ctx context.Context, l1info info, l1client cli l1info.PrepareTx("Faucet", "User", 30000, big.NewInt(9223372036854775807), nil)}) l1TransactionOpts := l1info.GetDefaultTransactOpts("RollupOwner", ctx) - addresses, err := arbnode.DeployOnL1(ctx, l1client, &l1TransactionOpts, l1info.GetAddress("Sequencer"), 0, common.Hash{}, chainId, headerreader.TestConfig, validator.DefaultNitroMachineConfig) + addresses, err := arbnode.DeployOnL1( + ctx, + l1client, + &l1TransactionOpts, + l1info.GetAddress("Sequencer"), + l1info.GetAddress("RollupOwner"), + 0, + common.Hash{}, + chainId, 
+ headerreader.TestConfig, + validator.DefaultNitroMachineConfig, + ) Require(t, err) l1info.SetContract("Bridge", addresses.Bridge) l1info.SetContract("SequencerInbox", addresses.SequencerInbox) @@ -212,6 +223,8 @@ func CreateTestNodeOnL1WithConfig(t *testing.T, ctx context.Context, isSequencer if !isSequencer { nodeConfig.BatchPoster.Enable = false + nodeConfig.Sequencer.Enable = false + nodeConfig.DelayedSequencer.Enable = false } node, err := arbnode.CreateNode(ctx, l2stack, l2chainDb, nodeConfig, l2blockchain, l1client, addresses, sequencerTxOptsPtr, nil) diff --git a/system_tests/estimation_test.go b/system_tests/estimation_test.go index 29b3af496b..b762c9776c 100644 --- a/system_tests/estimation_test.go +++ b/system_tests/estimation_test.go @@ -8,11 +8,18 @@ import ( "math/big" "testing" + "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/params" + "github.com/offchainlabs/nitro/arbos/l1pricing" "github.com/offchainlabs/nitro/solgen/go/mocksgen" + "github.com/offchainlabs/nitro/solgen/go/node_interfacegen" "github.com/offchainlabs/nitro/solgen/go/precompilesgen" + "github.com/offchainlabs/nitro/util/arbmath" + "github.com/offchainlabs/nitro/util/colors" + "github.com/offchainlabs/nitro/util/testhelpers" ) func TestDeploy(t *testing.T) { @@ -118,3 +125,93 @@ func TestEstimate(t *testing.T) { Fail(t, "Unexpected counter value", counter) } } + +func TestComponentEstimate(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + l2info, node, client := CreateTestL2(t, ctx) + l1BaseFee := big.NewInt(l1pricing.InitialPricePerUnitWei * 16) + l2BaseFee := GetBaseFee(t, client, ctx) + + colors.PrintGrey("l1 basefee ", l1BaseFee) + colors.PrintGrey("l2 basefee ", l2BaseFee) + + userBalance := big.NewInt(1e16) + maxPriorityFeePerGas := big.NewInt(0) + maxFeePerGas := 
arbmath.BigMulByUfrac(l2BaseFee, 3, 2) + + l2info.GenerateAccount("User") + TransferBalance(t, "Owner", "User", userBalance, l2info, client, ctx) + + from := l2info.GetAddress("User") + to := testhelpers.RandomAddress() + gas := uint64(100000000) + calldata := []byte{0x00, 0x12} + value := big.NewInt(4096) + + nodeAbi, err := node_interfacegen.NodeInterfaceMetaData.GetAbi() + Require(t, err) + + nodeMethod := nodeAbi.Methods["gasEstimateComponents"] + estimateCalldata := append([]byte{}, nodeMethod.ID...) + packed, err := nodeMethod.Inputs.Pack(to, false, calldata) + Require(t, err) + estimateCalldata = append(estimateCalldata, packed...) + + msg := ethereum.CallMsg{ + From: from, + To: &types.NodeInterfaceAddress, + Gas: gas, + GasFeeCap: maxFeePerGas, + GasTipCap: maxPriorityFeePerGas, + Value: value, + Data: estimateCalldata, + } + returnData, err := client.CallContract(ctx, msg, nil) + Require(t, err) + + outputs, err := nodeMethod.Outputs.Unpack(returnData) + Require(t, err) + if len(outputs) != 4 { + Fail(t, "expected 4 outputs from gasEstimateComponents, got", len(outputs)) + } + + gasEstimate, _ := outputs[0].(uint64) + gasEstimateForL1, _ := outputs[1].(uint64) + baseFee, _ := outputs[2].(*big.Int) + l1BaseFeeEstimate, _ := outputs[3].(*big.Int) + + tx := l2info.SignTxAs("User", &types.DynamicFeeTx{ + ChainID: node.ArbInterface.BlockChain().Config().ChainID, + Nonce: 0, + GasTipCap: maxPriorityFeePerGas, + GasFeeCap: maxFeePerGas, + Gas: gasEstimate, + To: (*common.Address)(&to), + Value: value, + Data: calldata, + }) + + l2Estimate := gasEstimate - gasEstimateForL1 + + colors.PrintBlue("Est. 
", gasEstimate, " - ", gasEstimateForL1, " = ", l2Estimate) + + if !arbmath.BigEquals(l1BaseFeeEstimate, l1BaseFee) { + Fail(t, l1BaseFeeEstimate, l1BaseFee) + } + if !arbmath.BigEquals(baseFee, l2BaseFee) { + Fail(t, baseFee, l2BaseFee.Uint64()) + } + + Require(t, client.SendTransaction(ctx, tx)) + receipt, err := EnsureTxSucceeded(ctx, client, tx) + Require(t, err) + + l2Used := receipt.GasUsed - receipt.GasUsedForL1 + colors.PrintMint("True ", receipt.GasUsed, " - ", receipt.GasUsedForL1, " = ", l2Used) + + if l2Estimate != l2Used { + Fail(t, l2Estimate, l2Used) + } +} diff --git a/system_tests/fees_test.go b/system_tests/fees_test.go index d446ed6758..751a526d63 100644 --- a/system_tests/fees_test.go +++ b/system_tests/fees_test.go @@ -7,153 +7,166 @@ import ( "context" "math/big" "testing" + "time" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbcompress" + "github.com/offchainlabs/nitro/arbnode" "github.com/offchainlabs/nitro/arbos/l1pricing" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" "github.com/offchainlabs/nitro/solgen/go/precompilesgen" "github.com/offchainlabs/nitro/util/arbmath" "github.com/offchainlabs/nitro/util/colors" - "github.com/offchainlabs/nitro/util/testhelpers" ) -func TestTips(t *testing.T) { +func TestSequencerFeePaid(t *testing.T) { t.Parallel() ctx, cancel := context.WithCancel(context.Background()) defer cancel() - l2info, _, l2client, l1info, _, l1client, stack := CreateTestNodeOnL1(t, ctx, true) + l2info, _, l2client, _, _, _, stack := CreateTestNodeOnL1(t, ctx, true) defer stack.Close() - auth := l2info.GetDefaultTransactOpts("Owner", ctx) callOpts := l2info.GetDefaultCallOpts("Owner", ctx) - aggregator := testhelpers.RandomAddress() // get the network fee account arbOwnerPublic, err := precompilesgen.NewArbOwnerPublic(common.HexToAddress("0x6b"), l2client) 
Require(t, err, "could not deploy ArbOwner contract") + arbGasInfo, err := precompilesgen.NewArbGasInfo(common.HexToAddress("0x6c"), l2client) + Require(t, err, "could not deploy ArbOwner contract") networkFeeAccount, err := arbOwnerPublic.GetNetworkFeeAccount(callOpts) Require(t, err, "could not get the network fee account") - // set a preferred aggregator who won't be the one to post the tx - arbAggregator, err := precompilesgen.NewArbAggregator(common.HexToAddress("0x6d"), l2client) - Require(t, err, "could not deploy ArbAggregator contract") - tx, err := arbAggregator.SetPreferredAggregator(&auth, aggregator) - Require(t, err, "could not set L2 gas price") - _, err = EnsureTxSucceeded(ctx, l2client, tx) + l1Estimate, err := arbGasInfo.GetL1BaseFeeEstimate(callOpts) Require(t, err) - - basefee := GetBaseFee(t, l2client, ctx) - auth.GasFeeCap = arbmath.BigMulByUfrac(basefee, 5, 4) // add room for a 20% tip - auth.GasTipCap = arbmath.BigMulByUfrac(basefee, 1, 4) // add a 20% tip - networkBefore := GetBalance(t, ctx, l2client, networkFeeAccount) - // use L1 to post a message since the sequencer won't do it - nosend := auth - nosend.NoSend = true - tx, err = arbAggregator.SetPreferredAggregator(&nosend, aggregator) - Require(t, err) - receipt := SendSignedTxViaL1(t, ctx, l1info, l1client, l2client, tx) - if receipt.Status != types.ReceiptStatusSuccessful { - Fail(t, "failed to prefer the sequencer") - } + l2info.GasPrice = GetBaseFee(t, l2client, ctx) + tx, receipt := TransferBalance(t, "Faucet", "Faucet", big.NewInt(0), l2info, l2client, ctx) + txSize := compressedTxSize(t, tx) networkAfter := GetBalance(t, ctx, l2client, networkFeeAccount) - colors.PrintMint("network: ", networkFeeAccount, networkBefore, networkAfter) - colors.PrintBlue("pricing: ", l2info.GasPrice, auth.GasFeeCap, auth.GasTipCap) - colors.PrintBlue("payment: ", tx.GasPrice(), tx.GasFeeCap(), tx.GasTipCap()) - - if !arbmath.BigEquals(tx.GasPrice(), auth.GasFeeCap) { - Fail(t, "user did not pay the 
tip") - } + l1Charge := arbmath.BigMulByUint(l2info.GasPrice, receipt.GasUsedForL1) - tip := arbmath.BigMulByUint(arbmath.BigSub(tx.GasPrice(), basefee), receipt.GasUsed) - full := arbmath.BigMulByUint(basefee, receipt.GasUsed) // was gasprice before upgrade networkRevenue := arbmath.BigSub(networkAfter, networkBefore) - colors.PrintMint("price: ", tip, full, networkRevenue) - colors.PrintRed("used: ", receipt.GasUsed, basefee) - - if !arbmath.BigEquals(full, networkRevenue) { - Fail(t, "the network didn't receive the funds") + gasUsedForL2 := receipt.GasUsed - receipt.GasUsedForL1 + if !arbmath.BigEquals(networkRevenue, arbmath.BigMulByUint(tx.GasPrice(), gasUsedForL2)) { + Fail(t, "network didn't receive expected payment") } -} - -// Test that the sequencer won't subvert a user's aggregation preferences -func TestSequencerWontPostWhenNotPreferred(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - l2info, _, client := CreateTestL2(t, ctx) - auth := l2info.GetDefaultTransactOpts("Owner", ctx) + l1GasBought := arbmath.BigDiv(l1Charge, l1Estimate).Uint64() + l1GasActual := txSize * params.TxDataNonZeroGasEIP2028 - // prefer a 3rd party aggregator - arbAggregator, err := precompilesgen.NewArbAggregator(common.HexToAddress("0x6d"), client) - Require(t, err, "could not deploy ArbAggregator contract") - tx, err := arbAggregator.SetPreferredAggregator(&auth, testhelpers.RandomAddress()) - Require(t, err, "could not set L2 gas price") - _, err = EnsureTxSucceeded(ctx, client, tx) - Require(t, err) + colors.PrintBlue("bytes ", l1GasBought/params.TxDataNonZeroGasEIP2028, txSize) - // get the network fee account - _, err = arbAggregator.SetPreferredAggregator(&auth, testhelpers.RandomAddress()) - colors.PrintBlue("expecting error: ", err) - if err == nil { - Fail(t, "the sequencer should have rejected this tx") + if l1GasBought != l1GasActual { + Fail(t, "the sequencer's future revenue does not match its costs", l1GasBought, 
l1GasActual) } } -func TestSequencerFeePaid(t *testing.T) { +func TestSequencerPriceAdjusts(t *testing.T) { t.Parallel() ctx, cancel := context.WithCancel(context.Background()) defer cancel() - l2info, _, l2client, _, _, _, stack := CreateTestNodeOnL1(t, ctx, true) - defer stack.Close() - callOpts := l2info.GetDefaultCallOpts("Owner", ctx) + chainConfig := params.ArbitrumDevTestChainConfig() + conf := arbnode.ConfigDefaultL1Test() + conf.DelayedSequencer.FinalizeDistance = 1 - // get the network fee account - arbOwnerPublic, err := precompilesgen.NewArbOwnerPublic(common.HexToAddress("0x6b"), l2client) - Require(t, err, "could not deploy ArbOwner contract") - arbGasInfo, err := precompilesgen.NewArbGasInfo(common.HexToAddress("0x6c"), l2client) - Require(t, err, "could not deploy ArbOwner contract") - networkFeeAccount, err := arbOwnerPublic.GetNetworkFeeAccount(callOpts) - Require(t, err, "could not get the network fee account") + l2info, node, l2client, _, _, l1client, stack := CreateTestNodeOnL1WithConfig(t, ctx, true, conf, chainConfig) + defer stack.Close() - l1Estimate, err := arbGasInfo.GetL1GasPriceEstimate(callOpts) + arbGasInfo, err := precompilesgen.NewArbGasInfo(common.HexToAddress("0x6c"), l2client) + Require(t, err) + lastEstimate, err := arbGasInfo.GetL1BaseFeeEstimate(&bind.CallOpts{Context: ctx}) + Require(t, err) + lastBatchCount, err := node.InboxTracker.GetBatchCount() + Require(t, err) + l1Header, err := l1client.HeaderByNumber(ctx, nil) Require(t, err) - networkBefore := GetBalance(t, ctx, l2client, networkFeeAccount) - seqBefore := GetBalance(t, ctx, l2client, l1pricing.SequencerAddress) - - l2info.GasPrice = GetBaseFee(t, l2client, ctx) - tx, receipt := TransferBalance(t, "Faucet", "Faucet", big.NewInt(0), l2info, l2client, ctx) - - networkAfter := GetBalance(t, ctx, l2client, networkFeeAccount) - seqAfter := GetBalance(t, ctx, l2client, l1pricing.SequencerAddress) - networkRevenue := arbmath.BigSub(networkAfter, networkBefore) - seqRevenue 
:= arbmath.BigSub(seqAfter, seqBefore) + sequencerBalanceBefore := GetBalance(t, ctx, l2client, l1pricing.BatchPosterAddress) + timesPriceAdjusted := 0 + + colors.PrintBlue("Initial values") + colors.PrintBlue(" L1 base fee ", l1Header.BaseFee) + colors.PrintBlue(" L1 estimate ", lastEstimate) + + for i := 0; i < 128; i++ { + tx, receipt := TransferBalance(t, "Owner", "Owner", common.Big1, l2info, l2client, ctx) + header, err := l2client.HeaderByHash(ctx, receipt.BlockHash) + Require(t, err) + + units := compressedTxSize(t, tx) * params.TxDataNonZeroGasEIP2028 + estimatedL1FeePerUnit := arbmath.BigDivByUint(arbmath.BigMulByUint(header.BaseFee, receipt.GasUsedForL1), units) + + if !arbmath.BigEquals(lastEstimate, estimatedL1FeePerUnit) { + l1Header, err = l1client.HeaderByNumber(ctx, nil) + Require(t, err) + + callOpts := &bind.CallOpts{Context: ctx, BlockNumber: receipt.BlockNumber} + actualL1FeePerUnit, err := arbGasInfo.GetL1BaseFeeEstimate(callOpts) + Require(t, err) + + colors.PrintGrey("ArbOS updated its L1 estimate") + colors.PrintGrey(" L1 base fee ", l1Header.BaseFee) + colors.PrintGrey(" L1 estimate ", lastEstimate, " ➤ ", estimatedL1FeePerUnit, " = ", actualL1FeePerUnit) + + oldDiff := arbmath.BigAbs(arbmath.BigSub(lastEstimate, l1Header.BaseFee)) + newDiff := arbmath.BigAbs(arbmath.BigSub(actualL1FeePerUnit, l1Header.BaseFee)) + + if arbmath.BigGreaterThan(newDiff, oldDiff) { + Fail(t, "L1 gas price estimate should tend toward the basefee") + } + diff := arbmath.BigAbs(arbmath.BigSub(actualL1FeePerUnit, estimatedL1FeePerUnit)) + maxDiffToAllow := arbmath.BigDivByUint(actualL1FeePerUnit, 100) + if arbmath.BigLessThan(maxDiffToAllow, diff) { // verify that estimates is within 1% of actual + Fail(t, "New L1 estimate differs too much from receipt") + } + if arbmath.BigEquals(actualL1FeePerUnit, common.Big0) { + Fail(t, "Estimate is zero", i) + } + lastEstimate = actualL1FeePerUnit + timesPriceAdjusted++ + } + + if i%16 == 0 { + // see that the inbox advances 
+ + for j := 16; j > 0; j-- { + newBatchCount, err := node.InboxTracker.GetBatchCount() + Require(t, err) + if newBatchCount > lastBatchCount { + colors.PrintGrey("posted new batch ", newBatchCount) + lastBatchCount = newBatchCount + break + } + if j == 1 { + Fail(t, "batch count didn't update in time") + } + time.Sleep(time.Millisecond * 100) + } + } + } - gasUsedForL2 := receipt.GasUsed - receipt.GasUsedForL1 + sequencerBalanceAfter := GetBalance(t, ctx, l2client, l1pricing.BatchPosterAddress) + colors.PrintMint("sequencer balance ", sequencerBalanceBefore, " ➤ ", sequencerBalanceAfter) + colors.PrintMint("price changes ", timesPriceAdjusted) - if !arbmath.BigEquals(seqRevenue, arbmath.BigMulByUint(tx.GasPrice(), receipt.GasUsedForL1)) { - Fail(t, "sequencer didn't receive expected payment") + if timesPriceAdjusted == 0 { + Fail(t, "L1 gas price estimate never adjusted") } - if !arbmath.BigEquals(networkRevenue, arbmath.BigMulByUint(tx.GasPrice(), gasUsedForL2)) { - Fail(t, "network didn't receive expected payment") + if !arbmath.BigGreaterThan(sequencerBalanceAfter, sequencerBalanceBefore) { + Fail(t, "sequencer didn't get paid") } +} - paidBytes := arbmath.BigDiv(seqRevenue, l1Estimate).Uint64() / params.TxDataNonZeroGasEIP2028 - +func compressedTxSize(t *testing.T, tx *types.Transaction) uint64 { txBin, err := tx.MarshalBinary() Require(t, err) compressed, err := arbcompress.CompressFast(txBin) Require(t, err) - - if uint64(len(compressed)) != paidBytes { - t.Fatal("unexpected number of bytes paid for") - } - + return uint64(len(compressed)) } diff --git a/system_tests/replay_fuzz/replay_fuzz.go b/system_tests/replay_fuzz/replay_fuzz.go index f296e082de..fa867c09f6 100644 --- a/system_tests/replay_fuzz/replay_fuzz.go +++ b/system_tests/replay_fuzz/replay_fuzz.go @@ -29,6 +29,7 @@ func BuildBlock( chainContext core.ChainContext, chainConfig *params.ChainConfig, inbox arbstate.InboxBackend, + seqBatch []byte, ) (*types.Block, error) { var delayedMessagesRead 
uint64 if lastBlockHeader != nil { @@ -45,10 +46,13 @@ func BuildBlock( delayedMessagesRead = inboxMultiplexer.DelayedMessagesRead() l1Message := message.Message - block, _ := arbos.ProduceBlock( - l1Message, delayedMessagesRead, lastBlockHeader, statedb, chainContext, chainConfig, + batchFetcher := func(uint64) ([]byte, error) { + return seqBatch, nil + } + block, _, err := arbos.ProduceBlock( + l1Message, delayedMessagesRead, lastBlockHeader, statedb, chainContext, chainConfig, batchFetcher, ) - return block, nil + return block, err } // A simple mock inbox multiplexer backend @@ -105,7 +109,7 @@ func (c noopChainContext) GetHeader(common.Hash, uint64) *types.Header { func Fuzz(input []byte) int { chainDb := rawdb.NewMemoryDatabase() - stateRoot, err := arbosState.InitializeArbosInDatabase(chainDb, statetransfer.NewMemoryInitDataReader(&statetransfer.ArbosInitializationInfo{}), params.ArbitrumTestnetChainConfig()) + stateRoot, err := arbosState.InitializeArbosInDatabase(chainDb, statetransfer.NewMemoryInitDataReader(&statetransfer.ArbosInitializationInfo{}), params.ArbitrumDevnetChainConfig()) if err != nil { panic(err) } @@ -142,7 +146,7 @@ func Fuzz(input []byte) int { positionWithinMessage: 0, delayedMessages: delayedMessages, } - _, err = BuildBlock(statedb, genesis, noopChainContext{}, params.ArbitrumOneChainConfig(), inbox) + _, err = BuildBlock(statedb, genesis, noopChainContext{}, params.ArbitrumOneChainConfig(), inbox, seqBatch) if err != nil { // With the fixed header it shouldn't be possible to read a delayed message, // and no other type of error should be possible. 
diff --git a/system_tests/retryable_test.go b/system_tests/retryable_test.go index e2de3ed129..6cdd117114 100644 --- a/system_tests/retryable_test.go +++ b/system_tests/retryable_test.go @@ -15,6 +15,7 @@ import ( "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbnode" + "github.com/offchainlabs/nitro/arbos" "github.com/offchainlabs/nitro/arbos/util" "github.com/offchainlabs/nitro/arbos/l2pricing" @@ -50,16 +51,27 @@ func retryableSetup(t *testing.T) ( lookupSubmitRetryableL2TxHash := func(l1Receipt *types.Receipt) common.Hash { messages, err := delayedBridge.LookupMessagesInRange(ctx, l1Receipt.BlockNumber, l1Receipt.BlockNumber) Require(t, err) - if len(messages) != 1 { - Fail(t, "expected 1 message from retryable submission, found", len(messages)) + if len(messages) == 0 { + Fail(t, "didn't find message for retryable submission") } - txs, err := messages[0].Message.ParseL2Transactions(params.ArbitrumDevTestChainConfig().ChainID) - Require(t, err) - if len(txs) != 1 { - Fail(t, "expected 1 tx from retryable submission, found", len(txs)) + var submissionTxs []*types.Transaction + for _, message := range messages { + if message.Message.Header.Kind != arbos.L1MessageType_SubmitRetryable { + continue + } + txs, err := message.Message.ParseL2Transactions(params.ArbitrumDevTestChainConfig().ChainID, nil) + Require(t, err) + for _, tx := range txs { + if tx.Type() == types.ArbitrumSubmitRetryableTxType { + submissionTxs = append(submissionTxs, tx) + } + } + } + if len(submissionTxs) != 1 { + Fail(t, "expected 1 tx from retryable submission, found", len(submissionTxs)) } - return txs[0].Hash() + return submissionTxs[0].Hash() } // burn some gas so that the faucet's Callvalue + Balance never exceeds a uint256 @@ -193,7 +205,7 @@ func TestSubmitRetryableFailThenRetry(t *testing.T) { // send enough L2 gas for intrinsic but not compute big.NewInt(int64(params.TxGas+params.TxDataNonZeroGasEIP2028*4)), 
big.NewInt(l2pricing.InitialBaseFeeWei*2), - simpleABI.Methods["increment"].ID, + simpleABI.Methods["incrementRedeem"].ID, ) Require(t, err) @@ -246,6 +258,19 @@ func TestSubmitRetryableFailThenRetry(t *testing.T) { if counter != 1 { Fail(t, "Unexpected counter:", counter) } + + if len(receipt.Logs) != 1 { + Fail(t, "Unexpected log count:", len(receipt.Logs)) + } + parsed, err := simple.ParseRedeemedEvent(*receipt.Logs[0]) + Require(t, err) + aliasedSender := util.RemapL1Address(usertxopts.From) + if parsed.Caller != aliasedSender { + Fail(t, "Unexpected caller", parsed.Caller, "expected", aliasedSender) + } + if parsed.Redeemer != ownerTxOpts.From { + Fail(t, "Unexpected redeemer", parsed.Redeemer, "expected", ownerTxOpts.From) + } } func TestSubmissionGasCosts(t *testing.T) { @@ -272,12 +297,13 @@ func TestSubmissionGasCosts(t *testing.T) { Require(t, err) usefulGas := params.TxGas - excessGas := uint64(808) + excessGasLimit := uint64(808) maxSubmissionFee := big.NewInt(1e13) - retryableGas := arbmath.UintToBig(usefulGas + excessGas) // will only burn the intrinsic cost + retryableGas := arbmath.UintToBig(usefulGas + excessGasLimit) // will only burn the intrinsic cost retryableL2CallValue := big.NewInt(1e4) retryableCallData := []byte{} + gasFeeCap := big.NewInt(l2pricing.InitialBaseFeeWei * 2) l1tx, err := delayedInbox.CreateRetryableTicket( &usertxopts, receiveAddress, @@ -286,7 +312,7 @@ func TestSubmissionGasCosts(t *testing.T) { feeRefundAddress, beneficiaryAddress, retryableGas, - big.NewInt(l2pricing.InitialBaseFeeWei*2), + gasFeeCap, retryableCallData, ) Require(t, err) @@ -299,13 +325,9 @@ func TestSubmissionGasCosts(t *testing.T) { waitForL1DelayBlocks(t, ctx, l1client, l1info) l2BaseFee := GetBaseFee(t, l2client, ctx) - excessWei := arbmath.BigMulByUint(l2BaseFee, excessGas) - - l1HeaderAfterSubmit, err := l1client.HeaderByHash(ctx, l1receipt.BlockHash) - Require(t, err) - l1BaseFee := l1HeaderAfterSubmit.BaseFee - submitFee := 
arbmath.BigMulByUint(l1BaseFee, uint64(1400+6*len(retryableCallData))) - submissionFeeRefund := arbmath.BigSub(maxSubmissionFee, submitFee) + excessGasPrice := arbmath.BigSub(gasFeeCap, l2BaseFee) + excessWei := arbmath.BigMulByUint(l2BaseFee, excessGasLimit) + excessWei.Add(excessWei, arbmath.BigMul(excessGasPrice, retryableGas)) fundsAfterSubmit, err := l2client.BalanceAt(ctx, faucetAddress, nil) Require(t, err) @@ -337,16 +359,17 @@ func TestSubmissionGasCosts(t *testing.T) { } // the fee refund address should recieve the excess gas - colors.PrintBlue("Base Fee ", l2BaseFee) - colors.PrintBlue("Excess Gas ", excessGas) - colors.PrintBlue("Excess Wei ", excessWei) - colors.PrintMint("Fee Refund ", refundFunds) - if !arbmath.BigEquals(refundFunds, arbmath.BigAdd(excessWei, submissionFeeRefund)) { + colors.PrintBlue("Base Fee ", l2BaseFee) + colors.PrintBlue("Excess Gas Price ", excessGasPrice) + colors.PrintBlue("Excess Gas ", excessGasLimit) + colors.PrintBlue("Excess Wei ", excessWei) + colors.PrintMint("Fee Refund ", refundFunds) + if !arbmath.BigEquals(refundFunds, arbmath.BigAdd(excessWei, maxSubmissionFee)) { Fail(t, "The Fee Refund Address didn't receive the right funds") } // the faucet must pay for both the gas used and the call value supplied - expectedGasChange := arbmath.BigMul(l2BaseFee, retryableGas) + expectedGasChange := arbmath.BigMul(gasFeeCap, retryableGas) expectedGasChange = arbmath.BigSub(expectedGasChange, usertxopts.Value) // the user is credited this expectedGasChange = arbmath.BigAdd(expectedGasChange, maxSubmissionFee) expectedGasChange = arbmath.BigAdd(expectedGasChange, retryableL2CallValue) diff --git a/system_tests/seq_whitelist_test.go b/system_tests/seq_whitelist_test.go new file mode 100644 index 0000000000..7cfdf1caca --- /dev/null +++ b/system_tests/seq_whitelist_test.go @@ -0,0 +1,42 @@ +// Copyright 2021-2022, Offchain Labs, Inc. 
+// For license information, see https://github.com/nitro/blob/master/LICENSE + +package arbtest + +import ( + "context" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/params" + "github.com/offchainlabs/nitro/arbnode" +) + +func TestSequencerWhitelist(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + config := arbnode.ConfigDefaultL2Test() + config.Sequencer.SenderWhitelist = []string{ + GetTestAddressForAccountName(t, "Owner").String(), + GetTestAddressForAccountName(t, "User").String(), + } + l2info, _, client := CreateTestL2WithConfig(t, ctx, nil, config, true) + + l2info.GenerateAccount("User") + l2info.GenerateAccount("User2") + + // Owner is on the whitelist + TransferBalance(t, "Owner", "User", big.NewInt(params.Ether), l2info, client, ctx) + TransferBalance(t, "Owner", "User2", big.NewInt(params.Ether), l2info, client, ctx) + + // User is on the whitelist + TransferBalance(t, "User", "User2", big.NewInt(params.Ether/10), l2info, client, ctx) + + // User2 is *not* on the whitelist, therefore this should fail + tx := l2info.PrepareTx("User2", "User", l2info.TransferGas, big.NewInt(params.Ether/10), nil) + err := client.SendTransaction(ctx, tx) + if err == nil { + Fail(t, "transaction from user not on whitelist accepted") + } +} diff --git a/system_tests/seqcompensation_test.go b/system_tests/seqcompensation_test.go index 56b70a9cb1..459af8474a 100644 --- a/system_tests/seqcompensation_test.go +++ b/system_tests/seqcompensation_test.go @@ -13,8 +13,9 @@ import ( "github.com/offchainlabs/nitro/arbos/l1pricing" ) -// Sequencer address gets something for posting batches +// L1 Pricer pool address gets something when the sequencer posts batches func TestSequencerCompensation(t *testing.T) { + t.Parallel() ctx, cancel := context.WithCancel(context.Background()) defer cancel() l2info, node1, l2clientA, l1info, _, l1client, l1stack := CreateTestNodeOnL1(t, ctx, true) @@ -50,15 +51,9 @@ func 
TestSequencerCompensation(t *testing.T) { Fail(t, "Unexpected balance:", l2balance) } - initialSeqBalance, err := l2clientB.BalanceAt(ctx, l1pricing.SequencerAddress, big.NewInt(0)) + initialSeqBalance, err := l2clientB.BalanceAt(ctx, l1pricing.BatchPosterAddress, big.NewInt(0)) Require(t, err) if initialSeqBalance.Sign() != 0 { Fail(t, "Unexpected initial sequencer balance:", initialSeqBalance) } - finalSeqBalance, err := l2clientB.BalanceAt(ctx, l1pricing.SequencerAddress, nil) - Require(t, err) - if finalSeqBalance.Sign() <= 0 { - Fail(t, "Unexpected final sequencer balance:", finalSeqBalance) - } - } diff --git a/system_tests/staker_test.go b/system_tests/staker_test.go index 3f1c686616..1fdee7fd66 100644 --- a/system_tests/staker_test.go +++ b/system_tests/staker_test.go @@ -14,12 +14,14 @@ import ( "math/big" "strings" "testing" + "time" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/solgen/go/rollupgen" + "github.com/offchainlabs/nitro/util/colors" "github.com/offchainlabs/nitro/validator" ) @@ -186,7 +188,7 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) defer close(backgroundTxsShutdownChan) err := makeBackgroundTxs(backgroundTxsCtx, l2info, l2clientA, l2clientB, faultyStaker) if !errors.Is(err, context.Canceled) { - t.Error("error making background txs", err) + Fail(t, "error making background txs", err) } })() @@ -210,6 +212,14 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) stakerBTxs++ } } + + if err != nil && strings.Contains(err.Error(), "waiting") { + colors.PrintRed("retrying ", err.Error(), i) + time.Sleep(20 * time.Millisecond) + i-- + continue + } + if err != nil && faultyStaker && i%2 == 1 { // Check if this is an expected error from the faulty staker. 
if strings.Contains(err.Error(), "agreed with entire challenge") { @@ -247,7 +257,7 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) isHonestZombie, err := rollup.IsZombie(&bind.CallOpts{}, valWalletAddrA) Require(t, err) if isHonestZombie { - t.Fatal("staker A became a zombie") + Fail(t, "staker A became a zombie") } for j := 0; j < 5; j++ { TransferBalance(t, "Faucet", "Faucet", common.Big0, l1info, l1client, ctx) @@ -255,7 +265,7 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) } if stakerATxs == 0 || stakerBTxs == 0 { - t.Fatal("staker didn't make txs: staker A made", stakerATxs, "staker B made", stakerBTxs) + Fail(t, "staker didn't make txs: staker A made", stakerATxs, "staker B made", stakerBTxs) } latestConfirmedNode, err := rollup.LatestConfirmed(&bind.CallOpts{}) @@ -264,24 +274,24 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) if latestConfirmedNode <= 1 && !honestStakerInactive { latestCreatedNode, err := rollup.LatestNodeCreated(&bind.CallOpts{}) Require(t, err) - t.Fatal("latest confirmed node didn't advance:", latestConfirmedNode, latestCreatedNode) + Fail(t, "latest confirmed node didn't advance:", latestConfirmedNode, latestCreatedNode) } if faultyStaker && !sawStakerZombie { - t.Fatal("staker B didn't become a zombie despite being faulty") + Fail(t, "staker B didn't become a zombie despite being faulty") } isStaked, err := rollup.IsStaked(&bind.CallOpts{}, valWalletAddrA) Require(t, err) if !isStaked { - t.Fatal("staker A isn't staked") + Fail(t, "staker A isn't staked") } if !faultyStaker { isStaked, err := rollup.IsStaked(&bind.CallOpts{}, valWalletAddrB) Require(t, err) if !isStaked { - t.Fatal("staker B isn't staked") + Fail(t, "staker B isn't staked") } } } diff --git a/system_tests/test_info.go b/system_tests/test_info.go index 48b5824973..ef838344a5 100644 --- a/system_tests/test_info.go +++ b/system_tests/test_info.go @@ -64,9 +64,7 @@ 
func NewL1TestInfo(t *testing.T) *BlockchainTestInfo { return NewBlockChainTestInfo(t, types.NewLondonSigner(simulatedChainID), big.NewInt(params.GWei*100), params.TxGas) } -func (b *BlockchainTestInfo) GenerateAccount(name string) { - b.T.Helper() - +func GetTestKeyForAccountName(t *testing.T, name string) *ecdsa.PrivateKey { nameBytes := []byte(name) seedBytes := make([]byte, 0, 128) for len(seedBytes) < 64 { @@ -75,8 +73,20 @@ func (b *BlockchainTestInfo) GenerateAccount(name string) { seedReader := bytes.NewReader(seedBytes) privateKey, err := ecdsa.GenerateKey(crypto.S256(), seedReader) if err != nil { - b.T.Fatal(err) + t.Fatal(err) } + return privateKey +} + +func GetTestAddressForAccountName(t *testing.T, name string) common.Address { + privateKey := GetTestKeyForAccountName(t, name) + return crypto.PubkeyToAddress(privateKey.PublicKey) +} + +func (b *BlockchainTestInfo) GenerateAccount(name string) { + b.T.Helper() + + privateKey := GetTestKeyForAccountName(b.T, name) if b.Accounts[name] != nil { b.T.Fatal("account already exists") } diff --git a/system_tests/wrap_transaction_test.go b/system_tests/wrap_transaction_test.go index 96946f4d69..508f363bac 100644 --- a/system_tests/wrap_transaction_test.go +++ b/system_tests/wrap_transaction_test.go @@ -73,7 +73,7 @@ func WaitForTx(ctxinput context.Context, client arbutil.L1Interface, txhash comm } func EnsureTxSucceeded(ctx context.Context, client arbutil.L1Interface, tx *types.Transaction) (*types.Receipt, error) { - return EnsureTxSucceededWithTimeout(ctx, client, tx, time.Second*2) + return EnsureTxSucceededWithTimeout(ctx, client, tx, time.Second*5) } func EnsureTxSucceededWithTimeout(ctx context.Context, client arbutil.L1Interface, tx *types.Transaction, timeout time.Duration) (*types.Receipt, error) { diff --git a/util/arbmath/math.go b/util/arbmath/math.go index ed1a2e1cdd..ec0f00f6d5 100644 --- a/util/arbmath/math.go +++ b/util/arbmath/math.go @@ -28,6 +28,14 @@ func MinInt(value, ceiling int64) int64 
{ return value } +// the minimum of two uints +func MinUint(value, ceiling uint64) uint64 { + if value > ceiling { + return ceiling + } + return value +} + // the maximum of two ints func MaxInt(value, floor int64) int64 { if value < floor { @@ -90,6 +98,24 @@ func BigGreaterThan(first, second *big.Int) bool { return first.Cmp(second) > 0 } +// returns a clone of the minimum of two big integers +func BigMin(first, second *big.Int) *big.Int { + if BigLessThan(first, second) { + return new(big.Int).Set(first) + } else { + return new(big.Int).Set(second) + } +} + +// returns a clone of the maximum of two big integers +func BigMax(first, second *big.Int) *big.Int { + if BigGreaterThan(first, second) { + return new(big.Int).Set(first) + } else { + return new(big.Int).Set(second) + } +} + // add a huge to another func BigAdd(augend *big.Int, addend *big.Int) *big.Int { return new(big.Int).Add(augend, addend) @@ -110,6 +136,16 @@ func BigDiv(dividend *big.Int, divisor *big.Int) *big.Int { return new(big.Int).Div(dividend, divisor) } +// absolute value of a huge +func BigAbs(value *big.Int) *big.Int { + return new(big.Int).Abs(value) +} + +// add a uint to a huge +func BigAddByUint(augend *big.Int, addend uint64) *big.Int { + return new(big.Int).Add(augend, UintToBig(addend)) +} + // multiply a huge by a rational func BigMulByFrac(value *big.Int, numerator, denominator int64) *big.Int { value = new(big.Int).Set(value) @@ -233,6 +269,16 @@ func SaturatingUCast(value int64) uint64 { return uint64(value) } +func SaturatingCastToUint(value *big.Int) uint64 { + if value.Sign() < 0 { + return 0 + } + if !value.IsUint64() { + return math.MaxUint64 + } + return value.Uint64() +} + // the number of eth-words needed to store n bytes func WordsForBytes(nbytes uint64) uint64 { return (nbytes + 31) / 32 diff --git a/validator/block_validator.go b/validator/block_validator.go index fc12261c75..74cd7b381b 100644 --- a/validator/block_validator.go +++ b/validator/block_validator.go @@ 
-204,13 +204,13 @@ func (v *BlockValidator) readLastBlockValidatedDbInfo() error { return nil } -func (v *BlockValidator) prepareBlock(header *types.Header, prevHeader *types.Header, msg arbstate.MessageWithMetadata, validationStatus *validationStatus) { - preimages, hasDelayedMessage, delayedMsgToRead, err := BlockDataForValidation(v.blockchain, header, prevHeader, msg, v.config.StorePreimages) +func (v *BlockValidator) prepareBlock(ctx context.Context, header *types.Header, prevHeader *types.Header, msg arbstate.MessageWithMetadata, validationStatus *validationStatus) { + preimages, readBatchInfo, hasDelayedMessage, delayedMsgToRead, err := BlockDataForValidation(ctx, v.blockchain, v.inboxReader, header, prevHeader, msg, v.config.StorePreimages) if err != nil { log.Error("failed to set up validation", "err", err, "header", header, "prevHeader", prevHeader) return } - validationEntry, err := newValidationEntry(prevHeader, header, hasDelayedMessage, delayedMsgToRead, preimages) + validationEntry, err := newValidationEntry(prevHeader, header, hasDelayedMessage, delayedMsgToRead, preimages, readBatchInfo) if err != nil { log.Error("failed to create validation entry", "err", err, "header", header, "prevHeader", prevHeader) return @@ -255,7 +255,7 @@ func (v *BlockValidator) NewBlock(block *types.Block, prevHeader *types.Header, if v.nextValidationEntryBlock <= blockNum { v.nextValidationEntryBlock = blockNum + 1 } - v.LaunchUntrackedThread(func() { v.prepareBlock(block.Header(), prevHeader, msg, status) }) + v.LaunchUntrackedThread(func() { v.prepareBlock(context.Background(), block.Header(), prevHeader, msg, status) }) } var launchTime = time.Now().Format("2006_01_02__15_04") @@ -398,10 +398,14 @@ func (v *BlockValidator) validate(ctx context.Context, validationStatus *validat default: } }() + entry.BatchInfo = append(entry.BatchInfo, BatchInfo{ + Number: entry.StartPosition.BatchNumber, + Data: seqMsg, + }) log.Info("starting validation for block", "blockNr", 
entry.BlockNumber) for _, moduleRoot := range validationStatus.ModuleRoots { before := time.Now() - gsEnd, delayedMsg, err := v.executeBlock(ctx, entry, seqMsg, moduleRoot) + gsEnd, delayedMsg, err := v.executeBlock(ctx, entry, moduleRoot) duration := time.Since(before) if err != nil { if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { diff --git a/validator/challenge_manager.go b/validator/challenge_manager.go index a43c67ec17..02fa735b2f 100644 --- a/validator/challenge_manager.go +++ b/validator/challenge_manager.go @@ -7,9 +7,10 @@ import ( "context" "encoding/binary" "fmt" - "github.com/offchainlabs/nitro/arbstate" "math/big" + "github.com/offchainlabs/nitro/arbstate" + "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" @@ -416,12 +417,14 @@ func (m *ChallengeManager) createInitialMachine(ctx context.Context, blockNum in if err != nil { return err } + var batchInfo []BatchInfo if tooFar { // Just record the part of block creation before the message is read - _, preimages, err := RecordBlockCreation(m.blockchain, blockHeader, nil) + _, preimages, readBatchInfo, err := RecordBlockCreation(ctx, m.blockchain, m.inboxReader, blockHeader, nil, true) if err != nil { return err } + batchInfo = readBatchInfo err = SetMachinePreimageResolver(ctx, machine, preimages, nil, m.blockchain, m.das) if err != nil { return err @@ -440,7 +443,7 @@ func (m *ChallengeManager) createInitialMachine(ctx context.Context, blockNum in if nextHeader == nil { return fmt.Errorf("next block header %v after challenge point unknown", blockNum+1) } - preimages, hasDelayedMsg, delayedMsgNr, err := BlockDataForValidation(m.blockchain, nextHeader, blockHeader, message, false) + preimages, readBatchInfo, hasDelayedMsg, delayedMsgNr, err := BlockDataForValidation(ctx, m.blockchain, m.inboxReader, nextHeader, blockHeader, message, false) if err != nil { return err } @@ -448,7 
+451,12 @@ func (m *ChallengeManager) createInitialMachine(ctx context.Context, blockNum in if err != nil { return err } - err = SetMachinePreimageResolver(ctx, machine, preimages, batchBytes, m.blockchain, m.das) + readBatchInfo = append(readBatchInfo, BatchInfo{ + Number: startGlobalState.Batch, + Data: batchBytes, + }) + batchInfo = readBatchInfo + err = SetMachinePreimageResolver(ctx, machine, preimages, batchInfo, m.blockchain, m.das) if err != nil { return err } @@ -462,7 +470,9 @@ func (m *ChallengeManager) createInitialMachine(ctx context.Context, blockNum in return err } } - err = machine.AddSequencerInboxMessage(startGlobalState.Batch, batchBytes) + } + for _, batch := range batchInfo { + err = machine.AddSequencerInboxMessage(batch.Number, batch.Data) if err != nil { return err } diff --git a/validator/l1_validator.go b/validator/l1_validator.go index 992fc8565d..358ef5a99a 100644 --- a/validator/l1_validator.go +++ b/validator/l1_validator.go @@ -6,9 +6,10 @@ package validator import ( "context" "fmt" - "github.com/offchainlabs/nitro/arbstate" "math/big" + "github.com/offchainlabs/nitro/arbstate" + "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" @@ -558,8 +559,16 @@ func (v *L1Validator) createNewNodeAction( NumBlocks: assertionNumBlocks, } + wasmModuleRoot := v.lastWasmModuleRoot + if v.blockValidator == nil { + wasmModuleRoot, err = v.rollup.WasmModuleRoot(v.getCallOpts(ctx)) + if err != nil { + return nil, err + } + } + executionHash := assertion.ExecutionHash() - newNodeHash := crypto.Keccak256Hash(hasSiblingByte[:], lastHash[:], executionHash[:], validatedBatchAcc[:]) + newNodeHash := crypto.Keccak256Hash(hasSiblingByte[:], lastHash[:], executionHash[:], validatedBatchAcc[:], wasmModuleRoot[:]) action := createNodeAction{ assertion: assertion, diff --git a/validator/rollup_watcher.go b/validator/rollup_watcher.go index baab74ebbb..6d48331661 100644 --- 
a/validator/rollup_watcher.go +++ b/validator/rollup_watcher.go @@ -177,7 +177,7 @@ func (r *RollupWatcher) LookupNodeChildren(ctx context.Context, nodeNum uint64, if i > 0 { lastHashIsSibling[0] = 1 } - lastHash = crypto.Keccak256Hash(lastHashIsSibling[:], lastHash[:], parsedLog.ExecutionHash[:], parsedLog.AfterInboxBatchAcc[:]) + lastHash = crypto.Keccak256Hash(lastHashIsSibling[:], lastHash[:], parsedLog.ExecutionHash[:], parsedLog.AfterInboxBatchAcc[:], parsedLog.WasmModuleRoot[:]) infos = append(infos, &NodeInfo{ NodeNum: parsedLog.NodeNum, BlockProposed: ethLog.BlockNumber, diff --git a/validator/staker.go b/validator/staker.go index 6fd84be606..cb748ae93c 100644 --- a/validator/staker.go +++ b/validator/staker.go @@ -266,7 +266,9 @@ func (s *Staker) Act(ctx context.Context) (*types.Transaction, error) { } // If the wallet address is zero, or the wallet address isn't staked, // this will return the latest node and its hash (atomically). - latestStakedNodeNum, latestStakedNodeInfo, err := s.validatorUtils.LatestStaked(callOpts, s.rollupAddress, walletAddressOrZero) + latestStakedNodeNum, latestStakedNodeInfo, err := s.validatorUtils.LatestStaked( + callOpts, s.rollupAddress, walletAddressOrZero, + ) if err != nil { return nil, err } diff --git a/validator/stateless_block_validator.go b/validator/stateless_block_validator.go index c7346df44a..08a8a59546 100644 --- a/validator/stateless_block_validator.go +++ b/validator/stateless_block_validator.go @@ -6,12 +6,14 @@ package validator import ( "context" "fmt" + "github.com/offchainlabs/nitro/arbutil" "github.com/ethereum/go-ethereum/arbitrum" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" @@ -136,6 +138,7 @@ type validationEntry struct { StartPosition 
GlobalStatePosition EndPosition GlobalStatePosition Preimages map[common.Hash][]byte + BatchInfo []BatchInfo } func (v *validationEntry) start() GoGlobalState { @@ -164,6 +167,7 @@ func newValidationEntry( hasDelayed bool, delayedMsgNr uint64, preimages map[common.Hash][]byte, + batchInfo []BatchInfo, ) (*validationEntry, error) { extraInfo, err := types.DeserializeHeaderExtraInformation(header) if err != nil { @@ -183,6 +187,7 @@ func newValidationEntry( HasDelayedMsg: hasDelayed, DelayedMsgNr: delayedMsgNr, Preimages: preimages, + BatchInfo: batchInfo, }, nil } @@ -212,11 +217,32 @@ func NewStatelessBlockValidator( return validator, nil } +type BatchInfo struct { + Number uint64 + Data []byte +} + // If msg is nil, this will record block creation up to the point where message would be accessed (for a "too far" proof) -func RecordBlockCreation(blockchain *core.BlockChain, prevHeader *types.Header, msg *arbstate.MessageWithMetadata) (common.Hash, map[common.Hash][]byte, error) { - recordingdb, chaincontext, recordingKV, err := arbitrum.PrepareRecording(blockchain, prevHeader) - if err != nil { - return common.Hash{}, nil, err +func RecordBlockCreation(ctx context.Context, blockchain *core.BlockChain, inboxReader InboxReaderInterface, prevHeader *types.Header, msg *arbstate.MessageWithMetadata, producePreimages bool) (common.Hash, map[common.Hash][]byte, []BatchInfo, error) { + var recordingdb *state.StateDB + var chaincontext core.ChainContext + var recordingKV *arbitrum.RecordingKV + var err error + if producePreimages { + recordingdb, chaincontext, recordingKV, err = arbitrum.PrepareRecording(blockchain, prevHeader) + if err != nil { + return common.Hash{}, nil, nil, err + } + } else { + var prevRoot common.Hash + if prevHeader != nil { + prevRoot = prevHeader.Root + } + recordingdb, err = blockchain.StateAt(prevRoot) + if err != nil { + return common.Hash{}, nil, nil, err + } + chaincontext = blockchain } chainConfig := blockchain.Config() @@ -226,36 +252,57 @@ 
func RecordBlockCreation(blockchain *core.BlockChain, prevHeader *types.Header, if prevHeader != nil { initialArbosState, err := arbosState.OpenSystemArbosState(recordingdb, nil, true) if err != nil { - return common.Hash{}, nil, fmt.Errorf("error opening initial ArbOS state: %w", err) + return common.Hash{}, nil, nil, fmt.Errorf("error opening initial ArbOS state: %w", err) } chainId, err := initialArbosState.ChainId() if err != nil { - return common.Hash{}, nil, fmt.Errorf("error getting chain ID from initial ArbOS state: %w", err) + return common.Hash{}, nil, nil, fmt.Errorf("error getting chain ID from initial ArbOS state: %w", err) } if chainId.Cmp(chainConfig.ChainID) != 0 { - return common.Hash{}, nil, fmt.Errorf("unexpected chain ID %v in ArbOS state, expected %v", chainId, chainConfig.ChainID) + return common.Hash{}, nil, nil, fmt.Errorf("unexpected chain ID %v in ArbOS state, expected %v", chainId, chainConfig.ChainID) } } var blockHash common.Hash + var readBatchInfo []BatchInfo if msg != nil { - block, _ := arbos.ProduceBlock( + batchFetcher := func(batchNum uint64) ([]byte, error) { + data, err := inboxReader.GetSequencerMessageBytes(ctx, batchNum) + if err != nil { + return nil, err + } + readBatchInfo = append(readBatchInfo, BatchInfo{ + Number: batchNum, + Data: data, + }) + return data, nil + } + block, _, err := arbos.ProduceBlock( msg.Message, msg.DelayedMessagesRead, prevHeader, recordingdb, chaincontext, chainConfig, + batchFetcher, ) + if err != nil { + return common.Hash{}, nil, nil, err + } blockHash = block.Hash() } - preimages, err := arbitrum.PreimagesFromRecording(chaincontext, recordingKV) - - return blockHash, preimages, err + var preimages map[common.Hash][]byte + if recordingKV != nil { + preimages, err = arbitrum.PreimagesFromRecording(chaincontext, recordingKV) + if err != nil { + return common.Hash{}, nil, nil, err + } + } + return blockHash, preimages, readBatchInfo, err } -func BlockDataForValidation(blockchain *core.BlockChain, 
header, prevHeader *types.Header, msg arbstate.MessageWithMetadata, producePreimages bool) (preimages map[common.Hash][]byte, hasDelayedMessage bool, delayedMsgNr uint64, err error) { +func BlockDataForValidation(ctx context.Context, blockchain *core.BlockChain, inboxReader InboxReaderInterface, header, prevHeader *types.Header, msg arbstate.MessageWithMetadata, producePreimages bool) (preimages map[common.Hash][]byte, readBatchInfo []BatchInfo, hasDelayedMessage bool, delayedMsgNr uint64, err error) { var prevHash common.Hash if prevHeader != nil { prevHash = prevHeader.Hash() @@ -265,9 +312,9 @@ func BlockDataForValidation(blockchain *core.BlockChain, header, prevHeader *typ return } - if prevHeader != nil && producePreimages { + if prevHeader != nil { var blockhash common.Hash - blockhash, preimages, err = RecordBlockCreation(blockchain, prevHeader, &msg) + blockhash, preimages, readBatchInfo, err = RecordBlockCreation(ctx, blockchain, inboxReader, prevHeader, &msg, producePreimages) if err != nil { return } @@ -287,23 +334,25 @@ func BlockDataForValidation(blockchain *core.BlockChain, header, prevHeader *typ return } -func SetMachinePreimageResolver(ctx context.Context, mach *ArbitratorMachine, preimages map[common.Hash][]byte, seqMsg []byte, bc *core.BlockChain, das arbstate.DataAvailabilityReader) error { +func SetMachinePreimageResolver(ctx context.Context, mach *ArbitratorMachine, preimages map[common.Hash][]byte, batchInfo []BatchInfo, bc *core.BlockChain, das arbstate.DataAvailabilityReader) error { recordNewPreimages := true if preimages == nil { preimages = make(map[common.Hash][]byte) recordNewPreimages = false } - if arbstate.IsDASMessageHeaderByte(seqMsg[40]) { - if das == nil { - log.Error("No DAS configured, but sequencer message found with DAS header") - if bc.Config().ArbitrumChainParams.DataAvailabilityCommittee { - return errors.New("processing data availability chain without DAS configured") - } - } else { - _, err := 
arbstate.RecoverPayloadFromDasBatch(ctx, seqMsg, das, preimages) - if err != nil { - return err + for _, batch := range batchInfo { + if len(batch.Data) >= 41 && arbstate.IsDASMessageHeaderByte(batch.Data[40]) { + if das == nil { + log.Error("No DAS configured, but sequencer message found with DAS header") + if bc.Config().ArbitrumChainParams.DataAvailabilityCommittee { + return errors.New("processing data availability chain without DAS configured") + } + } else { + _, err := arbstate.RecoverPayloadFromDasBatch(ctx, batch.Data, das, preimages) + if err != nil { + return err + } } } } @@ -336,7 +385,7 @@ func SetMachinePreimageResolver(ctx context.Context, mach *ArbitratorMachine, pr }) } -func (v *StatelessBlockValidator) executeBlock(ctx context.Context, entry *validationEntry, seqMsg []byte, moduleRoot common.Hash) (GoGlobalState, []byte, error) { +func (v *StatelessBlockValidator) executeBlock(ctx context.Context, entry *validationEntry, moduleRoot common.Hash) (GoGlobalState, []byte, error) { start := entry.StartPosition gsStart := entry.start() @@ -345,7 +394,7 @@ func (v *StatelessBlockValidator) executeBlock(ctx context.Context, entry *valid return GoGlobalState{}, nil, fmt.Errorf("unabled to get WASM machine: %w", err) } mach := basemachine.Clone() - err = SetMachinePreimageResolver(ctx, mach, entry.Preimages, seqMsg, v.blockchain, v.daService) + err = SetMachinePreimageResolver(ctx, mach, entry.Preimages, entry.BatchInfo, v.blockchain, v.daService) if err != nil { return GoGlobalState{}, nil, err } @@ -354,10 +403,12 @@ func (v *StatelessBlockValidator) executeBlock(ctx context.Context, entry *valid log.Error("error while setting global state for proving", "err", err, "gsStart", gsStart) return GoGlobalState{}, nil, errors.New("error while setting global state for proving") } - err = mach.AddSequencerInboxMessage(start.BatchNumber, seqMsg) - if err != nil { - log.Error("error while trying to add sequencer msg for proving", "err", err, "seq", 
start.BatchNumber, "blockNr", entry.BlockNumber) - return GoGlobalState{}, nil, errors.New("error while trying to add sequencer msg for proving") + for _, batch := range entry.BatchInfo { + err = mach.AddSequencerInboxMessage(batch.Number, batch.Data) + if err != nil { + log.Error("error while trying to add sequencer msg for proving", "err", err, "seq", start.BatchNumber, "blockNr", entry.BlockNumber) + return GoGlobalState{}, nil, errors.New("error while trying to add sequencer msg for proving") + } } var delayedMsg []byte if entry.HasDelayedMsg { @@ -406,7 +457,7 @@ func (v *StatelessBlockValidator) ValidateBlock(ctx context.Context, header *typ if err != nil { return false, err } - preimages, hasDelayedMessage, delayedMsgToRead, err := BlockDataForValidation(v.blockchain, header, prevHeader, msg, false) + preimages, readBatchInfo, hasDelayedMessage, delayedMsgToRead, err := BlockDataForValidation(ctx, v.blockchain, v.inboxReader, header, prevHeader, msg, false) if err != nil { return false, fmt.Errorf("failed to get block data to validate: %w", err) } @@ -425,7 +476,7 @@ func (v *StatelessBlockValidator) ValidateBlock(ctx context.Context, header *typ return false, fmt.Errorf("failed calculating position for validation: %w", err) } - entry, err := newValidationEntry(prevHeader, header, hasDelayedMessage, delayedMsgToRead, preimages) + entry, err := newValidationEntry(prevHeader, header, hasDelayedMessage, delayedMsgToRead, preimages, readBatchInfo) if err != nil { return false, fmt.Errorf("failed to create validation entry %w", err) } @@ -436,8 +487,12 @@ func (v *StatelessBlockValidator) ValidateBlock(ctx context.Context, header *typ if err != nil { return false, err } + entry.BatchInfo = append(entry.BatchInfo, BatchInfo{ + Number: startPos.BatchNumber, + Data: seqMsg, + }) - gsEnd, _, err := v.executeBlock(ctx, entry, seqMsg, moduleRoot) + gsEnd, _, err := v.executeBlock(ctx, entry, moduleRoot) if err != nil { return false, err }