diff --git a/Cargo.toml b/Cargo.toml index 954a781e..45332320 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -138,29 +138,50 @@ scroll-alloy-consensus = { git = "https://github.com/scroll-tech/reth.git", defa scroll-alloy-hardforks = { git = "https://github.com/scroll-tech/reth.git", default-features = false } scroll-alloy-network = { git = "https://github.com/scroll-tech/reth.git", default-features = false } scroll-alloy-provider = { git = "https://github.com/scroll-tech/reth.git", default-features = false } +scroll-alloy-evm = { git = "https://github.com/scroll-tech/reth.git", default-features = false } +scroll-alloy-rpc-types = { git = "https://github.com/scroll-tech/reth.git", default-features = false } scroll-alloy-rpc-types-engine = { git = "https://github.com/scroll-tech/reth.git", default-features = false } # reth reth-chainspec = { git = "https://github.com/scroll-tech/reth.git", default-features = false } reth-e2e-test-utils = { git = "https://github.com/scroll-tech/reth.git" } +reth-eth-wire = { git = "https://github.com/scroll-tech/reth.git", default-features = false } reth-eth-wire-types = { git = "https://github.com/scroll-tech/reth.git", default-features = false } reth-network = { git = "https://github.com/scroll-tech/reth.git", default-features = false } reth-network-api = { git = "https://github.com/scroll-tech/reth.git", default-features = false } +reth-network-p2p = { git = "https://github.com/scroll-tech/reth.git", default-features = false } reth-network-peers = { git = "https://github.com/scroll-tech/reth.git", default-features = false } +reth-network-types = { git = "https://github.com/scroll-tech/reth.git", default-features = false } reth-node-builder = { git = "https://github.com/scroll-tech/reth.git", default-features = false } reth-node-core = { git = "https://github.com/scroll-tech/reth.git", default-features = false } +reth-node-api = { git = "https://github.com/scroll-tech/reth.git", default-features = false } +reth-node-types = { git = 
"https://github.com/scroll-tech/reth.git", default-features = false } reth-payload-primitives = { git = "https://github.com/scroll-tech/reth.git", default-features = false } reth-primitives = { git = "https://github.com/scroll-tech/reth.git", default-features = false } reth-primitives-traits = { git = "https://github.com/scroll-tech/reth.git", default-features = false } reth-provider = { git = "https://github.com/scroll-tech/reth.git", default-features = false } reth-rpc-builder = { git = "https://github.com/scroll-tech/reth.git", default-features = false } +reth-rpc-api = { git = "https://github.com/scroll-tech/reth.git", default-features = false } +reth-rpc-eth-api = { git = "https://github.com/scroll-tech/reth.git", default-features = false } +reth-rpc-eth-types = { git = "https://github.com/scroll-tech/reth.git", default-features = false } reth-rpc-server-types = { git = "https://github.com/scroll-tech/reth.git", default-features = false } +reth-storage-api = { git = "https://github.com/scroll-tech/reth.git", default-features = false } reth-tasks = { git = "https://github.com/scroll-tech/reth.git", default-features = false } reth-tokio-util = { git = "https://github.com/scroll-tech/reth.git", default-features = false } reth-tracing = { git = "https://github.com/scroll-tech/reth.git", default-features = false } +reth-transaction-pool = { git = "https://github.com/scroll-tech/reth.git", default-features = false } +reth-trie-db = { git = "https://github.com/scroll-tech/reth.git", default-features = false } +reth-testing-utils = { git = "https://github.com/scroll-tech/reth.git", default-features = false } +reth-revm = { git = "https://github.com/scroll-tech/reth.git", default-features = false } +reth-evm = { git = "https://github.com/scroll-tech/reth.git", default-features = false } +reth-engine-local = { git = "https://github.com/scroll-tech/reth.git", default-features = false } +reth-cli-util = { git = "https://github.com/scroll-tech/reth.git", default-features = 
false } # reth-scroll reth-scroll-chainspec = { git = "https://github.com/scroll-tech/reth.git", default-features = false } +reth-scroll-cli = { git = "https://github.com/scroll-tech/reth.git", default-features = false } +reth-scroll-evm = { git = "https://github.com/scroll-tech/reth.git", default-features = false } +reth-scroll-rpc = { git = "https://github.com/scroll-tech/reth.git", default-features = false } reth-scroll-engine-primitives = { git = "https://github.com/scroll-tech/reth.git", default-features = false } reth-scroll-forks = { git = "https://github.com/scroll-tech/reth.git", default-features = false } reth-scroll-node = { git = "https://github.com/scroll-tech/reth.git", default-features = false } diff --git a/Dockerfile.test b/Dockerfile.test index 40fa48e2..112e674d 100644 --- a/Dockerfile.test +++ b/Dockerfile.test @@ -4,7 +4,7 @@ ARG CARGO_FEATURES="" # Install basic packages RUN apt-get update && apt-get -y upgrade && apt-get install -y libclang-dev pkg-config -RUN cargo install cargo-chef --locked --version 0.1.71 +RUN cargo install cargo-chef --locked FROM chef AS planner WORKDIR /app @@ -23,17 +23,18 @@ RUN mkdir -p tests/src && \ echo 'name = "tests"' >> tests/Cargo.toml && \ echo 'path = "src/lib.rs"' >> tests/Cargo.toml && \ echo 'pub fn dummy() {}' > tests/src/lib.rs -RUN --mount=type=cache,target=/usr/local/cargo/registry \ - cargo chef prepare --recipe-path /recipe.json +RUN cargo chef prepare --recipe-path /recipe.json FROM chef AS builder WORKDIR /app COPY --from=planner /recipe.json recipe.json RUN --mount=type=cache,target=/usr/local/cargo/registry \ + --mount=type=cache,target=/usr/local/cargo/git \ cargo chef cook --release --recipe-path recipe.json COPY . . 
RUN --mount=type=cache,target=/usr/local/cargo/registry \ + --mount=type=cache,target=/usr/local/cargo/git \ cargo build ${CARGO_FEATURES:+--features $CARGO_FEATURES} --release --target-dir=/app-target # Release diff --git a/crates/chain-orchestrator/Cargo.toml b/crates/chain-orchestrator/Cargo.toml index 16fa7a2a..3462acaf 100644 --- a/crates/chain-orchestrator/Cargo.toml +++ b/crates/chain-orchestrator/Cargo.toml @@ -32,7 +32,7 @@ scroll-network.workspace = true # reth reth-chainspec.workspace = true -reth-network-p2p = { git = "https://github.com/scroll-tech/reth.git", default-features = false } +reth-network-p2p.workspace = true reth-network-peers.workspace = true reth-primitives-traits.workspace = true diff --git a/crates/engine/Cargo.toml b/crates/engine/Cargo.toml index 318b885c..0d822e9f 100644 --- a/crates/engine/Cargo.toml +++ b/crates/engine/Cargo.toml @@ -32,7 +32,7 @@ reth-primitives-traits.workspace = true # reth-scroll reth-scroll-chainspec.workspace = true reth-scroll-primitives.workspace = true -reth-scroll-engine-primitives = { git = "https://github.com/scroll-tech/reth.git", default-features = false } +reth-scroll-engine-primitives.workspace = true # rollup-node rollup-node-primitives.workspace = true @@ -56,7 +56,7 @@ tracing.workspace = true alloy-consensus.workspace = true arbitrary.workspace = true async-trait.workspace = true -reth-testing-utils = { git = "https://github.com/scroll-tech/reth.git", default-features = false } +reth-testing-utils.workspace = true rollup-node-providers = { workspace = true, features = ["test-utils"] } scroll-alloy-consensus.workspace = true scroll-alloy-rpc-types-engine = { workspace = true, features = ["arbitrary"] } diff --git a/crates/network/Cargo.toml b/crates/network/Cargo.toml index 83537593..8dceb191 100644 --- a/crates/network/Cargo.toml +++ b/crates/network/Cargo.toml @@ -15,10 +15,10 @@ reth-chainspec.workspace = true reth-eth-wire-types.workspace = true reth-network.workspace = true 
reth-network-api.workspace = true -reth-network-types = { git = "https://github.com/scroll-tech/reth.git", default-features = false } +reth-network-types.workspace = true reth-network-peers.workspace = true reth-primitives-traits.workspace = true -reth-storage-api = { git = "https://github.com/scroll-tech/reth.git", default-features = false } +reth-storage-api.workspace = true reth-tokio-util.workspace = true # scroll diff --git a/crates/network/src/manager.rs b/crates/network/src/manager.rs index 9ad6835a..970bcb68 100644 --- a/crates/network/src/manager.rs +++ b/crates/network/src/manager.rs @@ -210,7 +210,7 @@ impl< // Announce block to the filtered set of peers for peer_id in peers { - trace!(target: "scroll::network::manager", peer_id = %peer_id, block_hash = %hash, "Announcing new block to peer"); + trace!(target: "scroll::network::manager", peer_id = %peer_id, block_number = %block.block.header.number, block_hash = %hash, "Announcing new block to peer"); self.scroll_wire.announce_block(peer_id, &block, hash); } } @@ -315,7 +315,7 @@ impl< if self.blocks_seen.contains(&(block_hash, signature)) { return None; } - trace!(target: "scroll::bridge::import", peer_id = %peer_id, block_hash = %block_hash, "Received new block from eth-wire protocol"); + trace!(target: "scroll::bridge::import", peer_id = %peer_id, block_hash = %block_hash, signature = %signature.to_string(), extra_data = %extra_data.to_string(), "Received new block from eth-wire protocol"); // Update the state of the peer cache i.e. peer has seen this block. 
self.scroll_wire diff --git a/crates/node/Cargo.toml b/crates/node/Cargo.toml index cca77f27..fa4adf37 100644 --- a/crates/node/Cargo.toml +++ b/crates/node/Cargo.toml @@ -28,37 +28,37 @@ alloy-signer = "1.0.30" alloy-transport.workspace = true scroll-alloy-consensus.workspace = true -scroll-alloy-evm = { git = "https://github.com/scroll-tech/reth.git" } +scroll-alloy-evm.workspace = true scroll-alloy-hardforks.workspace = true scroll-alloy-network.workspace = true scroll-alloy-provider.workspace = true reth-primitives-traits.workspace = true reth-scroll-engine-primitives.workspace = true -reth-scroll-evm = { git = "https://github.com/scroll-tech/reth.git" } -reth-scroll-cli = { git = "https://github.com/scroll-tech/reth.git" } +reth-scroll-evm.workspace = true +reth-scroll-cli.workspace = true reth-scroll-primitives.workspace = true reth-scroll-chainspec.workspace = true reth-scroll-node.workspace = true -reth-scroll-rpc = { git = "https://github.com/scroll-tech/reth.git" } +reth-scroll-rpc.workspace = true reth-chainspec.workspace = true -reth-cli-util = { git = "https://github.com/scroll-tech/reth.git" } +reth-cli-util.workspace = true reth-eth-wire-types.workspace = true -reth-evm = { git = "https://github.com/scroll-tech/reth.git" } +reth-evm.workspace = true reth-node-builder.workspace = true -reth-node-api = { git = "https://github.com/scroll-tech/reth.git", default-features = false } +reth-node-api.workspace = true reth-node-core.workspace = true -reth-node-types = { git = "https://github.com/scroll-tech/reth.git", default-features = false } +reth-node-types.workspace = true reth-network.workspace = true reth-network-api.workspace = true -reth-revm = { git = "https://github.com/scroll-tech/reth.git", default-features = false } -reth-rpc-api = { git = "https://github.com/scroll-tech/reth.git", default-features = false } -reth-rpc-eth-api = { git = "https://github.com/scroll-tech/reth.git", default-features = false } -reth-rpc-eth-types = { git = 
"https://github.com/scroll-tech/reth.git", default-features = false } +reth-revm.workspace = true +reth-rpc-api.workspace = true +reth-rpc-eth-api.workspace = true +reth-rpc-eth-types.workspace = true reth-tasks.workspace = true -reth-transaction-pool = { git = "https://github.com/scroll-tech/reth.git", default-features = false } -reth-trie-db = { git = "https://github.com/scroll-tech/reth.git", default-features = false } +reth-transaction-pool.workspace = true +reth-trie-db.workspace = true # rollup node rollup-node-chain-orchestrator.workspace = true @@ -76,12 +76,12 @@ aws-sdk-kms = "1.76.0" # test-utils alloy-rpc-types-engine = { workspace = true, optional = true } reth-e2e-test-utils = { workspace = true, optional = true } -reth-engine-local = { git = "https://github.com/scroll-tech/reth.git", default-features = false, optional = true } +reth-engine-local = { workspace = true, optional = true } reth-provider = { workspace = true, optional = true } reth-rpc-server-types = { workspace = true, optional = true } scroll-alloy-rpc-types-engine = { workspace = true, optional = true } scroll-derivation-pipeline = { workspace = true, optional = true } -scroll-alloy-rpc-types = { git = "https://github.com/scroll-tech/reth.git", default-features = false } +scroll-alloy-rpc-types.workspace = true scroll-db.workspace = true scroll-engine.workspace = true diff --git a/crates/scroll-wire/Cargo.toml b/crates/scroll-wire/Cargo.toml index c3b9bd47..d98dd7f4 100644 --- a/crates/scroll-wire/Cargo.toml +++ b/crates/scroll-wire/Cargo.toml @@ -15,7 +15,7 @@ alloy-primitives = { workspace = true, features = ["map-foldhash"] } alloy-rlp = { version = "0.3.10", default-features = false } # reth -reth-eth-wire = { git = "https://github.com/scroll-tech/reth.git", default-features = false } +reth-eth-wire.workspace = true reth-network.workspace = true reth-network-api.workspace = true diff --git a/crates/sequencer/Cargo.toml b/crates/sequencer/Cargo.toml index bd6c1fe0..49c6cdc4 100644 
--- a/crates/sequencer/Cargo.toml +++ b/crates/sequencer/Cargo.toml @@ -51,7 +51,7 @@ scroll-alloy-consensus.workspace = true # reth reth-e2e-test-utils.workspace = true reth-node-core.workspace = true -reth-tracing = { git = "https://github.com/scroll-tech/reth.git" } +reth-tracing.workspace = true # reth-scroll reth-scroll-chainspec.workspace = true diff --git a/tests/l2geth-genesis-e2e.json b/tests/l2geth-genesis-e2e.json index a63be7b0..fcf67d33 100644 --- a/tests/l2geth-genesis-e2e.json +++ b/tests/l2geth-genesis-e2e.json @@ -23,6 +23,7 @@ "feynmanTime": 0, "systemContract": { "period": 1, + "blocks_per_second": 2, "system_contract_address": "0x55B150d210356452e4E79cCb6B778b4e1B167091", "system_contract_slot": "0x0000000000000000000000000000000000000000000000000000000000000067" }, diff --git a/tests/launch_l2geth_follower.bash b/tests/launch_l2geth_follower.bash index 0e4b06d3..51ff1de4 100644 --- a/tests/launch_l2geth_follower.bash +++ b/tests/launch_l2geth_follower.bash @@ -3,17 +3,13 @@ set -e geth init --datadir=/l2geth /l2geth-genesis-e2e.json -# Create config.toml with static nodes instead of bootnodes -echo '[Node.P2P]' > /l2geth/config.toml -echo 'StaticNodes = ["enode://8fc4f6dfd0a2ebf56560d0b0ef5e60ad7bcb01e13f929eae53a4c77086d9c1e74eb8b8c8945035d25c6287afdd871f0d41b3fd7e189697decd0f13538d1ac620@l2geth-sequencer:30303","enode://e7f7e271f62bd2b697add14e6987419758c97e83b0478bd948f5f2d271495728e7edef5bd78ad65258ac910f28e86928ead0c42ee51f2a0168d8ca23ba939766@rollup-node-sequencer:30303"]' >> /l2geth/config.toml - echo "Starting l2geth as follower..." 
exec geth --datadir=/l2geth \ - --config /l2geth/config.toml \ --port 30303 --syncmode full --networkid 938471 --nodiscover \ - --http --http.addr 0.0.0.0 --http.port 8545 --http.vhosts "*" --http.corsdomain "*" --http.api "eth,scroll,net,web3,debug" \ - --ws --ws.addr 0.0.0.0 --ws.port 8546 --ws.api "eth,scroll,net,web3,debug" \ + --http --http.addr 0.0.0.0 --http.port 8545 --http.vhosts "*" --http.corsdomain "*" --http.api "admin,eth,scroll,net,web3,debug" \ + --ws --ws.addr 0.0.0.0 --ws.port 8546 --ws.api "admin,eth,scroll,net,web3,debug" \ --pprof --pprof.addr 0.0.0.0 --pprof.port 6060 --metrics --verbosity 5 --log.debug \ --l1.endpoint "http://l1-node:8545" --l1.confirmations finalized --l1.sync.startblock 0 \ --gcmode archive --cache.noprefetch --cache.snapshot=0 --snapshot=false \ + --gossip.enablebroadcasttoall \ --nat extip:0.0.0.0 diff --git a/tests/launch_l2geth_sequencer.bash b/tests/launch_l2geth_sequencer.bash index bb6572da..6cd7207c 100644 --- a/tests/launch_l2geth_sequencer.bash +++ b/tests/launch_l2geth_sequencer.bash @@ -16,11 +16,12 @@ echo "test" > /l2geth/keystore/password.txt echo "Starting l2geth as sequencer..." 
exec geth --datadir=/l2geth \ --port 30303 --syncmode full --networkid 938471 --nodiscover \ - --http --http.addr 0.0.0.0 --http.port 8545 --http.vhosts "*" --http.corsdomain "*" --http.api "eth,scroll,net,web3,debug,miner" \ - --ws --ws.addr 0.0.0.0 --ws.port 8546 --ws.api "eth,scroll,net,web3,debug,miner" \ + --http --http.addr 0.0.0.0 --http.port 8545 --http.vhosts "*" --http.corsdomain "*" --http.api "admin,eth,scroll,net,web3,debug,miner" \ + --ws --ws.addr 0.0.0.0 --ws.port 8546 --ws.api "admin,eth,scroll,net,web3,debug,miner" \ --pprof --pprof.addr 0.0.0.0 --pprof.port 6060 --metrics --verbosity 5 --log.debug \ --l1.endpoint "http://l1-node:8545" --l1.confirmations finalized --l1.sync.startblock 0 \ --gcmode archive --cache.noprefetch --cache.snapshot=0 --snapshot=false \ --nat extip:0.0.0.0 \ + --gossip.enablebroadcasttoall \ --unlock "0xb674ff99cca262c99d3eab5b32796a99188543da" --password "/l2geth/keystore/password.txt" --allow-insecure-unlock \ - --miner.allowempty + --miner.allowempty --miner.gaslimit 30000000 diff --git a/tests/launch_rollup_node_follower.bash b/tests/launch_rollup_node_follower.bash index c8e5cc06..f0527749 100644 --- a/tests/launch_rollup_node_follower.bash +++ b/tests/launch_rollup_node_follower.bash @@ -1,16 +1,18 @@ #!/usr/bin/env bash set -e -export RUST_LOG=sqlx=off,scroll=trace,reth=trace,rollup=trace,info +export RUST_LOG=sqlx=off,scroll=trace,reth=trace,rollup=trace,trace exec rollup-node node --chain /l2reth/l2reth-genesis-e2e.json --datadir=/l2reth --metrics=0.0.0.0:6060 \ + --disable-discovery \ + --network.valid_signer "0xb674ff99cca262c99d3eab5b32796a99188543da" \ --http --http.addr=0.0.0.0 --http.port=8545 --http.corsdomain "*" --http.api admin,debug,eth,net,trace,txpool,web3,rpc,reth,ots,flashbots,miner,mev \ --ws --ws.addr=0.0.0.0 --ws.port=8546 --ws.api admin,debug,eth,net,trace,txpool,web3,rpc,reth,ots,flashbots,miner,mev \ + --rpc.rollup-node \ --log.stdout.format log-fmt -vvv \ --txpool.pending-max-count=1000 \ 
--builder.gaslimit=30000000 \ --rpc.max-connections=5000 \ - --trusted-peers enode://8fc4f6dfd0a2ebf56560d0b0ef5e60ad7bcb01e13f929eae53a4c77086d9c1e74eb8b8c8945035d25c6287afdd871f0d41b3fd7e189697decd0f13538d1ac620@l2geth-sequencer:30303,enode://e7f7e271f62bd2b697add14e6987419758c97e83b0478bd948f5f2d271495728e7edef5bd78ad65258ac910f28e86928ead0c42ee51f2a0168d8ca23ba939766@rollup-node-sequencer:30303 \ --engine.sync-at-startup false \ --l1.url http://l1-node:8545 \ --blob.mock diff --git a/tests/launch_rollup_node_sequencer.bash b/tests/launch_rollup_node_sequencer.bash index 7224f419..e66fa4b2 100644 --- a/tests/launch_rollup_node_sequencer.bash +++ b/tests/launch_rollup_node_sequencer.bash @@ -8,22 +8,24 @@ echo -n "0xd510c4b7c61a604f800c4f06803b1ee14b9a63de345e53426ae50425f2dbb058" > / echo -n "01c0d9156e199d89814d4b18e9eb64e25de3927f3f6d27b778177f3ff6b610ad" > /l2reth/nodekey # -> enode://e7f7e271f62bd2b697add14e6987419758c97e83b0478bd948f5f2d271495728e7edef5bd78ad65258ac910f28e86928ead0c42ee51f2a0168d8ca23ba939766@rollup-node-sequencer:30303 -export RUST_LOG=sqlx=off,scroll=trace,reth=trace,rollup=trace,info +export RUST_LOG=sqlx=off,scroll=trace,reth=trace,rollup=trace,trace exec rollup-node node --chain /l2reth/l2reth-genesis-e2e.json --datadir=/l2reth --metrics=0.0.0.0:6060 \ + --disable-discovery \ + --network.valid_signer "0xb674ff99cca262c99d3eab5b32796a99188543da" \ --http --http.addr=0.0.0.0 --http.port=8545 --http.corsdomain "*" --http.api admin,debug,eth,net,trace,txpool,web3,rpc,reth,ots,flashbots,miner,mev \ --ws --ws.addr=0.0.0.0 --ws.port=8546 --ws.api admin,debug,eth,net,trace,txpool,web3,rpc,reth,ots,flashbots,miner,mev \ + --rpc.rollup-node \ --log.stdout.format log-fmt -vvv \ --sequencer.enabled \ --sequencer.allow-empty-blocks \ --signer.key-file /l2reth/sequencer-key \ - --sequencer.block-time 1000 \ - --sequencer.payload-building-duration 800 \ + --sequencer.block-time 500 \ + --sequencer.payload-building-duration 400 \ 
--txpool.pending-max-count=1000 \ --builder.gaslimit=30000000 \ --rpc.max-connections=5000 \ --p2p-secret-key /l2reth/nodekey \ - --trusted-peers enode://8fc4f6dfd0a2ebf56560d0b0ef5e60ad7bcb01e13f929eae53a4c77086d9c1e74eb8b8c8945035d25c6287afdd871f0d41b3fd7e189697decd0f13538d1ac620@l2geth-sequencer:30303 \ --engine.sync-at-startup false \ --l1.url http://l1-node:8545 \ --blob.mock diff --git a/tests/src/docker_compose.rs b/tests/src/docker_compose.rs index d2de42c0..f5977701 100644 --- a/tests/src/docker_compose.rs +++ b/tests/src/docker_compose.rs @@ -1,10 +1,7 @@ use alloy_provider::{Provider, ProviderBuilder}; use eyre::Result; use scroll_alloy_network::Scroll; -use std::{ - process::Command, - time::{Duration, SystemTime, UNIX_EPOCH}, -}; +use std::{fs, ops::Deref, process::Command, time::Duration}; use tokio::{ io::{AsyncBufReadExt, BufReader}, process::Command as TokioCommand, @@ -22,6 +19,14 @@ impl NamedProvider { } } +impl Deref for NamedProvider { + type Target = dyn Provider; + + fn deref(&self) -> &Self::Target { + self.provider.as_ref() + } +} + /// The sequencer node RPC URL for the Docker Compose environment. 
const RN_SEQUENCER_RPC_URL: &str = "http://localhost:8545"; @@ -33,6 +38,9 @@ const L2GETH_SEQUENCER_RPC_URL: &str = "http://localhost:8547"; const L2GETH_FOLLOWER_RPC_URL: &str = "http://localhost:8548"; +const RN_SEQUENCER_ENODE: &str = "enode://e7f7e271f62bd2b697add14e6987419758c97e83b0478bd948f5f2d271495728e7edef5bd78ad65258ac910f28e86928ead0c42ee51f2a0168d8ca23ba939766@{IP}:30303"; +const L2GETH_SEQUENCER_ENODE: &str = "enode://8fc4f6dfd0a2ebf56560d0b0ef5e60ad7bcb01e13f929eae53a4c77086d9c1e74eb8b8c8945035d25c6287afdd871f0d41b3fd7e189697decd0f13538d1ac620@{IP}:30303"; + pub struct DockerComposeEnv { project_name: String, compose_file: String, @@ -43,22 +51,19 @@ impl DockerComposeEnv { /// Create a new DockerComposeEnv and wait for all services to be ready pub async fn new(test_name: &str) -> Result<Self> { - let start = SystemTime::now(); - let since_the_epoch = start.duration_since(UNIX_EPOCH).expect("Time went backwards"); - let timestamp = since_the_epoch.as_secs(); - let project_name = format!("test-{test_name}-{timestamp}"); - let compose_file = "docker-compose.test.yml".to_string(); + let project_name = format!("test-{test_name}"); + let compose_file = "docker-compose.test.yml"; tracing::info!("๐Ÿš€ Starting test environment: {project_name}"); // Pre-cleanup existing containers to avoid conflicts - Self::cleanup(&compose_file, &project_name); + Self::cleanup(compose_file, &project_name, false); // Start the environment - let env = Self::start_environment(&compose_file, &project_name)?; + let env = Self::start_environment(compose_file, &project_name)?; // Start streaming logs in the background - let _ = Self::stream_container_logs(&compose_file, &project_name).await; + let _ = Self::stream_container_logs(compose_file, &project_name).await; // Wait for all services to be ready tracing::info!("โณ Waiting for services to be ready..."); @@ -83,8 +88,8 @@ impl DockerComposeEnv { project_name, "up", "-d", - // "--force-recreate", - // "--build", + "--force-recreate",
+ "--build", ]) .stdout(std::process::Stdio::piped()) .stderr(std::process::Stdio::piped()) @@ -114,9 +119,14 @@ impl DockerComposeEnv { } /// Cleanup the environment - fn cleanup(compose_file: &str, project_name: &str) { + fn cleanup(compose_file: &str, project_name: &str, dump_logs: bool) { tracing::info!("๐Ÿงน Cleaning up environment: {project_name}"); + if dump_logs { + // Dump logs for all containers before cleanup + Self::dump_container_logs_to_files(compose_file, project_name); + } + let _result = Command::new("docker") .args([ "compose", @@ -192,6 +202,8 @@ impl DockerComposeEnv { .await { Ok(provider) => match provider.get_chain_id().await { + // TODO: assert chain ID and genesis hash matches expected values (hardcoded + // constants) Ok(chain_id) => { tracing::info!("โœ… L2 node ready - Chain ID: {chain_id}"); return Ok(()); @@ -254,6 +266,51 @@ impl DockerComposeEnv { Ok(()) } + /// Construct the full container name using Docker Compose naming pattern + fn get_full_container_name(&self, service_name: &str) -> String { + format!("{}-{}-1", self.project_name, service_name) + } + + /// Get the IP address of a container within the project network + fn get_container_ip(&self, container_name: &str) -> Result<String> { + let output = Command::new("docker") + .args([ + "inspect", + "-f", + "{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}", + container_name, + ]) + .output() + .map_err(|e| eyre::eyre!("Failed to run docker inspect: {}", e))?; + + if !output.status.success() { + return Err(eyre::eyre!( + "Failed to get container IP for {}: {}", + container_name, + String::from_utf8_lossy(&output.stderr) + )); + } + + let ip = String::from_utf8_lossy(&output.stdout).trim().to_string(); + if ip.is_empty() { + return Err(eyre::eyre!("No IP address found for container {}", container_name)); + } + + Ok(ip) + } + + /// Get the rollup node sequencer enode URL with resolved IP address + pub fn rn_sequencer_enode(&self) -> Result<String> { + let ip =
self.get_container_ip(&self.get_full_container_name("rollup-node-sequencer"))?; + Ok(RN_SEQUENCER_ENODE.replace("{IP}", &ip)) + } + + /// Get the l2geth sequencer enode URL with resolved IP address + pub fn l2geth_sequencer_enode(&self) -> Result<String> { + let ip = self.get_container_ip(&self.get_full_container_name("l2geth-sequencer"))?; + Ok(L2GETH_SEQUENCER_ENODE.replace("{IP}", &ip)) + } + /// Show logs for all containers fn show_all_container_logs(compose_file: &str, project_name: &str) { tracing::info!("๐Ÿ” Getting all container logs..."); @@ -273,10 +330,100 @@ impl DockerComposeEnv { } } } + + /// Dump logs for all containers to individual files + fn dump_container_logs_to_files(compose_file: &str, project_name: &str) { + tracing::debug!("๐Ÿ“ Dumping container logs to files for project: {project_name}"); + + // Create logs directory + let logs_dir = format!("target/test-logs/{project_name}"); + if let Err(e) = fs::create_dir_all(&logs_dir) { + tracing::error!("Failed to create logs directory {logs_dir}: {e}"); + return; + } + + // Get list of all containers for this project + let containers_output = Command::new("docker") + .args([ + "compose", + "-f", + compose_file, + "-p", + project_name, + "ps", + "--format", + "{{.Name}}", + ]) + .output(); + + let container_names = match containers_output { + Ok(output) => { + if output.status.success() { + String::from_utf8_lossy(&output.stdout) + .lines() + .map(|s| s.trim().to_string()) + .filter(|s| !s.is_empty()) + .collect::<Vec<String>>() + } else { + tracing::error!( + "Failed to get container list: {}", + String::from_utf8_lossy(&output.stderr) + ); + return; + } + } + Err(e) => { + tracing::error!("Failed to run docker compose ps: {e}"); + return; + } + }; + + // Dump logs for each container + for container_name in container_names { + let log_file_path = format!("{logs_dir}/{container_name}.log"); + + tracing::debug!("๐Ÿ“‹ Dumping logs for container '{container_name}' to {log_file_path}"); + + let logs_output =
Command::new("docker").args(["logs", &container_name]).output(); + + match logs_output { + Ok(logs) => { + let mut content = String::new(); + + // Combine stdout and stderr + let stdout = String::from_utf8_lossy(&logs.stdout); + let stderr = String::from_utf8_lossy(&logs.stderr); + + if !stdout.trim().is_empty() { + content.push_str("=== STDOUT ===\n"); + content.push_str(&stdout); + content.push('\n'); + } + + if !stderr.trim().is_empty() { + content.push_str("=== STDERR ===\n"); + content.push_str(&stderr); + content.push('\n'); + } + + if let Err(e) = fs::write(&log_file_path, content) { + tracing::error!("Failed to write logs to {log_file_path}: {e}"); + } else { + tracing::debug!("โœ… Saved logs for '{container_name}' to {log_file_path}"); + } + } + Err(e) => { + tracing::error!("Failed to get logs for container '{container_name}': {e}"); + } + } + } + + tracing::info!("๐Ÿ“ All container logs dumped to: {logs_dir}"); + } } impl Drop for DockerComposeEnv { fn drop(&mut self) { - Self::cleanup(&self.compose_file, &self.project_name); + Self::cleanup(&self.compose_file, &self.project_name, true); } } diff --git a/tests/src/lib.rs b/tests/src/lib.rs index 70beba2f..8c0e8e8c 100644 --- a/tests/src/lib.rs +++ b/tests/src/lib.rs @@ -1,3 +1,4 @@ pub mod docker_compose; pub mod utils; -pub use docker_compose::{DockerComposeEnv, NamedProvider}; +pub use docker_compose::*; +pub use utils::*; diff --git a/tests/src/utils.rs b/tests/src/utils.rs index 5e2a5b10..fc20bc0c 100644 --- a/tests/src/utils.rs +++ b/tests/src/utils.rs @@ -7,7 +7,6 @@ use crate::docker_compose::NamedProvider; /// Enable automatic sequencing on a rollup node pub async fn enable_automatic_sequencing(provider: &NamedProvider) -> Result<bool> { provider - .provider .client() .request("rollupNode_enableAutomaticSequencing", ()) .await @@ -17,7 +16,6 @@ pub async fn enable_automatic_sequencing(provider: &NamedProvider) -> Result /// Disable automatic sequencing on a rollup node pub async fn disable_automatic_sequencing(provider: &NamedProvider) -> Result<bool> { provider - .provider .client()
.request("rollupNode_disableAutomaticSequencing", ()) .await @@ -26,7 +24,6 @@ pub async fn disable_automatic_sequencing(provider: &NamedProvider) -> Result Result<()> { provider - .provider .client() .request("miner_start", ()) .await @@ -35,7 +32,6 @@ pub async fn miner_start(provider: &NamedProvider) -> Result<()> { pub async fn miner_stop(provider: &NamedProvider) -> Result<()> { provider - .provider .client() .request("miner_stop", ()) .await @@ -52,7 +48,7 @@ pub async fn miner_stop(provider: &NamedProvider) -> Result<()> { /// * `Ok(())` if all nodes reach the target block within the timeout /// * `Err` if timeout is reached or any provider call fails pub async fn wait_for_block(nodes: &[&NamedProvider], target_block: u64) -> Result<()> { - let timeout_duration = Duration::from_secs(60); + let timeout_duration = Duration::from_secs(30); let timeout_secs = timeout_duration.as_secs(); tracing::info!( @@ -62,12 +58,12 @@ pub async fn wait_for_block(nodes: &[&NamedProvider], target_block: u64) -> Resu timeout_secs ); - for i in 0..timeout_secs { + for i in 0..timeout_secs * 2 { let mut all_synced = true; let mut node_statuses = Vec::new(); for node in nodes { - let current_block = node.provider.get_block_number().await?; + let current_block = node.get_block_number().await?; node_statuses.push((node.name, current_block)); if current_block < target_block { @@ -84,8 +80,8 @@ pub async fn wait_for_block(nodes: &[&NamedProvider], target_block: u64) -> Resu } // Log progress every 5 seconds - if i % 5 == 0 { - tracing::info!("Progress check ({}s elapsed):", i); + if i % 10 == 0 { + tracing::info!("Progress check ({}s elapsed):", i / 2); for (name, block) in node_statuses { tracing::info!( " - {}: block {} / {} {}", @@ -97,7 +93,7 @@ pub async fn wait_for_block(nodes: &[&NamedProvider], target_block: u64) -> Resu } } - tokio::time::sleep(Duration::from_secs(1)).await; + tokio::time::sleep(Duration::from_millis(500)).await; } eyre::bail!( @@ -125,8 +121,7 @@ pub async 
fn assert_blocks_match(nodes: &[&NamedProvider], block_number: u64) -> // Fetch blocks from all nodes for node in nodes { - let block_opt = - node.provider.get_block_by_number(BlockNumberOrTag::Number(block_number)).await?; + let block_opt = node.get_block_by_number(BlockNumberOrTag::Number(block_number)).await?; let block = block_opt .ok_or_else(|| eyre::eyre!("{} block {} not found", node.name, block_number))?; @@ -158,3 +153,64 @@ pub async fn assert_blocks_match(nodes: &[&NamedProvider], block_number: u64) -> Ok(()) } + +pub async fn assert_latest_block(nodes: &[&NamedProvider], expected_block: u64) -> Result { + if nodes.is_empty() { + return Ok(0); + } + + // Verify all nodes have the expected latest block + for node in nodes { + let block_number = node.get_block_number().await?; + assert_eq!( + block_number, expected_block, + "{} is at block {}, expected {}", + node.name, block_number, expected_block + ); + } + + assert_blocks_match(nodes, expected_block).await?; + + tracing::info!( + "โœ… All {} nodes are at the expected latest block and hashes match", + nodes.len() + ); + + Ok(expected_block) +} + +/// Add a peer to the node's peer set via admin API +pub async fn admin_add_peer(provider: &NamedProvider, enode: &str) -> Result { + provider + .client() + .request("admin_addPeer", (enode,)) + .await + .map_err(|e| eyre::eyre!("Failed to add peer {}: {}", enode, e)) +} + +/// Remove a peer from the node's peer set via admin API +pub async fn admin_remove_peer(provider: &NamedProvider, enode: &str) -> Result { + provider + .client() + .request("admin_removePeer", (enode,)) + .await + .map_err(|e| eyre::eyre!("Failed to remove peer {}: {}", enode, e)) +} + +/// Add a trusted peer to the node's trusted peer set via admin API +pub async fn admin_add_trusted_peer(provider: &NamedProvider, enode: &str) -> Result { + provider + .client() + .request("admin_addTrustedPeer", (enode,)) + .await + .map_err(|e| eyre::eyre!("Failed to add trusted peer {}: {}", enode, e)) +} 
+ +/// Remove a trusted peer from the node's trusted peer set via admin API +pub async fn admin_remove_trusted_peer(provider: &NamedProvider, enode: &str) -> Result { + provider + .client() + .request("admin_removeTrustedPeer", (enode,)) + .await + .map_err(|e| eyre::eyre!("Failed to remove trusted peer {}: {}", enode, e)) +} diff --git a/tests/tests/block_propagation_multi_clients.rs b/tests/tests/block_propagation_multi_clients.rs deleted file mode 100644 index c6f41f60..00000000 --- a/tests/tests/block_propagation_multi_clients.rs +++ /dev/null @@ -1,30 +0,0 @@ -//! Tests for block propagation to both geth and reth follower nodes. - -use eyre::Result; -use tests::*; - -#[tokio::test] -async fn test_docker_block_propagation_to_both_clients() -> Result<()> { - reth_tracing::init_test_tracing(); - - tracing::info!("=== STARTING test_docker_block_propagation_to_both_clients ==="); - let env = DockerComposeEnv::new("multi-client-propagation").await?; - - let rn_sequencer = env.get_rn_sequencer_provider().await?; - let rn_follower = env.get_rn_follower_provider().await?; - let l2geth_sequencer = env.get_l2geth_sequencer_provider().await?; - let l2geth_follower = env.get_l2geth_follower_provider().await?; - - let nodes = [&rn_sequencer, &rn_follower, &l2geth_sequencer, &l2geth_follower]; - - // Enable block production on l2geth sequencer - utils::miner_start(&l2geth_sequencer).await?; - - // Wait for all nodes to be at block 10 - let target_block = 10; - utils::wait_for_block(&nodes, target_block).await?; - // Verify blocks match across all clients - utils::assert_blocks_match(&nodes, target_block).await?; - - Ok(()) -} diff --git a/tests/tests/heterogeneous_client_sync_and_sequencer_handoff.rs b/tests/tests/heterogeneous_client_sync_and_sequencer_handoff.rs new file mode 100644 index 00000000..a9c1a626 --- /dev/null +++ b/tests/tests/heterogeneous_client_sync_and_sequencer_handoff.rs @@ -0,0 +1,173 @@ +use eyre::Result; +use tests::*; + +/// Tests cross-client block 
propagation and synchronization between heterogeneous nodes. +/// +/// This integration test validates that blocks can be successfully propagated between +/// different Ethereum client implementations (l2geth and rollup-node) in various +/// network topologies. The test exercises: +/// +/// 1. **Isolated Network Segments**: Initially runs l2geth nodes in isolation, verifying they can +/// produce and sync blocks independently +/// - Topology: `l2geth_follower -> l2geth_sequencer` +/// - l2geth_sequencer produces blocks, l2geth_follower syncs +/// - Rollup nodes remain disconnected at block 0 +/// +/// 2. **Cross-Client Synchronization**: Connects rollup nodes to the l2geth network, ensuring they +/// can catch up to the current chain state +/// - Topology: `[rn_follower, rn_sequencer, l2geth_follower] -> l2geth_sequencer` +/// - All nodes connect to l2geth_sequencer as the single source of truth +/// - Rollup nodes sync from block 0 to current height +/// +/// 3. **Sequencer Handoff**: Transitions block production from l2geth to rollup-node, testing that +/// all nodes stay synchronized during the transition +/// - Topology remains: `[rn_follower, rn_sequencer, l2geth_follower] -> l2geth_sequencer` +/// - Block production switches from l2geth_sequencer to rn_sequencer +/// - All nodes receive new blocks from rn_sequencer via l2geth_sequencer relay +/// +/// 4. **Network Partition Recovery**: Disconnects l2geth nodes, continues production on rollup +/// nodes, then reconnects to verify successful resynchronization +/// - Initial partition: `rn_follower -> rn_sequencer` (isolated rollup network) +/// - l2geth nodes disconnected, fall behind in block height +/// - Reconnection topology: `[l2geth_follower, l2geth_sequencer] -> rn_sequencer` +/// - l2geth nodes catch up by syncing from rn_sequencer +/// +/// 5. 
**Bidirectional Compatibility**: Returns block production to l2geth after rollup nodes have +/// extended the chain, ensuring backward compatibility +/// - Final topology: `[rn_follower, l2geth_follower, l2geth_sequencer] -> rn_sequencer` +/// - Block production returns to l2geth_sequencer +/// - Validates that l2geth can continue the chain after rollup node blocks +/// +/// The test validates that both client implementations maintain consensus despite +/// network topology changes, sequencer transitions, and temporary network partitions. +/// Each topology change tests different aspects of peer discovery, block gossip, +/// and chain synchronization across heterogeneous client implementations. +#[tokio::test] +async fn test_heterogeneous_client_sync_and_sequencer_handoff() -> Result<()> { + reth_tracing::init_test_tracing(); + + tracing::info!("=== STARTING test_heterogeneous_client_sync_and_sequencer_handoff ==="); + let env = DockerComposeEnv::new("multi-client-propagation").await?; + + let rn_sequencer = env.get_rn_sequencer_provider().await?; + let rn_follower = env.get_rn_follower_provider().await?; + let l2geth_sequencer = env.get_l2geth_sequencer_provider().await?; + let l2geth_follower = env.get_l2geth_follower_provider().await?; + + let rn_nodes = [&rn_sequencer, &rn_follower]; + let l2geth_nodes = [&l2geth_sequencer, &l2geth_follower]; + let nodes = [&rn_sequencer, &rn_follower, &l2geth_sequencer, &l2geth_follower]; + + // Connect only l2geth nodes first + // l2geth_follower -> l2geth_sequencer + utils::admin_add_peer(&l2geth_follower, &env.l2geth_sequencer_enode()?).await?; + tracing::info!("โœ… Connected l2geth follower to l2geth sequencer"); + + // Enable block production on l2geth sequencer + utils::miner_start(&l2geth_sequencer).await?; + + // Wait for at least 10 blocks to be produced + let target_block = 10; + utils::wait_for_block(&[&l2geth_sequencer], target_block).await?; + utils::miner_stop(&l2geth_sequencer).await?; + + let latest_block = 
l2geth_sequencer.get_block_number().await?; + + // Wait for all l2geth nodes to reach the latest block + utils::wait_for_block(&l2geth_nodes, latest_block).await?; + utils::assert_blocks_match(&l2geth_nodes, latest_block).await?; + tracing::info!("โœ… All l2geth nodes reached block {}", latest_block); + + // Assert rollup nodes are still at block 0 + utils::assert_latest_block(&rn_nodes, 0).await?; + + // Connect rollup nodes to l2geth sequencer + // topology: + // l2geth_follower -> l2geth_sequencer + // rn_follower -> l2geth_sequencer + // rn_sequencer -> l2geth_sequencer + utils::admin_add_peer(&rn_follower, &env.l2geth_sequencer_enode()?).await?; + utils::admin_add_peer(&rn_sequencer, &env.l2geth_sequencer_enode()?).await?; + tracing::info!("โœ… Connected rollup nodes to l2geth sequencer"); + + // Continue block production on l2geth sequencer + utils::miner_start(&l2geth_sequencer).await?; + + // Wait for all nodes to reach target block + let target_block = latest_block + 10; + utils::wait_for_block(&nodes, target_block).await?; + + utils::miner_stop(&l2geth_sequencer).await?; + let latest_block = l2geth_sequencer.get_block_number().await?; + utils::wait_for_block(&nodes, latest_block).await?; + utils::assert_blocks_match(&nodes, latest_block).await?; + tracing::info!("โœ… All nodes reached block {}", latest_block); + + // Enable sequencing on RN sequencer + tracing::info!("Enabling sequencing on RN sequencer"); + utils::enable_automatic_sequencing(&rn_sequencer).await?; + let target_block = latest_block + 10; + utils::wait_for_block(&nodes, target_block).await?; + + utils::disable_automatic_sequencing(&rn_sequencer).await?; + let latest_block = rn_sequencer.get_block_number().await?; + utils::wait_for_block(&nodes, latest_block).await?; + utils::assert_blocks_match(&nodes, latest_block).await?; + tracing::info!("โœ… All nodes reached block {}", latest_block); + + // Disconnect l2geth follower from l2geth sequencer + // topology: + // rn_follower -> 
rn_sequencer + utils::admin_remove_peer(&rn_follower, &env.l2geth_sequencer_enode()?).await?; + utils::admin_remove_peer(&rn_sequencer, &env.l2geth_sequencer_enode()?).await?; + utils::admin_remove_peer(&l2geth_follower, &env.l2geth_sequencer_enode()?).await?; + utils::admin_add_peer(&rn_follower, &env.rn_sequencer_enode()?).await?; + + // Continue sequencing on RN sequencer for at least 10 blocks + utils::enable_automatic_sequencing(&rn_sequencer).await?; + let target_block = latest_block + 10; + utils::wait_for_block(&rn_nodes, target_block).await?; + + // Make sure l2geth nodes are still at the old block -> they need to sync once reconnected + assert!( + l2geth_sequencer.get_block_number().await? <= target_block + 1, + "l2geth sequencer should be at most at block {}, but is at {}", + target_block + 1, + l2geth_sequencer.get_block_number().await? + ); + assert!( + l2geth_follower.get_block_number().await? <= target_block + 1, + "l2geth follower should be at most at block {}, but is at {}", + target_block + 1, + l2geth_follower.get_block_number().await? 
+ );
+
+    // Reconnect l2geth nodes to the rn sequencer and let them sync
+    // topology:
+    // rn_follower -> rn_sequencer
+    // l2geth_follower -> rn_sequencer
+    // l2geth_sequencer -> rn_sequencer
+    utils::admin_add_peer(&l2geth_follower, &env.rn_sequencer_enode()?).await?;
+    utils::admin_add_peer(&l2geth_sequencer, &env.rn_sequencer_enode()?).await?;
+
+    // Wait for all nodes to reach the same block again
+    let target_block = target_block + 10;
+    utils::wait_for_block(&nodes, target_block).await?;
+    tracing::info!("✅ l2geth nodes synced to block {}", target_block);
+
+    // Disable sequencing on RN sequencer
+    utils::disable_automatic_sequencing(&rn_sequencer).await?;
+    let latest_block = rn_sequencer.get_block_number().await?;
+    tracing::info!("Switched RN sequencing off at block {}", latest_block);
+    utils::wait_for_block(&nodes, latest_block).await?;
+    utils::assert_blocks_match(&nodes, latest_block).await?;
+
+    // start sequencing on l2geth sequencer again and make sure all nodes reach the same block in
+    // the end
+    utils::miner_start(&l2geth_sequencer).await?;
+    let target_block = latest_block + 20;
+    utils::wait_for_block(&nodes, target_block).await?;
+    assert_blocks_match(&nodes, target_block).await?;
+
+    Ok(())
+}