diff --git a/.github/workflows/dev-ecr-deploy.yml b/.github/workflows/dev-ecr-deploy.yml
index bb3309b0080d5..8cfc30458161f 100644
--- a/.github/workflows/dev-ecr-deploy.yml
+++ b/.github/workflows/dev-ecr-deploy.yml
@@ -40,7 +40,7 @@ jobs:
           ECR_REPOSITORY: optimism/rollup-microservices
           IMAGE_TAG: latest
         run: |
-          docker build -t $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG .
+          docker build -f Dockerfile.microservices -t $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG .
           docker push $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG

       # TODO: Add this when the DEV env is set up
diff --git a/docker-compose.microservices.yml b/aws/dev/microservices/docker-compose.yml
similarity index 59%
rename from docker-compose.microservices.yml
rename to aws/dev/microservices/docker-compose.yml
index 1ebdc547e9d94..2cfd2a32ca305 100644
--- a/docker-compose.microservices.yml
+++ b/aws/dev/microservices/docker-compose.yml
@@ -1,9 +1,8 @@
 version: "3"
-
 services:
-  # Look at packages/rollup-core/README.md for info on configuration
   microservices:
+    image: .dkr.ecr.us-east-2.amazonaws.com/optimism/rollup-microservices:latest
     volumes:
       - l1-node-data:/mnt/l1-node:rw
       - l2-node-data:/mnt/l2-node:rw
@@ -11,56 +10,56 @@ services:
       context: . 
dockerfile: Dockerfile.microservices environment: -# Logging + # Logging - DEBUG=info*,error*,warn*,debug* # The comma-separated logging patterns to match (common options are `error*`, `info*`, `warn*`, and `debug*`) -# Postgres - - POSTGRES_HOST=postgres # (Required) The host DNS entry / IP for the postgres DB + # Postgres + - POSTGRES_HOST= # (Required) The host DNS entry / IP for the postgres DB - POSTGRES_PORT=5432 # (Required) Should almost always be 5432 - - POSTGRES_USER=test # (Required) The user to use to connect to the db - - POSTGRES_PASSWORD=test # (Required) The password to use to connect to the db + - POSTGRES_USER # (Required) The user to use to connect to the db + - POSTGRES_PASSWORD # (Required) The password to use to connect to the db - POSTGRES_DATABASE=rollup # (Required) The database name to connect to (should be `rollup`) - POSTGRES_CONNECTION_POOL_SIZE # The connection pool size for postgres (defaults to 20) - POSTGRES_USE_SSL # Set to anything to indicate that SSL should be used in the connection -# L1 Node - - L1_NODE_INFURA_NETWORK # The Infura network for the connection to the L1 node + # L1 Node + - L1_NODE_INFURA_NETWORK=rinkeby # The Infura network for the connection to the L1 node - L1_NODE_INFURA_PROJECT_ID # The Infura project ID for the connection to the L1 node - L1_NODE_WEB3_URL # The URL of the L1 node - - FINALITY_DELAY_IN_BLOCKS # The number of block confirmations required to consider a transaction final on L1 -# L2 Node - - L2_NODE_WEB3_URL # The URL of the L2 node -# L1 Submitters + - FINALITY_DELAY_IN_BLOCKS=1 # The number of block confirmations required to consider a transaction final on L1 + # L2 Node + - L2_NODE_WEB3_URL=l2_geth # The URL of the L2 node + # L1 Submitters - L1_SEQUENCER_PRIVATE_KEY # The private key to use to submit Sequencer Transaction / State Batches -# Shared Contracts - - CANONICAL_TRANSACTION_CHAIN_CONTRACT_ADDRESS # (Required) The address of the CanonicalTransactionChain contract - - 
STATE_COMMITMENT_CHAIN_CONTRACT_ADDRESS # (Required) The address of the StateCommitmentChain contract -# L1 Chain Data Persister (needs Postgres & L1 Node vars above) - - L1_TO_L2_TRANSACTION_QUEUE_CONTRACT_ADDRESS # (Required) The address of the L1ToL2TransactionQueue contract - - SAFETY_TRANSACTION_QUEUE_CONTRACT_ADDRESS # (Required) The address of the SafetyTransactionQueue contract - - L1_CHAIN_DATA_PERSISTER_DB_PATH # (Required) The filepath where to locate (or create) the L1 Chain Data Persister LevelDB database - - L1_EARLIEST_BLOCK # (Required) The earliest block to sync on L1 to start persisting data -# L2 Chain Data Persister (needs Postgres & L2 Node vars above) + # Shared Contracts + - CANONICAL_TRANSACTION_CHAIN_CONTRACT_ADDRESS=0x60cdcc971edcbF2aBe9a76F5BbF3ca1BE46432aB # (Required) The address of the CanonicalTransactionChain contract + - STATE_COMMITMENT_CHAIN_CONTRACT_ADDRESS=0xDB7555eD502B4d53Bb0c7A8cd1d9f4f82A5c4c65 # (Required) The address of the StateCommitmentChain contract + # L1 Chain Data Persister (needs Postgres & L1 Node vars above) + - L1_TO_L2_TRANSACTION_QUEUE_CONTRACT_ADDRESS=0xb49655d0c5d50aeE6702df6EE900923b9C327687 # (Required) The address of the L1ToL2TransactionQueue contract + - SAFETY_TRANSACTION_QUEUE_CONTRACT_ADDRESS=0x53833580b882DEED4F45051B639eADbBc2Eafb1A # (Required) The address of the SafetyTransactionQueue contract + - L1_CHAIN_DATA_PERSISTER_DB_PATH=/mnt/l1-node # (Required) The filepath where to locate (or create) the L1 Chain Data Persister LevelDB database + - L1_EARLIEST_BLOCK= # (Required) The earliest block to sync on L1 to start persisting data + # L2 Chain Data Persister (needs Postgres & L2 Node vars above) - L2_CHAIN_DATA_PERSISTER_DB_PATH # (Required) The filepath where to locate (or create) the L2 Chain Data Persister LevelDB database -# Geth Submission Queuer (needs Postgres vars above) + # Geth Submission Queuer (needs Postgres vars above) - IS_SEQUENCER_STACK # (Required) Set if this is queueing Geth 
submissions for a sequencer (and not _just_ a verifier) - GETH_SUBMISSION_QUEUER_PERIOD_MILLIS # The period in millis at which the GethSubmissionQueuer should attempt to queue an L2 Geth submission (defaults to 10,000) -# Queued Geth Submitter (needs Postgres & L2 Node vars above) + # Queued Geth Submitter (needs Postgres & L2 Node vars above) - QUEUED_GETH_SUBMITTER_PERIOD_MILLIS # The period in millis at which the QueuedGethSubmitter should attempt to send L2 Geth submissions (defaults to 10,000) -# Canonical Transaction Chain Batch Creator (needs Postgres vars above) + # Canonical Transaction Chain Batch Creator (needs Postgres vars above) - CANONICAL_CHAIN_MIN_BATCH_SIZE # The minimum batch size to build -- if fewer than this number of transactions are ready, a batch will not be created (defaults to 10) - CANONICAL_CHAIN_MAX_BATCH_SIZE # The maximum batch size to build -- if more than this number of transactions are ready, they will be split into multiple batches of at most this size (defaults to 100) - CANONICAL_CHAIN_BATCH_CREATOR_PERIOD_MILLIS # The period in millis at which the CanonicalChainBatchCreator should attempt to create Canonical Chain Batches (defaults to 10,000) -# Canonical Transaction Chain Batch Submitter (needs Postgres, L1 Node, L1 Submitters, and CANONICAL_TRANSACTION_CHAIN_CONTRACT_ADDRESS vars above) + # Canonical Transaction Chain Batch Submitter (needs Postgres, L1 Node, L1 Submitters, and CANONICAL_TRANSACTION_CHAIN_CONTRACT_ADDRESS vars above) - CANONICAL_CHAIN_BATCH_SUBMITTER_PERIOD_MILLIS # The period in millis at which the CanonicalChainBatchCreator should attempt to create Canonical Chain Batches (defaults to 10,000) -# State Commitment Chain Batch Creator (needs Postgres vars above) + # State Commitment Chain Batch Creator (needs Postgres vars above) - STATE_COMMITMENT_CHAIN_MIN_BATCH_SIZE # The minimum batch size to build -- if fewer than this number of transactions are ready, a batch will not be created (defaults to 10) - 
STATE_COMMITMENT_CHAIN_MAX_BATCH_SIZE # The maximum batch size to build -- if more than this number of transactions are ready, they will be split into multiple batches of at most this size (defaults to 100) - STATE_COMMITMENT_CHAIN_BATCH_CREATOR_PERIOD_MILLIS # The period in millis at which the StateCommitmentChainBatchCreator should attempt to create StateCommitmentChain Batches (defaults to 10,000) -# State Commitment Chain Batch Submitter (needs Postgres, L1 Node, L1 Submitters, STATE_COMMITMENT_CHAIN_CONTRACT_ADDRESS vars above) + # State Commitment Chain Batch Submitter (needs Postgres, L1 Node, L1 Submitters, STATE_COMMITMENT_CHAIN_CONTRACT_ADDRESS vars above) - STATE_COMMITMENT_CHAIN_BATCH_SUBMITTER_PERIOD_MILLIS # The period in millis at which the StateCommitmentChainBatchCreator should attempt to create StateCommitmentChain Batches (defaults to 10,000) -# Fraud Detector + # Fraud Detector - FRAUD_DETECTOR_PERIOD_MILLIS # The period in millis at which the FraudDetector should run (defaults to 10,000) - REALERT_ON_UNRESOLVED_FRAUD_EVERY_N_FRAUD_DETECTOR_RUNS # The number of runs after which a detected fraud, if still present, should re-alert (via error logs) (defaults to 10) -# Which Services to run (respective vars must be configured above) + # Which Services to run (respective vars must be configured above) - RUN_L1_CHAIN_DATA_PERSISTER # Set to anything to run L1 Chain Data Persister - RUN_L2_CHAIN_DATA_PERSISTER # Set to anything to run L2 Chain Data Persister - RUN_GETH_SUBMISSION_QUEUER # Set to anything to run Geth Submission Queuer @@ -71,15 +70,40 @@ services: - RUN_STATE_COMMITMENT_CHAIN_BATCH_SUBMITTER # Set to anything to run State Commitment Chain Batch Submitter - RUN_FRAUD_DETECTOR # Set to anything to run Fraud Detector - postgres: - build: - context: ./db/ - dockerfile: db.dockerfile + logging: + driver: awslogs + options: + awslogs-group: dev-microservices + awslogs-region: us-east-2 + awslogs-stream-prefix: dev-microservices + + + geth_l2: 
+    image: .dkr.ecr.us-east-2.amazonaws.com/optimism/geth:latest
+    volumes:
+      - l2-node-data:/mnt/l2-node/l2:rw
     environment:
-      - POSTGRES_USER=test
-      - POSTGRES_PASSWORD=test
+      - VOLUME_PATH=/mnt/l2-node/l2
+      - HOSTNAME=0.0.0.0
+      - PORT=9545
+      - NETWORK_ID=108
+      - KEYSTORE_PATH_SUFFIX=/keystore
+      - SEALER_PRIVATE_KEY_PATH_SUFFIX=/sealer_private_key.txt
+      - PRIVATE_KEY_PATH_SUFFIX=/private_key.txt
+      - ADDRESS_PATH_SUFFIX=/address.txt
+      - SEALER_ADDRESS_PATH_SUFFIX=/sealer_address.txt
+      - INITIAL_BALANCE=0x200000000000000000000000000000000000000000000000000000000000000
+      - GENISIS_PATH=etc/rollup-fullnode.json
+      - SETUP_RUN_PATH_SUFFIX=/setup_run.txt
     ports:
-      - 5432:5432
+      - 9545:9545
+
+    logging:
+      driver: awslogs
+      options:
+        awslogs-group: rollup-full-node
+        awslogs-region: us-east-2
+        awslogs-stream-prefix: l2-node

 volumes:
   l1-node-data:
@@ -92,3 +116,5 @@
+
+
diff --git a/aws/dev/microservices/ecs-params.yml b/aws/dev/microservices/ecs-params.yml
new file mode 100644
index 0000000000000..d02645a1f1b18
--- /dev/null
+++ b/aws/dev/microservices/ecs-params.yml
@@ -0,0 +1,25 @@
+version: 1
+task_definition:
+  services:
+    microservices:
+      cpu_shares: 25
+      mem_limit: 524288000
+    geth_l2:
+      cpu_shares: 75
+      mem_limit: 1523288000
+  # This is all local for now -- eventually will change
+  ecs_network_mode: host
+  docker_volumes:
+    - name: l1-node-data
+      scope: shared
+      autoprovision: true
+      driver: 'local'
+    - name: l2-node-data
+      scope: shared
+      autoprovision: true
+      driver: 'local'
+    - name: full-node-data
+      scope: shared
+      autoprovision: true
+      driver: 'local'
+
diff --git a/docker-compose.dev.yml b/docker-compose.dev.yml
deleted file mode 100644
index 9dc42c4d70a7c..0000000000000
--- a/docker-compose.dev.yml
+++ /dev/null
@@ -1,9 +0,0 @@
-version: "3"
-services:
-
-  rollup-full-node:
-    build:
-      context: . 
-      dockerfile: Dockerfile.dev
-    volumes:
-      - .:/mnt/full-node:rw
diff --git a/packages/rollup-core/src/app/util/filesystem.ts b/packages/rollup-core/src/app/util/filesystem.ts
new file mode 100644
index 0000000000000..a1b1cf70e7817
--- /dev/null
+++ b/packages/rollup-core/src/app/util/filesystem.ts
@@ -0,0 +1,62 @@
+/* External Imports */
+import {BaseDB, DB, getLevelInstance, newInMemoryDB} from '@eth-optimism/core-db'
+import {getLogger} from '@eth-optimism/core-utils'
+
+import * as rimraf from 'rimraf'
+
+/* Internal Imports */
+import * as fs from "fs"
+import {Environment} from './environment'
+
+const log = getLogger('filepath-util')
+
+/**
+ * Initializes filesystem DB paths. This will also purge all data if the `CLEAR_DATA_KEY` has changed.
+ */
+export const initializeDBPaths = (dbPath: string, isTestMode: boolean) => {
+  if (isTestMode) {
+    return
+  }
+
+  if (!fs.existsSync(dbPath)) {
+    makeDataDirectory(dbPath)
+  } else {
+    if (Environment.clearDataKey() && !fs.existsSync(getClearDataFilePath(dbPath))) {
+      log.info(`Detected change in CLEAR_DATA_KEY. Purging data...`)
+      rimraf.sync(`${dbPath}/{*,.*}`)
+      log.info(
+        `Data purged from '${dbPath}/{*,.*}'`
+      )
+      if (Environment.localL1NodePersistentDbPath()) {
+        rimraf.sync(`${Environment.localL1NodePersistentDbPath()}/{*,.*}`)
+        log.info(
+          `Local L1 node data purged from '${Environment.localL1NodePersistentDbPath()}/{*,.*}'`
+        )
+      }
+      if (Environment.localL2NodePersistentDbPath()) {
+        rimraf.sync(`${Environment.localL2NodePersistentDbPath()}/{*,.*}`)
+        log.info(
+          `Local L2 node data purged from '${Environment.localL2NodePersistentDbPath()}/{*,.*}'`
+        )
+      }
+      makeDataDirectory(dbPath) // recreate the dir that was just purged, not only the default path
+    }
+  }
+}
+
+/**
+ * Makes the provided data directory (defaults to the full node's L2 RPC DB path) and adds a clear data key file if one is configured.
+ */
+export const makeDataDirectory = (dbPath: string = Environment.l2RpcServerPersistentDbPath()) => {
+  fs.mkdirSync(dbPath, { recursive: true })
+  if (Environment.clearDataKey()) {
+    fs.writeFileSync(getClearDataFilePath(dbPath), '')
+  }
+}
+
+/**
+ * Gets the filepath of the "Clear Data" file that dictates whether or not all filesystem data should be cleared on startup.
+ */
+export const getClearDataFilePath = (dbPath: string = Environment.l2RpcServerPersistentDbPath()) => {
+  return `${dbPath}/.clear_data_key_${Environment.clearDataKey()}`
+}
diff --git a/packages/rollup-core/src/app/utils.ts b/packages/rollup-core/src/app/utils.ts
deleted file mode 100644
index c1d0af548ec72..0000000000000
--- a/packages/rollup-core/src/app/utils.ts
+++ /dev/null
@@ -1,265 +0,0 @@
-/* External Imports */
-import { bufToHexString, isObject, remove0x } from '@eth-optimism/core-utils'
-
-import { Contract, ContractFactory, Wallet } from 'ethers'
-
-/* Internal Imports */
-import { Address, EVMBytecode, EVMOpcodeAndBytes, Opcode } from '../types'
-import { BaseProvider, TransactionResponse } from 'ethers/providers'
-
-/**
- * Creates an unsigned transaction and returns its calldata.
- *
- * @param contract The contract containing the function being invoked
- * @param functionName The function being invoked
- * @param args The arguments of the function call
- * @returns The unsigned transaction's calldata
- */
-export const getUnsignedTransactionCalldata = (
-  contract: Contract | ContractFactory,
-  functionName: string,
-  args: any[] = []
-) => {
-  return contract.interface.functions[functionName].encode(args)
-}
-
-/**
- * Takes EVMBytecode and serializes it into a single Buffer.
- *
- * @param bytecode The bytecode to serialize into a single Buffer.
- * @returns The resulting Buffer.
- */
-export const bytecodeToBuffer = (bytecode: EVMBytecode): Buffer => {
-  return Buffer.concat(
-    bytecode.map((b) => {
-      return b.consumedBytes !== undefined
-        ? 
Buffer.concat([b.opcode.code, b.consumedBytes]) - : b.opcode.code - }) - ) -} - -/** - * Parses the provided Buffer into EVMBytecode. - * Note: If the Buffer is not valid bytecode, this will throw. - * - * @param buffer The buffer in question. - * @returns The parsed EVMBytecode. - */ -export const bufferToBytecode = (buffer: Buffer): EVMBytecode => { - const bytecode: EVMBytecode = [] - - for (let pc = 0; pc < buffer.length; pc++) { - const opcode = Opcode.parseByNumber(buffer[pc]) - if (!opcode) { - bytecode.push({ - opcode: { - name: `UNRECOGNIZED (${bufToHexString(Buffer.from([buffer[pc]]))})`, - code: Buffer.from([buffer[pc]]), - programBytesConsumed: 0, - }, - consumedBytes: undefined, - }) - continue - } - const consumedBytes: Buffer = - opcode.programBytesConsumed === 0 - ? undefined - : buffer.slice(pc + 1, pc + 1 + opcode.programBytesConsumed) - - bytecode.push({ - opcode, - consumedBytes, - }) - - pc += opcode.programBytesConsumed - } - return bytecode -} - -/** - * Gets the provided EVMBytecode as a printable string, where each line is an opcode and bytes. - * - * @param bytecode The EVMBytecode in question. - * @returns The resulting string. - */ -export const formatBytecode = (bytecode: EVMBytecode): string => { - return bytecode - .map((x, index) => { - let tagString: string = '(no tag)' - if (!!x.tag) { - tagString = `Metadata Tag: ${JSON.stringify(x.tag)}` - } - const pcAsString: string = padToLength( - getPCOfEVMBytecodeIndex(index, bytecode), - 10 - ) - if (x.consumedBytes === undefined) { - return `[PC ${pcAsString}] ${x.opcode.name} ${tagString}` - } - return `[PC ${pcAsString}] ${x.opcode.name}: ${bufToHexString( - x.consumedBytes - )} ${tagString}` - }) - .join('\n') -} - -const padToLength = (num: number, len: number): string => { - const str = num.toString(16) - return str.length >= len ? str : '0'.repeat(len - str.length) + str -} - -/** - * Gets the PC of the operation at a given index in some EVMBytecode. 
- * In other words, it gives us the index of where a given element in some EVMBytecode would be in its raw Buffer form. - * - * @param indexOfEVMOpcodeAndBytes The index of an EVMOpcodeAndBytes element to find the PC of. - * @param bytecode The EVMBytecode in question. - * @returns The resulting index in raw bytes where the EVMOpcodeAndBytes begins. - */ -export const getPCOfEVMBytecodeIndex = ( - indexOfEVMOpcodeAndBytes: number, - bytecode: EVMBytecode -): number => { - let pc: number = 0 - for (let i = 0; i < indexOfEVMOpcodeAndBytes; i++) { - const operation: EVMOpcodeAndBytes = bytecode[i] - const totalBytesForOperation = - operation.consumedBytes === undefined - ? 1 - : 1 + operation.opcode.programBytesConsumed - pc += totalBytesForOperation - } - return pc -} - -export const getWallets = (httpProvider) => { - const walletsToReturn = [] - for (let i = 0; i < 9; i++) { - const privateKey = '0x' + ('5' + i).repeat(32) - const nextWallet = new Wallet(privateKey, httpProvider) - walletsToReturn[i] = nextWallet - } - return walletsToReturn -} - -export const deployContract = async ( - wallet, - contractJSON, - args, - overrideOptions = {} -) => { - const factory = new ContractFactory( - contractJSON.abi, - contractJSON.bytecode || contractJSON.evm.bytecode, - wallet - ) - - const contract = await factory.deploy(...args) - await contract.deployed() - return contract -} - -/** - * Gets the current number of seconds since the epoch. - * - * @returns The seconds since epoch. - */ -export const getCurrentTime = (): number => { - return Math.round(new Date().getTime() / 1000) -} - -/** - * Returns whether or not the provided addresses are equal, ignoring case and prefix. - * - * @param one The first address. 
- * @param two The second address - */ -export const addressesAreEqual = (one: Address, two: Address): boolean => { - if (!one && !two) { - return true - } - if (!one || !two) { - return false - } - - return remove0x(one).toLowerCase() === remove0x(two).toLowerCase() -} - -/** - * Converts the provided Provider into a Provider capable of parsing L1MessageSender off of - * Ethers Transactions and blocks that contain Transactions (for use in consuming L2 blocks). - * - * @param baseProvider The provider to modify. - * @returns The modified provider, capable of parsing L1MessageSender off of transactions. - */ -export const monkeyPatchL2Provider = (baseProvider) => { - // Patch static tx parsing function of BaseProvider - // (unfortunately this won't apply to blocks with txs) - const checkTransactionResponse = BaseProvider.checkTransactionResponse - BaseProvider.checkTransactionResponse = (tx): TransactionResponse => { - const res = checkTransactionResponse(tx) - if (isObject(tx) && !!tx['l1MessageSender']) { - res['l1MessageSender'] = tx['l1MessageSender'] - } - return res - } - - // Need to overwrite perform in order to save the raw block to - // parse l1MessageSender from it after getBlock - const perform = baseProvider.perform - baseProvider.perform = async function(method, args) { - if ( - method === 'eth_getBlockByHash' || - method === 'eth_getBlockByNumber' || - method === 'getBlock' - ) { - const rawBlock = await perform.call(this, method, args) - if (!rawBlock) { - return rawBlock - } - if (!this.fetchedBlocks) { - this.fetchedBlocks = new Map() - } - this.fetchedBlocks.set(rawBlock.hash, rawBlock) - return rawBlock - } - - return perform.call(this, method, args) - } - - // Overwrite getBlock to function as normally but put - // the appropriate l1MessageSender on all transactions in the resulting object - const getBlock = baseProvider.getBlock - baseProvider.getBlock = async function(identifier, includeTxs) { - const block = await getBlock.call(this, 
identifier, includeTxs) - if ( - !block || - !includeTxs || - !block.transactions || - block.transactions.length === 0 - ) { - return block - } - if (!this.fetchedBlocks) { - return block - } - - const rawBlock = this.fetchedBlocks.get(block.hash) - if (!rawBlock) { - return block - } - - for (let i = 0; i < block.transactions.length; i++) { - if (!!rawBlock.transactions[i]['l1MessageSender']) { - block.transactions[i]['l1MessageSender'] = - rawBlock.transactions[i]['l1MessageSender'] - } - } - - this.fetchedBlocks.delete(block.hash) - return block - } - - return baseProvider -}