Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .github/workflows/dev-ecr-deploy.yml
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ jobs:
ECR_REPOSITORY: optimism/rollup-microservices
IMAGE_TAG: latest
run: |
docker build -t $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG .
docker build -f Dockerfile.microservices -t $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG .
docker push $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG

# TODO: Add this when the DEV env is set up
Expand Down
Original file line number Diff line number Diff line change
@@ -1,66 +1,65 @@
version: "3"

services:

# Look at packages/rollup-core/README.md for info on configuration
microservices:
image: <aws_account_id>.dkr.ecr.us-east-2.amazonaws.com/optimism/rollup-full-node:latest
volumes:
- l1-node-data:/mnt/l1-node:rw
- l2-node-data:/mnt/l2-node:rw
build:
context: .
dockerfile: Dockerfile.microservices
environment:
# Logging
# Logging
- DEBUG=info*,error*,warn*,debug* # The comma-separated logging patterns to match (common options are `error*`, `info*`, `warn*`, and `debug*`)
# Postgres
- POSTGRES_HOST=postgres # (Required) The host DNS entry / IP for the postgres DB
# Postgres
- POSTGRES_HOST= # (Required) The host DNS entry / IP for the postgres DB
- POSTGRES_PORT=5432 # (Required) Should almost always be 5432
- POSTGRES_USER=test # (Required) The user to use to connect to the db
- POSTGRES_PASSWORD=test # (Required) The password to use to connect to the db
- POSTGRES_USER # (Required) The user to use to connect to the db
- POSTGRES_PASSWORD # (Required) The password to use to connect to the db
- POSTGRES_DATABASE=rollup # (Required) The database name to connect to (should be `rollup`)
- POSTGRES_CONNECTION_POOL_SIZE # The connection pool size for postgres (defaults to 20)
- POSTGRES_USE_SSL # Set to anything to indicate that SSL should be used in the connection
# L1 Node
- L1_NODE_INFURA_NETWORK # The Infura network for the connection to the L1 node
# L1 Node
- L1_NODE_INFURA_NETWORK=rinkeby # The Infura network for the connection to the L1 node
- L1_NODE_INFURA_PROJECT_ID # The Infura project ID for the connection to the L1 node
- L1_NODE_WEB3_URL # The URL of the L1 node
- FINALITY_DELAY_IN_BLOCKS # The number of block confirmations required to consider a transaction final on L1
# L2 Node
- L2_NODE_WEB3_URL # The URL of the L2 node
# L1 Submitters
- FINALITY_DELAY_IN_BLOCKS=1 # The number of block confirmations required to consider a transaction final on L1
# L2 Node
- L2_NODE_WEB3_URL=l2_geth # The URL of the L2 node
# L1 Submitters
- L1_SEQUENCER_PRIVATE_KEY # The private key to use to submit Sequencer Transaction / State Batches
# Shared Contracts
- CANONICAL_TRANSACTION_CHAIN_CONTRACT_ADDRESS # (Required) The address of the CanonicalTransactionChain contract
- STATE_COMMITMENT_CHAIN_CONTRACT_ADDRESS # (Required) The address of the StateCommitmentChain contract
# L1 Chain Data Persister (needs Postgres & L1 Node vars above)
- L1_TO_L2_TRANSACTION_QUEUE_CONTRACT_ADDRESS # (Required) The address of the L1ToL2TransactionQueue contract
- SAFETY_TRANSACTION_QUEUE_CONTRACT_ADDRESS # (Required) The address of the SafetyTransactionQueue contract
- L1_CHAIN_DATA_PERSISTER_DB_PATH # (Required) The filepath where to locate (or create) the L1 Chain Data Persister LevelDB database
- L1_EARLIEST_BLOCK # (Required) The earliest block to sync on L1 to start persisting data
# L2 Chain Data Persister (needs Postgres & L2 Node vars above)
# Shared Contracts
- CANONICAL_TRANSACTION_CHAIN_CONTRACT_ADDRESS=0x60cdcc971edcbF2aBe9a76F5BbF3ca1BE46432aB # (Required) The address of the CanonicalTransactionChain contract
- STATE_COMMITMENT_CHAIN_CONTRACT_ADDRESS=0xDB7555eD502B4d53Bb0c7A8cd1d9f4f82A5c4c65 # (Required) The address of the StateCommitmentChain contract
# L1 Chain Data Persister (needs Postgres & L1 Node vars above)
- L1_TO_L2_TRANSACTION_QUEUE_CONTRACT_ADDRESS=0xb49655d0c5d50aeE6702df6EE900923b9C327687 # (Required) The address of the L1ToL2TransactionQueue contract
- SAFETY_TRANSACTION_QUEUE_CONTRACT_ADDRESS=0x53833580b882DEED4F45051B639eADbBc2Eafb1A # (Required) The address of the SafetyTransactionQueue contract
- L1_CHAIN_DATA_PERSISTER_DB_PATH=/mnt/l1-node # (Required) The filepath where to locate (or create) the L1 Chain Data Persister LevelDB database
- L1_EARLIEST_BLOCK= # (Required) The earliest block to sync on L1 to start persisting data
# L2 Chain Data Persister (needs Postgres & L2 Node vars above)
- L2_CHAIN_DATA_PERSISTER_DB_PATH # (Required) The filepath where to locate (or create) the L2 Chain Data Persister LevelDB database
# Geth Submission Queuer (needs Postgres vars above)
# Geth Submission Queuer (needs Postgres vars above)
- IS_SEQUENCER_STACK # (Required) Set if this is queueing Geth submissions for a sequencer (and not _just_ a verifier)
- GETH_SUBMISSION_QUEUER_PERIOD_MILLIS # The period in millis at which the GethSubmissionQueuer should attempt to queue an L2 Geth submission (defaults to 10,000)
# Queued Geth Submitter (needs Postgres & L2 Node vars above)
# Queued Geth Submitter (needs Postgres & L2 Node vars above)
- QUEUED_GETH_SUBMITTER_PERIOD_MILLIS # The period in millis at which the QueuedGethSubmitter should attempt to send L2 Geth submissions (defaults to 10,000)
# Canonical Transaction Chain Batch Creator (needs Postgres vars above)
# Canonical Transaction Chain Batch Creator (needs Postgres vars above)
- CANONICAL_CHAIN_MIN_BATCH_SIZE # The minimum batch size to build -- if fewer than this number of transactions are ready, a batch will not be created (defaults to 10)
- CANONICAL_CHAIN_MAX_BATCH_SIZE # The maximum batch size to build -- if more than this number of transactions are ready, they will be split into multiple batches of at most this size (defaults to 100)
- CANONICAL_CHAIN_BATCH_CREATOR_PERIOD_MILLIS # The period in millis at which the CanonicalChainBatchCreator should attempt to create Canonical Chain Batches (defaults to 10,000)
# Canonical Transaction Chain Batch Submitter (needs Postgres, L1 Node, L1 Submitters, and CANONICAL_TRANSACTION_CHAIN_CONTRACT_ADDRESS vars above)
# Canonical Transaction Chain Batch Submitter (needs Postgres, L1 Node, L1 Submitters, and CANONICAL_TRANSACTION_CHAIN_CONTRACT_ADDRESS vars above)
- CANONICAL_CHAIN_BATCH_SUBMITTER_PERIOD_MILLIS # The period in millis at which the CanonicalChainBatchCreator should attempt to create Canonical Chain Batches (defaults to 10,000)
# State Commitment Chain Batch Creator (needs Postgres vars above)
# State Commitment Chain Batch Creator (needs Postgres vars above)
- STATE_COMMITMENT_CHAIN_MIN_BATCH_SIZE # The minimum batch size to build -- if fewer than this number of transactions are ready, a batch will not be created (defaults to 10)
- STATE_COMMITMENT_CHAIN_MAX_BATCH_SIZE # The maximum batch size to build -- if more than this number of transactions are ready, they will be split into multiple batches of at most this size (defaults to 100)
- STATE_COMMITMENT_CHAIN_BATCH_CREATOR_PERIOD_MILLIS # The period in millis at which the StateCommitmentChainBatchCreator should attempt to create StateCommitmentChain Batches (defaults to 10,000)
# State Commitment Chain Batch Submitter (needs Postgres, L1 Node, L1 Submitters, STATE_COMMITMENT_CHAIN_CONTRACT_ADDRESS vars above)
# State Commitment Chain Batch Submitter (needs Postgres, L1 Node, L1 Submitters, STATE_COMMITMENT_CHAIN_CONTRACT_ADDRESS vars above)
- STATE_COMMITMENT_CHAIN_BATCH_SUBMITTER_PERIOD_MILLIS # The period in millis at which the StateCommitmentChainBatchCreator should attempt to create StateCommitmentChain Batches (defaults to 10,000)
# Fraud Detector
# Fraud Detector
- FRAUD_DETECTOR_PERIOD_MILLIS # The period in millis at which the FraudDetector should run (defaults to 10,000)
- REALERT_ON_UNRESOLVED_FRAUD_EVERY_N_FRAUD_DETECTOR_RUNS # The number of runs after which a detected fraud, if still present, should re-alert (via error logs) (defaults to 10)
# Which Services to run (respective vars must be configured above)
# Which Services to run (respective vars must be configured above)
- RUN_L1_CHAIN_DATA_PERSISTER # Set to anything to run L1 Chain Data Persister
- RUN_L2_CHAIN_DATA_PERSISTER # Set to anything to run L2 Chain Data Persister
- RUN_GETH_SUBMISSION_QUEUER # Set to anything to run Geth Submission Queuer
Expand All @@ -71,15 +70,40 @@ services:
- RUN_STATE_COMMITMENT_CHAIN_BATCH_SUBMITTER # Set to anything to run State Commitment Chain Batch Submitter
- RUN_FRAUD_DETECTOR # Set to anything to run Fraud Detector

postgres:
build:
context: ./db/
dockerfile: db.dockerfile
logging:
driver: awslogs
options:
awslogs-group: dev-microservices
awslogs-region: us-east-2
awslogs-stream-prefix: dev-microservices


geth_l2:
image: <aws_account_id>.dkr.ecr.us-east-2.amazonaws.com/optimism/geth:latest
volumes:
- l2-node-data:/mnt/l2-node/l2:rw
environment:
- POSTGRES_USER=test
- POSTGRES_PASSWORD=test
- VOLUME_PATH=/mnt/l2-node/l2
- HOSTNAME=0.0.0.0
- PORT=9545
- NETWORK_ID=108
- KEYSTORE_PATH_SUFFIX=/keystore
- SEALER_PRIVATE_KEY_PATH_SUFFIX=/sealer_private_key.txt
- PRIVATE_KEY_PATH_SUFFIX=/private_key.txt
- ADDRESS_PATH_SUFFIX=/address.txt
- SEALER_ADDRESS_PATH_SUFFIX=/sealer_address.txt
- INITIAL_BALANCE=0x200000000000000000000000000000000000000000000000000000000000000
- GENISIS_PATH=etc/rollup-fullnode.json
- SETUP_RUN_PATH_SUFFIX=/setup_run.txt
ports:
- 5432:5432
- 9545:9545

logging:
driver: awslogs
options:
awslogs-group: rollup-full-node
awslogs-region: us-east-2
awslogs-stream-prefix: l2-node

volumes:
l1-node-data:
Expand All @@ -92,3 +116,5 @@ volumes:





25 changes: 25 additions & 0 deletions aws/dev/microservices/ecs-params.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
version: 1
task_definition:
services:
rollup-full-node:
cpu_shares: 25
mem_limit: 524288000
geth_l2:
cpu_shares: 75
mem_limit: 1523288000
# This is all local for now -- eventually will change
ecs_network_mode: host
docker_volumes:
- name: l1-node-data
scope: shared
autoprovision: true
driver: 'local'
- name: l2-node-data
scope: shared
autoprovision: true
driver: 'local'
- name: full-node-data
scope: shared
autoprovision: true
driver: 'local'

9 changes: 0 additions & 9 deletions docker-compose.dev.yml

This file was deleted.

62 changes: 62 additions & 0 deletions packages/rollup-core/src/app/util/filesystem.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,62 @@
/* External Imports */
import {BaseDB, DB, getLevelInstance, newInMemoryDB} from '@eth-optimism/core-db'
import {getLogger} from '@eth-optimism/core-utils'

import * as rimraf from 'rimraf'

/* Internal Imports */
import * as fs from "fs"
import {Environment} from './environment'

const log = getLogger('filepath-util')

/**
 * Initializes the filesystem DB path for this service. If a `CLEAR_DATA_KEY`
 * is configured and its marker file is not present in an existing data
 * directory, all data at `dbPath` (plus any configured local L1 / L2 node
 * persistent data) is purged and the directory is re-created.
 *
 * @param dbPath The filesystem path where the DB data should live.
 * @param isTestMode If true, this is a no-op (no filesystem state is touched).
 */
export const initializeDBPaths = (dbPath: string, isTestMode: boolean) => {
  if (isTestMode) {
    return
  }

  if (!fs.existsSync(dbPath)) {
    makeDataDirectory(dbPath)
  } else {
    // A CLEAR_DATA_KEY change is detected when the data directory exists but
    // the marker file for the *current* key does not.
    if (Environment.clearDataKey() && !fs.existsSync(getClearDataFilePath(dbPath))) {
      log.info(`Detected change in CLEAR_DATA_KEY. Purging data...`)
      rimraf.sync(`${dbPath}/{*,.*}`)
      log.info(`Data purged from '${dbPath}/{*,.*}'`)
      if (Environment.localL1NodePersistentDbPath()) {
        rimraf.sync(`${Environment.localL1NodePersistentDbPath()}/{*,.*}`)
        log.info(
          `Local L1 node data purged from '${Environment.localL1NodePersistentDbPath()}/{*,.*}'`
        )
      }
      if (Environment.localL2NodePersistentDbPath()) {
        rimraf.sync(`${Environment.localL2NodePersistentDbPath()}/{*,.*}`)
        log.info(
          `Local L2 node data purged from '${Environment.localL2NodePersistentDbPath()}/{*,.*}'`
        )
      }
      // Re-create the directory that was just purged. Previously this call
      // omitted `dbPath`, so the default (L2 RPC server) path was re-created
      // instead of the directory that was actually wiped.
      makeDataDirectory(dbPath)
    }
  }
}

/**
 * Makes the data directory at the provided path and adds a clear-data marker
 * file to it if a `CLEAR_DATA_KEY` is configured.
 *
 * @param dbPath The directory to create. Defaults to
 *        `Environment.l2RpcServerPersistentDbPath()` for backwards
 *        compatibility with parameterless callers.
 */
export const makeDataDirectory = (
  dbPath: string = Environment.l2RpcServerPersistentDbPath()
) => {
  fs.mkdirSync(dbPath, { recursive: true })
  if (Environment.clearDataKey()) {
    // The marker file records that data for the current CLEAR_DATA_KEY exists,
    // so subsequent startups with the same key will not purge it.
    fs.writeFileSync(getClearDataFilePath(dbPath), '')
  }
}

/**
 * Gets the filepath of the "Clear Data" marker file that dictates whether or
 * not all filesystem data should be cleared on startup.
 *
 * @param dbPath The data directory the marker file lives in. Defaults to
 *        `Environment.l2RpcServerPersistentDbPath()` for backwards
 *        compatibility with parameterless callers.
 * @returns The full path of the marker file for the current `CLEAR_DATA_KEY`.
 */
export const getClearDataFilePath = (
  dbPath: string = Environment.l2RpcServerPersistentDbPath()
): string => {
  return `${dbPath}/.clear_data_key_${Environment.clearDataKey()}`
}
Loading