Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 8 additions & 0 deletions op-node/flags/flags.go
Original file line number Diff line number Diff line change
Expand Up @@ -179,6 +179,13 @@ var (
Value: 20,
Category: L1RPCCategory,
}
// L1CacheSize overrides the size of the block/receipt/transaction caches
// used by the L1 client. 0 (the default) selects the sane default described
// in the Usage text. Receipts and transactions are cached per block, so a
// single size covers all three caches.
L1CacheSize = &cli.UintFlag{
Name: "l1.cache-size",
Usage: "Cache size for blocks, receipts and transactions. " +
"It's optional and a sane default of 3/2 the sequencing window size is used if this field is set to 0.",
EnvVars: prefixEnvVars("L1_CACHE_SIZE"),
Category: L1RPCCategory,
}
L1HTTPPollInterval = &cli.DurationFlag{
Name: "l1.http-poll-interval",
Usage: "Polling interval for latest-block subscription when using an HTTP RPC provider. Ignored for other types of RPC endpoints.",
Expand Down Expand Up @@ -423,6 +430,7 @@ var optionalFlags = []cli.Flag{
L1RPCMaxBatchSize,
L1RPCMaxConcurrency,
L1HTTPPollInterval,
L1CacheSize,
VerifierL1Confs,
SequencerEnabledFlag,
SequencerStoppedFlag,
Expand Down
27 changes: 21 additions & 6 deletions op-node/node/client.go
Original file line number Diff line number Diff line change
Expand Up @@ -120,6 +120,12 @@ type L1EndpointConfig struct {
// It is recommended to use websockets or IPC for efficient following of the changing block.
// Setting this to 0 disables polling.
HttpPollInterval time.Duration

// CacheSize specifies the cache size for blocks, receipts and transactions. It's optional and a
// sane default of 3/2 the sequencing window size is used during Setup if this field is set to 0.
// Note that receipts and transactions are cached per block, which is why there's only one cache
// size to configure.
CacheSize uint
}

var _ L1EndpointSetup = (*L1EndpointConfig)(nil)
Expand All @@ -129,11 +135,14 @@ func (cfg *L1EndpointConfig) Check() error {
return fmt.Errorf("batch size is invalid or unreasonable: %d", cfg.BatchSize)
}
if cfg.RateLimit < 0 {
return fmt.Errorf("rate limit cannot be negative")
return fmt.Errorf("rate limit cannot be negative: %f", cfg.RateLimit)
}
if cfg.MaxConcurrency < 1 {
return fmt.Errorf("max concurrent requests cannot be less than 1, was %d", cfg.MaxConcurrency)
}
if cfg.CacheSize > 1_000_000 {
return fmt.Errorf("cache size is dangerously large: %d", cfg.CacheSize)
}
return nil
}

Expand All @@ -146,14 +155,20 @@ func (cfg *L1EndpointConfig) Setup(ctx context.Context, log log.Logger, rollupCf
opts = append(opts, client.WithRateLimit(cfg.RateLimit, cfg.BatchSize))
}

l1Node, err := client.NewRPC(ctx, log, cfg.L1NodeAddr, opts...)
l1RPC, err := client.NewRPC(ctx, log, cfg.L1NodeAddr, opts...)
if err != nil {
return nil, nil, fmt.Errorf("failed to dial L1 address (%s): %w", cfg.L1NodeAddr, err)
}
rpcCfg := sources.L1ClientDefaultConfig(rollupCfg, cfg.L1TrustRPC, cfg.L1RPCKind)
rpcCfg.MaxRequestsPerBatch = cfg.BatchSize
rpcCfg.MaxConcurrentRequests = cfg.MaxConcurrency
return l1Node, rpcCfg, nil

var l1Cfg *sources.L1ClientConfig
if cfg.CacheSize > 0 {
l1Cfg = sources.L1ClientSimpleConfig(cfg.L1TrustRPC, cfg.L1RPCKind, int(cfg.CacheSize))
} else {
l1Cfg = sources.L1ClientDefaultConfig(rollupCfg, cfg.L1TrustRPC, cfg.L1RPCKind)
}
l1Cfg.MaxRequestsPerBatch = cfg.BatchSize
l1Cfg.MaxConcurrentRequests = cfg.MaxConcurrency
return l1RPC, l1Cfg, nil
}

// PreparedL1Endpoint enables testing with an in-process pre-setup RPC connection to L1
Expand Down
2 changes: 1 addition & 1 deletion op-node/node/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -130,7 +130,7 @@ func (cfg *Config) LoadPersisted(log log.Logger) error {
// Check verifies that the given configuration makes sense
func (cfg *Config) Check() error {
if err := cfg.L1.Check(); err != nil {
return fmt.Errorf("l2 endpoint config error: %w", err)
return fmt.Errorf("l1 endpoint config error: %w", err)
}
if err := cfg.L2.Check(); err != nil {
return fmt.Errorf("l2 endpoint config error: %w", err)
Expand Down
4 changes: 2 additions & 2 deletions op-node/node/node.go
Original file line number Diff line number Diff line change
Expand Up @@ -188,13 +188,13 @@ func (n *OpNode) initTracer(ctx context.Context, cfg *Config) error {
}

func (n *OpNode) initL1(ctx context.Context, cfg *Config) error {
l1Node, rpcCfg, err := cfg.L1.Setup(ctx, n.log, &cfg.Rollup)
l1RPC, l1Cfg, err := cfg.L1.Setup(ctx, n.log, &cfg.Rollup)
if err != nil {
return fmt.Errorf("failed to get L1 RPC client: %w", err)
}

n.l1Source, err = sources.NewL1Client(
client.NewInstrumentedRPC(l1Node, &n.metrics.RPCMetrics.RPCClientMetrics), n.log, n.metrics.L1SourceCache, rpcCfg)
client.NewInstrumentedRPC(l1RPC, &n.metrics.RPCMetrics.RPCClientMetrics), n.log, n.metrics.L1SourceCache, l1Cfg)
if err != nil {
return fmt.Errorf("failed to create L1 source: %w", err)
}
Expand Down
1 change: 1 addition & 0 deletions op-node/service.go
Original file line number Diff line number Diff line change
Expand Up @@ -160,6 +160,7 @@ func NewL1EndpointConfig(ctx *cli.Context) *node.L1EndpointConfig {
BatchSize: ctx.Int(flags.L1RPCMaxBatchSize.Name),
HttpPollInterval: ctx.Duration(flags.L1HTTPPollInterval.Name),
MaxConcurrency: ctx.Int(flags.L1RPCMaxConcurrency.Name),
CacheSize: ctx.Uint(flags.L1CacheSize.Name),
}
}

Expand Down
15 changes: 5 additions & 10 deletions op-service/sources/l1_client.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,25 +24,20 @@ func L1ClientDefaultConfig(config *rollup.Config, trustRPC bool, kind RPCProvide
}

// L1ClientSimpleConfig builds an L1ClientConfig with a caller-chosen cache
// size, independent of any rollup config (contrast L1ClientDefaultConfig,
// which derives the size from the sequencing window).
//
// cacheSize is applied uniformly to the receipts, transactions, headers,
// payloads and block-refs caches; receipts and transactions are cached per
// block, which is why one knob covers them all. Upstream validation
// (L1EndpointConfig.Check) bounds the value, so no cap is applied here.
func L1ClientSimpleConfig(trustRPC bool, kind RPCProviderKind, cacheSize int) *L1ClientConfig {
	return &L1ClientConfig{
		EthClientConfig: EthClientConfig{
			// receipts and transactions are cached per block
			ReceiptsCacheSize:     cacheSize,
			TransactionsCacheSize: cacheSize,
			HeadersCacheSize:      cacheSize,
			PayloadsCacheSize:     cacheSize,
			MaxRequestsPerBatch:   20, // TODO: tune batch param
			MaxConcurrentRequests: 10,
			TrustRPC:              trustRPC,
			MustBePostMerge:       false,
			RPCProviderKind:       kind,
			MethodResetDuration:   time.Minute,
			BlockRefsCacheSize:    cacheSize,
		},
	}
}
Expand Down