
Commit adab48f

use state cache to avoid unnecessary block replay
1 parent 00cf5fc commit adab48f

7 files changed (+71, -9 lines)

beacon_node/src/cli.rs

Lines changed: 7 additions & 0 deletions
@@ -511,6 +511,13 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
                 .help("Specifies how many blocks the database should cache in memory [default: 5]")
                 .takes_value(true)
         )
+        .arg(
+            Arg::with_name("state-cache-size")
+                .long("state-cache-size")
+                .value_name("SIZE")
+                .help("Specifies how many states the database should cache in memory [default: 1]")
+                .takes_value(true)
+        )
         /*
          * Execution Layer Integration
          */
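
The new --state-cache-size flag is wired up exactly like the existing --block-cache-size flag. Below is a minimal, self-contained sketch of how the flag surfaces at runtime, assuming the clap 2-style builder API that the diff appears to use; the app name "sketch" is illustrative and not part of Lighthouse.

use clap::{App, Arg};

fn main() {
    // Build a tiny app containing only the new argument, mirroring the definition above.
    let matches = App::new("sketch")
        .arg(
            Arg::with_name("state-cache-size")
                .long("state-cache-size")
                .value_name("SIZE")
                .help("Specifies how many states the database should cache in memory [default: 1]")
                .takes_value(true)
        )
        .get_matches_from(vec!["sketch", "--state-cache-size", "4"]);

    // The raw string value is later parsed into a usize in get_config (see config.rs below).
    assert_eq!(matches.value_of("state-cache-size"), Some("4"));
}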

beacon_node/src/config.rs

Lines changed: 6 additions & 0 deletions
@@ -380,6 +380,12 @@ pub fn get_config<E: EthSpec>(
             .map_err(|_| "block-cache-size is not a valid integer".to_string())?;
     }
 
+    if let Some(state_cache_size) = cli_args.value_of("state-cache-size") {
+        client_config.store.state_cache_size = state_cache_size
+            .parse()
+            .map_err(|_| "state-cache-size is not a valid integer".to_string())?;
+    }
+
     client_config.store.compact_on_init = cli_args.is_present("compact-db");
     if let Some(compact_on_prune) = cli_args.value_of("auto-compact-db") {
         client_config.store.compact_on_prune = compact_on_prune
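
The parse-and-map-error pattern above rejects non-numeric values with a flag-specific message. A standalone sketch of that behaviour, with the helper name parse_state_cache_size chosen purely for illustration:

fn parse_state_cache_size(raw: &str) -> Result<usize, String> {
    // Mirrors the .parse().map_err(...) chain used in get_config above.
    raw.parse()
        .map_err(|_| "state-cache-size is not a valid integer".to_string())
}

fn main() {
    assert_eq!(parse_state_cache_size("4"), Ok(4));
    // A non-numeric value passed on the command line would surface this error string.
    assert!(parse_state_cache_size("four").is_err());
}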

beacon_node/store/src/config.rs

Lines changed: 4 additions & 0 deletions
@@ -7,6 +7,7 @@ use types::{EthSpec, MinimalEthSpec};
 pub const PREV_DEFAULT_SLOTS_PER_RESTORE_POINT: u64 = 2048;
 pub const DEFAULT_SLOTS_PER_RESTORE_POINT: u64 = 8192;
 pub const DEFAULT_BLOCK_CACHE_SIZE: usize = 5;
+pub const DEFAULT_STATE_CACHE_SIZE: usize = 1;
 
 /// Database configuration parameters.
 #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
@@ -17,6 +18,8 @@ pub struct StoreConfig {
     pub slots_per_restore_point_set_explicitly: bool,
     /// Maximum number of blocks to store in the in-memory block cache.
     pub block_cache_size: usize,
+    /// Maximum number of states to store in the in-memory state cache.
+    pub state_cache_size: usize,
     /// Whether to compact the database on initialization.
     pub compact_on_init: bool,
     /// Whether to compact the database during database pruning.
@@ -43,6 +46,7 @@ impl Default for StoreConfig {
             slots_per_restore_point: MinimalEthSpec::slots_per_historical_root() as u64,
             slots_per_restore_point_set_explicitly: false,
             block_cache_size: DEFAULT_BLOCK_CACHE_SIZE,
+            state_cache_size: DEFAULT_STATE_CACHE_SIZE,
             compact_on_init: false,
             compact_on_prune: true,
             prune_payloads: true,
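
With DEFAULT_STATE_CACHE_SIZE = 1, only the most recently replayed state is retained unless the operator raises the limit. A simplified sketch of how the new field participates in Default and can be overridden with struct-update syntax; the struct here is trimmed to the two cache fields and is not the real Lighthouse StoreConfig.

#[derive(Debug, Clone, PartialEq)]
struct StoreConfig {
    block_cache_size: usize,
    state_cache_size: usize,
}

const DEFAULT_BLOCK_CACHE_SIZE: usize = 5;
const DEFAULT_STATE_CACHE_SIZE: usize = 1;

impl Default for StoreConfig {
    fn default() -> Self {
        Self {
            block_cache_size: DEFAULT_BLOCK_CACHE_SIZE,
            state_cache_size: DEFAULT_STATE_CACHE_SIZE,
        }
    }
}

fn main() {
    // Override only the new field; everything else keeps its default.
    let config = StoreConfig { state_cache_size: 4, ..StoreConfig::default() };
    assert_eq!(config.block_cache_size, 5);
    assert_eq!(config.state_cache_size, 4);
}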

beacon_node/store/src/hot_cold_store.rs

Lines changed: 45 additions & 9 deletions
@@ -40,7 +40,7 @@ use std::sync::Arc;
 use std::time::Duration;
 use types::*;
 
-/// On-disk database that stores fnalized states efficiently.
+/// On-disk database that stores finalized states efficiently.
 ///
 /// Stores vector fields like the `block_roots` and `state_roots` separately, and only stores
 /// intermittent "restore point" states pre-finalization.
@@ -62,6 +62,8 @@ pub struct HotColdDB<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> {
     pub hot_db: Hot,
     /// LRU cache of deserialized blocks. Updated whenever a block is loaded.
     block_cache: Mutex<LruCache<Hash256, SignedBeaconBlock<E>>>,
+    /// LRU cache of replayed states.
+    state_cache: Mutex<LruCache<Slot, BeaconState<E>>>,
     /// Chain spec.
     pub(crate) spec: ChainSpec,
     /// Logger.
@@ -129,6 +131,7 @@ impl<E: EthSpec> HotColdDB<E, MemoryStore<E>, MemoryStore<E>> {
             cold_db: MemoryStore::open(),
             hot_db: MemoryStore::open(),
             block_cache: Mutex::new(LruCache::new(config.block_cache_size)),
+            state_cache: Mutex::new(LruCache::new(config.state_cache_size)),
             config,
             spec,
             log,
@@ -162,6 +165,7 @@ impl<E: EthSpec> HotColdDB<E, LevelDB<E>, LevelDB<E>> {
             cold_db: LevelDB::open(cold_path)?,
             hot_db: LevelDB::open(hot_path)?,
             block_cache: Mutex::new(LruCache::new(config.block_cache_size)),
+            state_cache: Mutex::new(LruCache::new(config.state_cache_size)),
             config,
             spec,
             log,
@@ -579,6 +583,9 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
     /// (which are frozen, and won't be deleted), or valid descendents of the finalized checkpoint
     /// (which will be deleted by this function but shouldn't be).
     pub fn delete_state(&self, state_root: &Hash256, slot: Slot) -> Result<(), Error> {
+        // Delete the state from the cache.
+        self.state_cache.lock().pop(&slot);
+
         // Delete the state summary.
         self.hot_db
             .key_delete(DBColumn::BeaconStateSummary.into(), state_root.as_bytes())?;
@@ -977,40 +984,69 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
 
     /// Load a frozen state that lies between restore points.
     fn load_cold_intermediate_state(&self, slot: Slot) -> Result<BeaconState<E>, Error> {
+        if let Some(state) = self.state_cache.lock().get(&slot) {
+            return Ok(state.clone());
+        }
+
         // 1. Load the restore points either side of the intermediate state.
         let low_restore_point_idx = slot.as_u64() / self.config.slots_per_restore_point;
         let high_restore_point_idx = low_restore_point_idx + 1;
 
+        // Use low restore point as the base state.
+        let mut low_slot: Slot =
+            Slot::new(low_restore_point_idx * self.config.slots_per_restore_point);
+        let mut low_state: Option<BeaconState<E>> = None;
+
+        // Try to get a more recent state from the cache to avoid massive blocks replay.
+        for (s, state) in self.state_cache.lock().iter() {
+            if s.as_u64() / self.config.slots_per_restore_point == low_restore_point_idx
+                && *s < slot
+                && low_slot < *s
+            {
+                low_slot = *s;
+                low_state = Some(state.clone());
+            }
+        }
+
+        // If low_state is still None, use load_restore_point_by_index to load the state.
+        if low_state.is_none() {
+            low_state = Some(self.load_restore_point_by_index(low_restore_point_idx)?);
+        }
+
         // Acquire the read lock, so that the split can't change while this is happening.
         let split = self.split.read_recursive();
 
-        let low_restore_point = self.load_restore_point_by_index(low_restore_point_idx)?;
         let high_restore_point = self.get_restore_point(high_restore_point_idx, &split)?;
 
-        // 2. Load the blocks from the high restore point back to the low restore point.
+        // 2. Load the blocks from the high restore point back to the low point.
         let blocks = self.load_blocks_to_replay(
-            low_restore_point.slot(),
+            low_slot,
             slot,
             self.get_high_restore_point_block_root(&high_restore_point, slot)?,
         )?;
 
-        // 3. Replay the blocks on top of the low restore point.
+        // 3. Replay the blocks on top of the low point.
         // Use a forwards state root iterator to avoid doing any tree hashing.
         // The state root of the high restore point should never be used, so is safely set to 0.
         let state_root_iter = self.forwards_state_roots_iterator_until(
-            low_restore_point.slot(),
+            low_slot,
             slot,
             || (high_restore_point, Hash256::zero()),
             &self.spec,
         )?;
 
-        self.replay_blocks(
-            low_restore_point,
+        let state = self.replay_blocks(
+            low_state.unwrap(),
             blocks,
             slot,
             Some(state_root_iter),
             StateRootStrategy::Accurate,
-        )
+        )?;
+
+        // If state is not error, put it in the cache.
+        self.state_cache.lock().put(slot, state.clone());
+
+        Ok(state)
     }
 
     /// Get the restore point with the given index, or if it is out of bounds, the split state.
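
The core of the change is the base-state selection: the store first checks for an exact cache hit, and otherwise scans the cache for the most recent state that sits in the same restore-point span and is strictly older than the requested slot, falling back to the low restore point. Here is a self-contained sketch of that selection logic, with a plain BTreeMap standing in for the Mutex<LruCache<Slot, BeaconState<E>>> and a stub State type; names like pick_base_slot are illustrative and not Lighthouse code.

use std::collections::BTreeMap;

// Stand-in for BeaconState<E>; only the slot matters for this sketch.
#[derive(Clone, Debug)]
struct State {
    slot: u64,
}

/// Choose the slot to replay from when serving `target_slot`: the most recent
/// cached state in the same restore-point span that is strictly older than the
/// target, or the low restore point slot if nothing usable is cached.
fn pick_base_slot(
    cache: &BTreeMap<u64, State>,
    target_slot: u64,
    slots_per_restore_point: u64,
) -> u64 {
    let low_restore_point_idx = target_slot / slots_per_restore_point;
    // Default base: the low restore point, exactly as before this commit.
    let mut base_slot = low_restore_point_idx * slots_per_restore_point;
    for (&cached_slot, _state) in cache {
        if cached_slot / slots_per_restore_point == low_restore_point_idx
            && cached_slot < target_slot
            && cached_slot > base_slot
        {
            base_slot = cached_slot;
        }
    }
    base_slot
}

fn main() {
    let mut cache = BTreeMap::new();
    cache.insert(8195, State { slot: 8195 });
    cache.insert(8200, State { slot: 8200 });

    // With slots_per_restore_point = 8192, a request for slot 8300 replays
    // ~100 blocks from the cached state at 8200 instead of ~108 from 8192.
    assert_eq!(pick_base_slot(&cache, 8300, 8192), 8200);
    println!("nearest cached base is slot {}", cache[&8200].slot);

    // With nothing cached, we fall back to the restore point as before.
    assert_eq!(pick_base_slot(&BTreeMap::new(), 8300, 8192), 8192);
}

Even with the default cache size of 1, this should help workloads that query nearby slots in ascending order, since each replayed state becomes the replay base for the next request in the same restore-point span.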

book/src/api-lighthouse.md

Lines changed: 1 addition & 0 deletions
@@ -455,6 +455,7 @@ curl "http://localhost:5052/lighthouse/database/info" | jq
     "config": {
         "slots_per_restore_point": 2048,
         "block_cache_size": 5,
+        "state_cache_size": 1,
         "compact_on_init": false,
         "compact_on_prune": true
     },

book/src/database-migrations.md

Lines changed: 1 addition & 0 deletions
@@ -92,6 +92,7 @@ curl "http://localhost:5052/lighthouse/database/info"
     "slots_per_restore_point": 8192,
     "slots_per_restore_point_set_explicitly": true,
     "block_cache_size": 5,
+    "state_cache_size": 1,
     "compact_on_init": false,
     "compact_on_prune": true
 }

lighthouse/tests/beacon_node.rs

Lines changed: 7 additions & 0 deletions
@@ -1634,6 +1634,13 @@ fn block_cache_size_flag() {
         .with_config(|config| assert_eq!(config.store.block_cache_size, 4_usize));
 }
 #[test]
+fn state_cache_size_flag() {
+    CommandLineTest::new()
+        .flag("state-cache-size", Some("4"))
+        .run_with_zero_port()
+        .with_config(|config| assert_eq!(config.store.state_cache_size, 4_usize));
+}
+#[test]
 fn auto_compact_db_flag() {
     CommandLineTest::new()
         .flag("auto-compact-db", Some("false"))
