-
Notifications
You must be signed in to change notification settings - Fork 2.4k
perf: release execution cache lock before waiting for state root validation #22138
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Closed
Closed
Changes from all commits
Commits
File filter
Filter by extension
Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
There are no files selected for viewing
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -211,15 +211,16 @@ where | |
| }); | ||
| } | ||
|
|
||
| /// This method calls `ExecutionCache::update_with_guard` which requires exclusive access. | ||
| /// It should only be called after ensuring that: | ||
| /// 1. All prewarming tasks have completed execution | ||
| /// 2. No other concurrent operations are accessing the cache | ||
| /// | ||
| /// Saves the warmed caches back into the shared slot after prewarming completes. | ||
| /// | ||
| /// This consumes the `SavedCache` held by the task, which releases its usage guard and allows | ||
| /// the new, warmed cache to be inserted. | ||
| /// Waits for block validation without any lock held, then only on success splits the | ||
| /// saved cache, inserts state, and publishes under a brief write lock. This avoids | ||
| /// the ~100ms+ lock hold that previously blocked concurrent readers during | ||
| /// `valid_block_rx.recv()`. | ||
| /// | ||
| /// The ordering is critical: `split()` releases the usage guard and `insert_state()` | ||
| /// mutates the shared fixed-caches in-place, so both must happen only after | ||
| /// validation to prevent concurrent readers from observing unvalidated state. | ||
| /// | ||
| /// This method is called from `run()` only after all execution tasks are complete. | ||
| #[instrument(level = "debug", target = "engine::tree::payload_processor::prewarm", skip_all)] | ||
|
|
@@ -236,36 +237,37 @@ where | |
|
|
||
| if let Some(saved_cache) = saved_cache { | ||
| debug!(target: "engine::caching", parent_hash=?hash, "Updating execution cache"); | ||
| // Perform all cache operations atomically under the lock | ||
| execution_cache.update_with_guard(|cached| { | ||
| // consumes the `SavedCache` held by the prewarming task, which releases its usage | ||
| // guard | ||
| let (caches, cache_metrics, disable_cache_metrics) = saved_cache.split(); | ||
| let new_cache = SavedCache::new(hash, caches, cache_metrics) | ||
| .with_disable_cache_metrics(disable_cache_metrics); | ||
|
|
||
| // Insert state into cache while holding the lock | ||
| // Access the BundleState through the shared ExecutionOutcome | ||
| if new_cache.cache().insert_state(&execution_outcome.state).is_err() { | ||
| // Clear the cache on error to prevent having a polluted cache | ||
| *cached = None; | ||
| debug!(target: "engine::caching", "cleared execution cache on update error"); | ||
| return; | ||
| } | ||
|
|
||
| // Wait for state root validation WITHOUT holding the cache lock. | ||
| // This is the key optimization: the original code held the lock across this | ||
| // blocking recv(), which blocked the next block's prewarming from accessing | ||
| // the cache for ~100ms+. | ||
| if valid_block_rx.recv().is_err() { | ||
|
Comment on lines
+241
to
+245
Collaborator
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Isn't this unproblematic anyway, because this fires once we have validated the block, and the remaining ops here are cheap? |
||
| debug!(target: "engine::caching", parent_hash=?hash, "skipped cache publish on invalid block"); | ||
| return; | ||
| } | ||
|
|
||
| // Block is valid — build and publish the warmed cache. | ||
| // split() consumes the SavedCache (releasing its usage guard) and insert_state() | ||
| // mutates the shared caches in-place, so these must only happen after validation | ||
| // to prevent concurrent readers from observing unvalidated state. | ||
| let (caches, cache_metrics, disable_cache_metrics) = saved_cache.split(); | ||
| let new_cache = SavedCache::new(hash, caches, cache_metrics) | ||
| .with_disable_cache_metrics(disable_cache_metrics); | ||
|
|
||
| if new_cache.cache().insert_state(&execution_outcome.state).is_err() { | ||
| execution_cache.update_with_guard(|cached| { | ||
| *cached = None; | ||
| }); | ||
| debug!(target: "engine::caching", "cleared execution cache on update error"); | ||
| } else { | ||
| new_cache.update_metrics(); | ||
|
|
||
| if valid_block_rx.recv().is_ok() { | ||
| // Replace the shared cache with the new one; the previous cache (if any) is | ||
| // dropped. | ||
| // Publish under a brief lock. | ||
| execution_cache.update_with_guard(|cached| { | ||
| *cached = Some(new_cache); | ||
| } else { | ||
| // Block was invalid; caches were already mutated by insert_state above, | ||
| // so we must clear to prevent using polluted state | ||
| *cached = None; | ||
| debug!(target: "engine::caching", "cleared execution cache on invalid block"); | ||
| } | ||
| }); | ||
| }); | ||
| } | ||
|
|
||
| let elapsed = start.elapsed(); | ||
| debug!(target: "engine::caching", parent_hash=?hash, elapsed=?elapsed, "Updated execution cache"); | ||
|
|
||
Oops, something went wrong.
Add this suggestion to a batch that can be applied as a single commit.
This suggestion is invalid because no changes were made to the code.
Suggestions cannot be applied while the pull request is closed.
Suggestions cannot be applied while viewing a subset of changes.
Only one suggestion per line can be applied in a batch.
Add this suggestion to a batch that can be applied as a single commit.
Applying suggestions on deleted lines is not supported.
You must change the existing code in this line in order to create a valid suggestion.
Outdated suggestions cannot be applied.
This suggestion has been applied or marked resolved.
Suggestions cannot be applied from pending reviews.
Suggestions cannot be applied on multi-line comments.
Suggestions cannot be applied while the pull request is queued to merge.
Suggestion cannot be applied right now. Please check back later.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
There are no concurrent readers here, because the main exec thread is busy validating the block.