diff --git a/compiler/rustc_query_impl/src/execution.rs b/compiler/rustc_query_impl/src/execution.rs index f4c1df145af35..ec67702507fbd 100644 --- a/compiler/rustc_query_impl/src/execution.rs +++ b/compiler/rustc_query_impl/src/execution.rs @@ -3,23 +3,23 @@ use std::mem; use rustc_data_structures::hash_table::{Entry, HashTable}; use rustc_data_structures::stack::ensure_sufficient_stack; +use rustc_data_structures::sync::{DynSend, DynSync}; use rustc_data_structures::{outline, sharded, sync}; use rustc_errors::{Diag, FatalError, StashKey}; use rustc_middle::dep_graph::{DepGraphData, DepNodeKey}; use rustc_middle::query::plumbing::QueryVTable; use rustc_middle::query::{ ActiveKeyStatus, CycleError, CycleErrorHandling, EnsureMode, QueryCache, QueryJob, QueryJobId, - QueryLatch, QueryMode, QueryStackDeferred, QueryStackFrame, QueryState, + QueryKey, QueryLatch, QueryMode, QueryState, }; use rustc_middle::ty::TyCtxt; use rustc_middle::verify_ich::incremental_verify_ich; use rustc_span::{DUMMY_SP, Span}; +use crate::collect_active_jobs_from_all_queries; use crate::dep_graph::{DepNode, DepNodeIndex}; use crate::job::{QueryJobInfo, QueryJobMap, find_cycle_in_stack, report_cycle}; -use crate::plumbing::{ - collect_active_jobs_from_all_queries, current_query_job, next_job_id, start_query, -}; +use crate::plumbing::{current_query_job, next_job_id, start_query}; #[inline] fn equivalent_key<K: Eq, V>(k: &K) -> impl Fn(&(K, V)) -> bool + '_ { @@ -43,18 +43,25 @@ pub(crate) fn all_inactive<'tcx, K>(state: &QueryState<'tcx, K>) -> bool { /// Internal plumbing for collecting the set of active jobs for this query. /// -/// Should only be called from `gather_active_jobs`. -pub(crate) fn gather_active_jobs_inner<'tcx, K: Copy>( - state: &QueryState<'tcx, K>, +/// Should only be called from `collect_active_jobs_from_all_queries`. +/// +/// (We arbitrarily use the word "gather" when collecting the jobs for +/// each individual query, so that we have distinct function names to +/// grep for.)
+pub(crate) fn gather_active_jobs<'tcx, C>( + query: &'tcx QueryVTable<'tcx, C>, tcx: TyCtxt<'tcx>, - make_frame: fn(TyCtxt<'tcx>, K) -> QueryStackFrame<QueryStackDeferred<'tcx>>, require_complete: bool, job_map_out: &mut QueryJobMap<'tcx>, // Out-param; job info is gathered into this map -) -> Option<()> { +) -> Option<()> +where + C: QueryCache, + QueryVTable<'tcx, C>: DynSync, +{ let mut active = Vec::new(); // Helper to gather active jobs from a single shard. - let mut gather_shard_jobs = |shard: &HashTable<(K, ActiveKeyStatus<'tcx>)>| { + let mut gather_shard_jobs = |shard: &HashTable<(C::Key, ActiveKeyStatus<'tcx>)>| { for (k, v) in shard.iter() { if let ActiveKeyStatus::Started(ref job) = *v { active.push((*k, job.clone())); @@ -64,22 +71,33 @@ pub(crate) fn gather_active_jobs_inner<'tcx, K: Copy>( // Lock shards and gather jobs from each shard. if require_complete { - for shard in state.active.lock_shards() { + for shard in query.state.active.lock_shards() { gather_shard_jobs(&shard); } } else { // We use try_lock_shards here since we are called from the // deadlock handler, and this shouldn't be locked. - for shard in state.active.try_lock_shards() { - let shard = shard?; - gather_shard_jobs(&shard); + for shard in query.state.active.try_lock_shards() { + // This can be called during unwinding, and the function has a `try_`-prefix, so + // don't `unwrap()` here, just manually check for `None` and do best-effort error + // reporting. + match shard { + None => { + tracing::warn!( + "Failed to collect active jobs for query with name `{}`!", + query.name + ); + return None; + } + Some(shard) => gather_shard_jobs(&shard), + } } } // Call `make_frame` while we're not holding a `state.active` lock as `make_frame` may call // queries leading to a deadlock.
for (key, job) in active { - let frame = make_frame(tcx, key); + let frame = crate::plumbing::create_deferred_query_stack_frame(tcx, query, key); job_map_out.insert(job.id, QueryJobInfo { frame, job }); } diff --git a/compiler/rustc_query_impl/src/job.rs b/compiler/rustc_query_impl/src/job.rs index 2d9824a783ea5..228c611df3654 100644 --- a/compiler/rustc_query_impl/src/job.rs +++ b/compiler/rustc_query_impl/src/job.rs @@ -14,7 +14,7 @@ use rustc_middle::ty::TyCtxt; use rustc_session::Session; use rustc_span::{DUMMY_SP, Span}; -use crate::plumbing::collect_active_jobs_from_all_queries; +use crate::collect_active_jobs_from_all_queries; /// Map from query job IDs to job information collected by /// `collect_active_jobs_from_all_queries`. @@ -26,7 +26,7 @@ pub struct QueryJobMap<'tcx> { impl<'tcx> QueryJobMap<'tcx> { /// Adds information about a job ID to the job map. /// - /// Should only be called by `gather_active_jobs_inner`. + /// Should only be called by `gather_active_jobs`. pub(crate) fn insert(&mut self, id: QueryJobId, info: QueryJobInfo<'tcx>) { self.map.insert(id, info); } diff --git a/compiler/rustc_query_impl/src/lib.rs b/compiler/rustc_query_impl/src/lib.rs index 1b93ffe945b3d..875dd07ff25f0 100644 --- a/compiler/rustc_query_impl/src/lib.rs +++ b/compiler/rustc_query_impl/src/lib.rs @@ -19,10 +19,8 @@ use rustc_span::Span; pub use crate::dep_kind_vtables::make_dep_kind_vtables; pub use crate::job::{QueryJobMap, break_query_cycles, print_query_stack}; -pub use crate::plumbing::{collect_active_jobs_from_all_queries, query_key_hash_verify_all}; -use crate::plumbing::{encode_all_query_results, try_mark_green}; +use crate::plumbing::try_mark_green; use crate::profiling_support::QueryKeyStringCache; -pub use crate::profiling_support::alloc_self_profile_query_strings; use crate::values::Value; #[macro_use] diff --git a/compiler/rustc_query_impl/src/plumbing.rs b/compiler/rustc_query_impl/src/plumbing.rs index 8920f8dba38d1..92a4b7ff2b659 100644 --- 
a/compiler/rustc_query_impl/src/plumbing.rs +++ b/compiler/rustc_query_impl/src/plumbing.rs @@ -29,9 +29,10 @@ use rustc_middle::ty::{self, TyCtxt}; use rustc_serialize::{Decodable, Encodable}; use rustc_span::def_id::LOCAL_CRATE; +use crate::collect_active_jobs_from_all_queries; use crate::error::{QueryOverflow, QueryOverflowNote}; use crate::execution::{all_inactive, force_query}; -use crate::job::{QueryJobMap, find_dep_kind_root}; +use crate::job::find_dep_kind_root; fn depth_limit_error<'tcx>(tcx: TyCtxt<'tcx>, job: QueryJobId) { let job_map = @@ -94,56 +95,10 @@ pub(crate) fn start_query<'tcx, R>( }) } -/// Returns a map of currently active query jobs, collected from all queries. -/// -/// If `require_complete` is `true`, this function locks all shards of the -/// query results to produce a complete map, which always returns `Ok`. -/// Otherwise, it may return an incomplete map as an error if any shard -/// lock cannot be acquired. -/// -/// Prefer passing `false` to `require_complete` to avoid potential deadlocks, -/// especially when called from within a deadlock handler, unless a -/// complete map is needed and no deadlock is possible at this call site. 
-pub fn collect_active_jobs_from_all_queries<'tcx>( - tcx: TyCtxt<'tcx>, - require_complete: bool, -) -> Result<QueryJobMap<'tcx>, QueryJobMap<'tcx>> { - let mut job_map_out = QueryJobMap::default(); - let mut complete = true; - - for gather_fn in crate::PER_QUERY_GATHER_ACTIVE_JOBS_FNS.iter() { - if gather_fn(tcx, require_complete, &mut job_map_out).is_none() { - complete = false; - } - } - - if complete { Ok(job_map_out) } else { Err(job_map_out) } -} - pub(super) fn try_mark_green<'tcx>(tcx: TyCtxt<'tcx>, dep_node: &DepNode) -> bool { tcx.dep_graph.try_mark_green(tcx, dep_node).is_some() } -pub(super) fn encode_all_query_results<'tcx>( - tcx: TyCtxt<'tcx>, - encoder: &mut CacheEncoder<'_, 'tcx>, - query_result_index: &mut EncodedDepNodeIndex, -) { - for encode in super::ENCODE_QUERY_RESULTS.iter().copied().flatten() { - encode(tcx, encoder, query_result_index); - } -} - -pub fn query_key_hash_verify_all<'tcx>(tcx: TyCtxt<'tcx>) { - if tcx.sess.opts.unstable_opts.incremental_verify_ich || cfg!(debug_assertions) { - tcx.sess.time("query_key_hash_verify_all", || { - for verify in super::QUERY_KEY_HASH_VERIFY.iter() { - verify(tcx); - } - }) - } -} - macro_rules! cycle_error_handling { ([]) => {{ rustc_middle::query::CycleErrorHandling::Error @@ -313,8 +268,7 @@ pub(crate) fn create_deferred_query_stack_frame<'tcx, C>( key: C::Key, ) -> QueryStackFrame<QueryStackDeferred<'tcx>> where - C: QueryCache, - C::Key: QueryKey + DynSend + DynSync, + C: QueryCache, QueryVTable<'tcx, C>: DynSync, { let kind = vtable.dep_kind; @@ -326,7 +280,7 @@ where QueryStackFrame::new(info, kind, def_id, def_id_for_ty_in_cycle) } -pub(crate) fn encode_query_results_inner<'a, 'tcx, C, V>( +pub(crate) fn encode_query_results<'a, 'tcx, C, V>( tcx: TyCtxt<'tcx>, query: &'tcx QueryVTable<'tcx, C>, encoder: &mut CacheEncoder<'a, 'tcx>, @@ -628,125 +582,106 @@ macro_rules! define_queries { &tcx.query_system.query_vtables.$name } } + })*} - /// Internal per-query plumbing for collecting the set of active jobs for this query.
- /// - /// Should only be called through `PER_QUERY_GATHER_ACTIVE_JOBS_FNS`. - pub(crate) fn gather_active_jobs<'tcx>( - tcx: TyCtxt<'tcx>, - require_complete: bool, - job_map_out: &mut QueryJobMap<'tcx>, - ) -> Option<()> { - let make_frame = |tcx: TyCtxt<'tcx>, key| { - let vtable = &tcx.query_system.query_vtables.$name; - $crate::plumbing::create_deferred_query_stack_frame(tcx, vtable, key) - }; - - // Call `gather_active_jobs_inner` to do the actual work. - let res = crate::execution::gather_active_jobs_inner( - &tcx.query_system.query_vtables.$name.state, + pub fn make_query_vtables<'tcx>(incremental: bool) -> queries::QueryVTables<'tcx> { + queries::QueryVTables { + $( + $name: query_impl::$name::make_query_vtable(incremental), + )* + } + } + + /// Returns a map of currently active query jobs, collected from all queries. + /// + /// If `require_complete` is `true`, this function locks all shards of the + /// query results to produce a complete map, which always returns `Ok`. + /// Otherwise, it may return an incomplete map as an error if any shard + /// lock cannot be acquired. + /// + /// Prefer passing `false` to `require_complete` to avoid potential deadlocks, + /// especially when called from within a deadlock handler, unless a + /// complete map is needed and no deadlock is possible at this call site. + pub fn collect_active_jobs_from_all_queries<'tcx>( + tcx: TyCtxt<'tcx>, + require_complete: bool, + ) -> Result<QueryJobMap<'tcx>, QueryJobMap<'tcx>> { + let mut job_map_out = QueryJobMap::default(); + let mut complete = true; + + $( + let res = crate::execution::gather_active_jobs( + &tcx.query_system.query_vtables.$name, tcx, - make_frame, require_complete, - job_map_out, + &mut job_map_out, ); - - // this can be called during unwinding, and the function has a `try_`-prefix, so - don't `unwrap()` here, just manually check for `None` and do best-effort error - reporting.
if res.is_none() { - tracing::warn!( - "Failed to collect active jobs for query with name `{}`!", - stringify!($name) - ); + complete = false; } - res + )* + + if complete { Ok(job_map_out) } else { Err(job_map_out) } + } + + /// All self-profiling events generated by the query engine use + /// virtual `StringId`s for their `event_id`. This method makes all + /// those virtual `StringId`s point to actual strings. + /// + /// If we are recording only summary data, the ids will point to + /// just the query names. If we are recording query keys too, we + /// allocate the corresponding strings here. + pub fn alloc_self_profile_query_strings(tcx: TyCtxt<'_>) { + if !tcx.prof.enabled() { + return; } - pub(crate) fn alloc_self_profile_query_strings<'tcx>( - tcx: TyCtxt<'tcx>, - string_cache: &mut QueryKeyStringCache - ) { + let _prof_timer = tcx.sess.prof.generic_activity("self_profile_alloc_query_strings"); + + let mut string_cache = QueryKeyStringCache::new(); + + $( $crate::profiling_support::alloc_self_profile_query_strings_for_query_cache( tcx, stringify!($name), &tcx.query_system.query_vtables.$name.cache, - string_cache, - ) - } + &mut string_cache, + ); + )* - item_if_cache_on_disk! { [$($modifiers)*] - pub(crate) fn encode_query_results<'tcx>( - tcx: TyCtxt<'tcx>, - encoder: &mut CacheEncoder<'_, 'tcx>, - query_result_index: &mut EncodedDepNodeIndex - ) { - $crate::plumbing::encode_query_results_inner( + tcx.sess.prof.store_query_cache_hits(); + } + + fn encode_all_query_results<'tcx>( + tcx: TyCtxt<'tcx>, + encoder: &mut CacheEncoder<'_, 'tcx>, + query_result_index: &mut EncodedDepNodeIndex, + ) { + $( + item_if_cache_on_disk! 
{ [$($modifiers)*] $crate::plumbing::encode_query_results( tcx, &tcx.query_system.query_vtables.$name, encoder, query_result_index, ) } - } - - pub(crate) fn query_key_hash_verify<'tcx>(tcx: TyCtxt<'tcx>) { - $crate::plumbing::query_key_hash_verify( - &tcx.query_system.query_vtables.$name, - tcx, - ) - } - })*} - - pub fn make_query_vtables<'tcx>(incremental: bool) -> queries::QueryVTables<'tcx> { - queries::QueryVTables { - $( - $name: query_impl::$name::make_query_vtable(incremental), - )* - } + )* } - // These arrays are used for iteration and can't be indexed by `DepKind`. - - /// Used by `collect_active_jobs_from_all_queries` to iterate over all - /// queries, and gather the active jobs for each query. - /// - /// (We arbitrarily use the word "gather" when collecting the jobs for - /// each individual query, so that we have distinct function names to - /// grep for.) - const PER_QUERY_GATHER_ACTIVE_JOBS_FNS: &[ - for<'tcx> fn( - tcx: TyCtxt<'tcx>, - require_complete: bool, - job_map_out: &mut QueryJobMap<'tcx>, - ) -> Option<()> - ] = &[ - $( $crate::query_impl::$name::gather_active_jobs ),* - ]; - - const ALLOC_SELF_PROFILE_QUERY_STRINGS: &[ - for<'tcx> fn(TyCtxt<'tcx>, &mut QueryKeyStringCache) - ] = &[$(query_impl::$name::alloc_self_profile_query_strings),*]; - - const ENCODE_QUERY_RESULTS: &[ - Option<for<'tcx> fn( - TyCtxt<'tcx>, - &mut CacheEncoder<'_, 'tcx>, - &mut EncodedDepNodeIndex) - > - ] = &[ - $( - if_cache_on_disk!([$($modifiers)*] { - Some(query_impl::$name::encode_query_results) - } { - None + pub fn query_key_hash_verify_all<'tcx>(tcx: TyCtxt<'tcx>) { + if tcx.sess.opts.unstable_opts.incremental_verify_ich || cfg!(debug_assertions) { + tcx.sess.time("query_key_hash_verify_all", || { + $( + $crate::plumbing::query_key_hash_verify( + &tcx.query_system.query_vtables.$name, + tcx + ); + )* }) - ),* - ]; - - const QUERY_KEY_HASH_VERIFY: &[ - for<'tcx> fn(TyCtxt<'tcx>) - ] = &[$(query_impl::$name::query_key_hash_verify),*]; + } + } /// Declares a
dep-kind vtable constructor for each query. mod _dep_kind_vtable_ctors_for_queries { diff --git a/compiler/rustc_query_impl/src/profiling_support.rs b/compiler/rustc_query_impl/src/profiling_support.rs index 679fee49b6c6d..cd6979a7c22ca 100644 --- a/compiler/rustc_query_impl/src/profiling_support.rs +++ b/compiler/rustc_query_impl/src/profiling_support.rs @@ -14,7 +14,7 @@ pub(crate) struct QueryKeyStringCache { } impl QueryKeyStringCache { - fn new() -> QueryKeyStringCache { + pub(crate) fn new() -> QueryKeyStringCache { QueryKeyStringCache { def_id_cache: Default::default() } } } @@ -239,25 +239,3 @@ pub(crate) fn alloc_self_profile_query_strings_for_query_cache<'tcx, C>( } }); } - -/// All self-profiling events generated by the query engine use -/// virtual `StringId`s for their `event_id`. This method makes all -/// those virtual `StringId`s point to actual strings. -/// -/// If we are recording only summary data, the ids will point to -/// just the query names. If we are recording query keys too, we -/// allocate the corresponding strings here. -pub fn alloc_self_profile_query_strings(tcx: TyCtxt<'_>) { - if !tcx.prof.enabled() { - return; - } - - let _prof_timer = tcx.sess.prof.generic_activity("self_profile_alloc_query_strings"); - - let mut string_cache = QueryKeyStringCache::new(); - - for alloc in super::ALLOC_SELF_PROFILE_QUERY_STRINGS.iter() { - alloc(tcx, &mut string_cache) - } - tcx.sess.prof.store_query_cache_hits(); -}