Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix and optimize query profiling #57095

Merged
merged 2 commits into from
Jan 8, 2019
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 5 additions & 1 deletion src/librustc/ty/context.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1248,7 +1248,11 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {

sync::assert_send_val(&gcx);

tls::enter_global(gcx, f)
let r = tls::enter_global(gcx, f);

gcx.queries.record_computed_queries(s);

r
}

pub fn consider_optimizing<T: Fn() -> String>(&self, msg: T) -> bool {
Expand Down
1 change: 1 addition & 0 deletions src/librustc/ty/query/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,7 @@ use ty::subst::Substs;
use util::nodemap::{DefIdSet, DefIdMap, ItemLocalSet};
use util::common::{ErrorReported};
use util::profiling::ProfileCategory::*;
use session::Session;

use rustc_data_structures::bit_set::BitSet;
use rustc_data_structures::indexed_vec::IndexVec;
Expand Down
47 changes: 25 additions & 22 deletions src/librustc/ty/query/plumbing.rs
Original file line number Diff line number Diff line change
Expand Up @@ -112,11 +112,7 @@ impl<'a, 'tcx, Q: QueryDescription<'tcx>> JobOwner<'a, 'tcx, Q> {
let mut lock = cache.borrow_mut();
if let Some(value) = lock.results.get(key) {
profq_msg!(tcx, ProfileQueriesMsg::CacheHit);
tcx.sess.profiler(|p| {
p.record_query(Q::CATEGORY);
p.record_query_hit(Q::CATEGORY);
});

tcx.sess.profiler(|p| p.record_query_hit(Q::CATEGORY));
let result = Ok((value.value.clone(), value.index));
#[cfg(debug_assertions)]
{
Expand Down Expand Up @@ -195,6 +191,7 @@ impl<'a, 'tcx, Q: QueryDescription<'tcx>> JobOwner<'a, 'tcx, Q> {
/// Executes a job by changing the ImplicitCtxt to point to the
/// new query job while it executes. It returns the diagnostics
/// captured during execution and the actual result.
#[inline(always)]
pub(super) fn start<'lcx, F, R>(
&self,
tcx: TyCtxt<'_, 'tcx, 'lcx>,
Expand Down Expand Up @@ -382,13 +379,10 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
)
);

self.sess.profiler(|p| p.record_query(Q::CATEGORY));

let job = match JobOwner::try_get(self, span, &key) {
TryGetJob::NotYetStarted(job) => job,
TryGetJob::JobCompleted(result) => {
return result.map(|(v, index)| {
self.sess.profiler(|p| p.record_query_hit(Q::CATEGORY));
self.dep_graph.read_index(index);
v
})
Expand Down Expand Up @@ -430,9 +424,6 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {

if !dep_node.kind.is_input() {
if let Some(dep_node_index) = self.try_mark_green_and_read(&dep_node) {
profq_msg!(self, ProfileQueriesMsg::CacheHit);
self.sess.profiler(|p| p.record_query_hit(Q::CATEGORY));

return self.load_from_disk_and_cache_in_memory::<Q>(key,
job,
dep_node_index,
Expand Down Expand Up @@ -483,11 +474,16 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
};

let result = if let Some(result) = result {
profq_msg!(self, ProfileQueriesMsg::CacheHit);
self.sess.profiler(|p| p.record_query_hit(Q::CATEGORY));

result
} else {
// We could not load a result from the on-disk cache, so
// recompute.

self.sess.profiler(|p| p.start_activity(Q::CATEGORY));
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Just a remark: There should probably be a third category here for things that could be re-used but are not stored in the cache. Not sure how to best handle this.


// The diagnostics for this query have already been
// promoted to the current session during
// try_mark_green(), so we can ignore them here.
Expand All @@ -498,6 +494,8 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
Q::compute(tcx, key)
})
});

self.sess.profiler(|p| p.end_activity(Q::CATEGORY));
result
};

Expand Down Expand Up @@ -547,6 +545,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
for {:?}", dep_node);
}

#[inline(always)]
fn force_query_with_job<Q: QueryDescription<'gcx>>(
self,
key: Q::Key,
Expand All @@ -565,10 +564,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
key, dep_node);

profq_msg!(self, ProfileQueriesMsg::ProviderBegin);
self.sess.profiler(|p| {
p.start_activity(Q::CATEGORY);
p.record_query(Q::CATEGORY);
});
self.sess.profiler(|p| p.start_activity(Q::CATEGORY));

let res = job.start(self, |tcx| {
if dep_node.kind.is_eval_always() {
Expand Down Expand Up @@ -624,14 +620,10 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
// this introduces should be negligible as we'll immediately hit the
// in-memory cache, or another query down the line will.

self.sess.profiler(|p| {
p.start_activity(Q::CATEGORY);
p.record_query(Q::CATEGORY);
});

let _ = self.get_query::<Q>(DUMMY_SP, key);

self.sess.profiler(|p| p.end_activity(Q::CATEGORY));
} else {
profq_msg!(self, ProfileQueriesMsg::CacheHit);
self.sess.profiler(|p| p.record_query_hit(Q::CATEGORY));
}
}

Expand Down Expand Up @@ -751,6 +743,17 @@ macro_rules! define_queries_inner {
}
}

/// Reports to the self-profiler, per query category, how many results
/// currently sit in each query's result cache. Invoked once at the end of
/// the compilation session (after `tls::enter_global` returns), so these
/// totals can be combined with the per-category cache-hit counts recorded
/// during execution.
///
/// NOTE(review): this counts every entry in `results`, which assumes each
/// cached entry corresponds to one computed query for profiling purposes
/// (results loaded from the on-disk cache are included) — confirm this
/// matches the intended hit/computed split in `util::profiling`.
pub fn record_computed_queries(&self, sess: &Session) {
sess.profiler(|p| {
$(
// Expanded once per query defined by `define_queries_inner!`:
// take the query's shard lock briefly to read the cache size.
p.record_computed_queries(
<queries::$name<'_> as QueryConfig<'_>>::CATEGORY,
self.$name.lock().results.len()
);
)*
});
}

#[cfg(parallel_queries)]
pub fn collect_active_jobs(&self) -> Vec<Lrc<QueryJob<$tcx>>> {
let mut jobs = Vec::new();
Expand Down
16 changes: 9 additions & 7 deletions src/librustc/util/profiling.rs
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,8 @@ macro_rules! define_categories {
let total_time = ($(self.times.$name + )* 0) as f32;

$(
let (hits, total) = self.query_counts.$name;
let (hits, computed) = self.query_counts.$name;
let total = hits + computed;
let (hits, total) = if total > 0 {
(format!("{:.2}",
(((hits as f32) / (total as f32)) * 100.0)), total.to_string())
Expand All @@ -86,7 +87,8 @@ macro_rules! define_categories {
let mut json = String::from("[");

$(
let (hits, total) = self.query_counts.$name;
let (hits, computed) = self.query_counts.$name;
let total = hits + computed;

//normalize hits to 0%
let hit_percent =
Expand Down Expand Up @@ -168,14 +170,14 @@ impl SelfProfiler {
self.timer_stack.push(category);
}

pub fn record_query(&mut self, category: ProfileCategory) {
let (hits, total) = *self.data.query_counts.get(category);
self.data.query_counts.set(category, (hits, total + 1));
pub fn record_computed_queries(&mut self, category: ProfileCategory, count: usize) {
let (hits, computed) = *self.data.query_counts.get(category);
self.data.query_counts.set(category, (hits, computed + count as u64));
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

An issue for another PR: This looks like we could get missed updates here (and below) with parallel queries.

}

pub fn record_query_hit(&mut self, category: ProfileCategory) {
let (hits, total) = *self.data.query_counts.get(category);
self.data.query_counts.set(category, (hits + 1, total));
let (hits, computed) = *self.data.query_counts.get(category);
self.data.query_counts.set(category, (hits + 1, computed));
}

pub fn end_activity(&mut self, category: ProfileCategory) {
Expand Down