Rollup merge of rust-lang#89784 - Mark-Simulacrum:delete-cache-hit-tracking, r=petrochenkov

Remove built-in query cache_hit tracking

This was already only enabled in debug_assertions builds. Generally, it seems
that most use cases which relied on this could use the -Zself-profile flag
instead, which also tracks cache hits (in all builds), so the extra cfgs and
related plumbing are not really necessary.

This is largely a small cleanup, though, intended primarily to make other
changes easier by removing the need to deal with this field.
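
As an illustration, here is a minimal, self-contained sketch of the pattern being
removed. The names (CacheStore, record_hit, entries) are hypothetical and not the
actual rustc types; the point is that a counter which exists only under
cfg(debug_assertions) forces a matching cfg guard on the field, its initialization,
and every increment site.

// Hypothetical, simplified sketch of the removed pattern (not rustc's code):
// a cache-hit counter that exists only in debug_assertions builds.
#[cfg(debug_assertions)]
use std::sync::atomic::{AtomicUsize, Ordering};

#[allow(dead_code)]
struct CacheStore {
    entries: Vec<u64>,
    // The counter is compiled out of release builds entirely...
    #[cfg(debug_assertions)]
    cache_hits: AtomicUsize,
}

impl CacheStore {
    fn new() -> Self {
        Self {
            entries: Vec::new(),
            // ...so construction needs a matching guard...
            #[cfg(debug_assertions)]
            cache_hits: AtomicUsize::new(0),
        }
    }

    fn record_hit(&self) {
        // ...and so does every increment site.
        #[cfg(debug_assertions)]
        {
            self.cache_hits.fetch_add(1, Ordering::Relaxed);
        }
    }
}

fn main() {
    let store = CacheStore::new();
    store.record_hit();
    #[cfg(debug_assertions)]
    {
        eprintln!("cache hits: {}", store.cache_hits.load(Ordering::Relaxed));
    }
}

With -Zself-profile, the same signal comes from the profiler's query_cache_hit
events, which the diff below keeps on the cache-hit paths.
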
the8472 authored Oct 12, 2021
2 parents 8709dde + 1273738 commit 23f4d61
Showing 2 changed files with 1 addition and 49 deletions.
27 changes: 0 additions & 27 deletions compiler/rustc_query_impl/src/stats.rs
@@ -5,8 +5,6 @@ use rustc_query_system::query::{QueryCache, QueryCacheStore};
 
 use std::any::type_name;
 use std::mem;
-#[cfg(debug_assertions)]
-use std::sync::atomic::Ordering;
 
 trait KeyStats {
     fn key_stats(&self, stats: &mut QueryStats);
@@ -27,7 +25,6 @@ impl KeyStats for DefId {
 #[derive(Clone)]
 struct QueryStats {
     name: &'static str,
-    cache_hits: usize,
     key_size: usize,
     key_type: &'static str,
     value_size: usize,
@@ -42,10 +39,6 @@
 {
     let mut stats = QueryStats {
         name,
-        #[cfg(debug_assertions)]
-        cache_hits: map.cache_hits.load(Ordering::Relaxed),
-        #[cfg(not(debug_assertions))]
-        cache_hits: 0,
         key_size: mem::size_of::<C::Key>(),
         key_type: type_name::<C::Key>(),
         value_size: mem::size_of::<C::Value>(),
@@ -63,12 +56,6 @@
 pub fn print_stats(tcx: TyCtxt<'_>) {
     let queries = query_stats(tcx);
 
-    if cfg!(debug_assertions) {
-        let hits: usize = queries.iter().map(|s| s.cache_hits).sum();
-        let results: usize = queries.iter().map(|s| s.entry_count).sum();
-        eprintln!("\nQuery cache hit rate: {}", hits as f64 / (hits + results) as f64);
-    }
-
     let mut query_key_sizes = queries.clone();
     query_key_sizes.sort_by_key(|q| q.key_size);
     eprintln!("\nLarge query keys:");
@@ -83,20 +70,6 @@ pub fn print_stats(tcx: TyCtxt<'_>) {
         eprintln!(" {} - {} x {} - {}", q.name, q.value_size, q.entry_count, q.value_type);
     }
 
-    if cfg!(debug_assertions) {
-        let mut query_cache_hits = queries.clone();
-        query_cache_hits.sort_by_key(|q| q.cache_hits);
-        eprintln!("\nQuery cache hits:");
-        for q in query_cache_hits.iter().rev() {
-            eprintln!(
-                " {} - {} ({}%)",
-                q.name,
-                q.cache_hits,
-                q.cache_hits as f64 / (q.cache_hits + q.entry_count) as f64
-            );
-        }
-    }
-
     let mut query_value_count = queries.clone();
     query_value_count.sort_by_key(|q| q.entry_count);
     eprintln!("\nQuery value count:");
23 changes: 1 addition & 22 deletions compiler/rustc_query_system/src/query/plumbing.rs
@@ -26,24 +26,15 @@ use std::hash::{Hash, Hasher};
 use std::mem;
 use std::num::NonZeroU32;
 use std::ptr;
-#[cfg(debug_assertions)]
-use std::sync::atomic::{AtomicUsize, Ordering};
 
 pub struct QueryCacheStore<C: QueryCache> {
     cache: C,
     shards: Sharded<C::Sharded>,
-    #[cfg(debug_assertions)]
-    pub cache_hits: AtomicUsize,
 }
 
 impl<C: QueryCache + Default> Default for QueryCacheStore<C> {
     fn default() -> Self {
-        Self {
-            cache: C::default(),
-            shards: Default::default(),
-            #[cfg(debug_assertions)]
-            cache_hits: AtomicUsize::new(0),
-        }
+        Self { cache: C::default(), shards: Default::default() }
     }
 }
 
@@ -377,10 +368,6 @@
         if unlikely!(tcx.profiler().enabled()) {
             tcx.profiler().query_cache_hit(index.into());
         }
-        #[cfg(debug_assertions)]
-        {
-            cache.cache_hits.fetch_add(1, Ordering::Relaxed);
-        }
         tcx.dep_graph().read_index(index);
         on_hit(value)
     })
@@ -429,10 +416,6 @@
             if unlikely!(tcx.dep_context().profiler().enabled()) {
                 tcx.dep_context().profiler().query_cache_hit(index.into());
             }
-            #[cfg(debug_assertions)]
-            {
-                cache.cache_hits.fetch_add(1, Ordering::Relaxed);
-            }
             query_blocked_prof_timer.finish_with_query_invocation_id(index.into());
 
             (v, Some(index))
@@ -705,10 +688,6 @@
         if unlikely!(tcx.dep_context().profiler().enabled()) {
             tcx.dep_context().profiler().query_cache_hit(index.into());
         }
-        #[cfg(debug_assertions)]
-        {
-            cache.cache_hits.fetch_add(1, Ordering::Relaxed);
-        }
     });
 
     let lookup = match cached {
