[refactor] Move Caches to re_viewer_ctx and make it generic (#2043)

* trait & management of generic caches

* make mesh cache one of the generic caches

* make tensor cache a generic cache

* add caches lazily on the fly

* tensor stats cache is now a generic cache

* Move caches to re_viewer_context

* all (central viewer context) cache access functions are now called `entry` for brevity & consistency!

* comment on future bytes_used method for `Cache` trait

* doc string fix
Wumpf authored May 4, 2023
1 parent 14cdeb9 commit c5c3242
Showing 16 changed files with 263 additions and 185 deletions.
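
The commit message above describes the new scheme: each concrete cache implements a small `Cache` trait that now lives in `re_viewer_context`, the central registry creates caches lazily on first access, and every cache access function is named `entry`. The registry itself is among the 16 changed files but not in this excerpt, so below is a minimal sketch of how such a lazily-populated, type-keyed registry could look. The trait methods are taken from the `impl Cache for …` blocks in the diffs further down; everything about the registry (the `TypeId`-keyed map, the `Default` + `'static` bounds, the method bodies) is an assumption for illustration and may differ from the crate's actual code.

```rust
use std::any::{Any, TypeId};
use std::collections::HashMap;

/// The per-cache interface; the three methods mirror the `impl Cache for …`
/// blocks in the diffs below.
pub trait Cache {
    /// Called once per frame, e.g. to flush stale entries.
    fn begin_frame(&mut self);

    /// Attempt to free up memory.
    fn purge_memory(&mut self);

    // NOTE: a `bytes_used()` method could be added later for memory accounting
    // (the commit message mentions adding a comment to that effect).

    /// Lets the registry hand back the concrete cache type.
    fn as_any_mut(&mut self) -> &mut dyn Any;
}

/// Central registry: caches are created lazily, the first time their type is requested.
#[derive(Default)]
pub struct Caches(HashMap<TypeId, Box<dyn Cache>>);

impl Caches {
    /// Access the cache of type `C`, inserting a default-constructed one on first use.
    pub fn entry<C: Cache + Default + 'static>(&mut self) -> &mut C {
        self.0
            .entry(TypeId::of::<C>())
            .or_insert_with(|| -> Box<dyn Cache> { Box::new(C::default()) })
            .as_any_mut()
            .downcast_mut::<C>()
            .expect("cache stored under the wrong TypeId")
    }

    /// Call once per frame so every cache can do its housekeeping.
    pub fn begin_frame(&mut self) {
        for cache in self.0.values_mut() {
            cache.begin_frame();
        }
    }

    /// Ask every cache to free as much memory as it can.
    pub fn purge_memory(&mut self) {
        for cache in self.0.values_mut() {
            cache.purge_memory();
        }
    }
}
```
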
4 changes: 2 additions & 2 deletions crates/re_viewer/src/app.rs
@@ -14,10 +14,10 @@ use re_log_types::{ApplicationId, LogMsg, RecordingId};
use re_renderer::WgpuResourcePoolStatistics;
use re_smart_channel::Receiver;
use re_ui::{toasts, Command};
use re_viewer_context::AppOptions;
use re_viewer_context::{AppOptions, Caches};

use crate::{
misc::{time_control::PlayState, Caches, RecordingConfig, ViewerContext},
misc::{time_control::PlayState, RecordingConfig, ViewerContext},
ui::{data_ui::ComponentUiRegistry, Blueprint},
viewer_analytics::ViewerAnalytics,
};
15 changes: 14 additions & 1 deletion crates/re_viewer/src/misc/caches/mesh_cache.rs
@@ -2,6 +2,7 @@ use std::sync::Arc;

use re_log_types::{Mesh3D, MeshId};
use re_renderer::RenderContext;
use re_viewer_context::Cache;

use crate::mesh_loader::LoadedMesh;

@@ -11,7 +12,7 @@ use crate::mesh_loader::LoadedMesh;
pub struct MeshCache(nohash_hasher::IntMap<MeshId, Option<Arc<LoadedMesh>>>);

impl MeshCache {
pub fn load(
pub fn entry(
&mut self,
name: &str,
mesh: &Mesh3D,
@@ -39,3 +40,15 @@ impl MeshCache {
.clone()
}
}

impl Cache for MeshCache {
fn begin_frame(&mut self) {}

fn purge_memory(&mut self) {
self.0.clear();
}

fn as_any_mut(&mut self) -> &mut dyn std::any::Any {
self
}
}
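
With the registry sketched after the file list above, a call site would first resolve the concrete cache and then use that cache's own `entry`. A hypothetical example, not from the diff: `load_mesh_example` is invented here, and the trailing parameters of `MeshCache::entry` (such as the render context) are cut off in the hunk above, so that call is left as a comment.

```rust
fn load_mesh_example(caches: &mut Caches, name: &str, mesh: &re_log_types::Mesh3D) {
    // First level of `entry`: fetch (or lazily create) the MeshCache itself.
    let mesh_cache = caches.entry::<MeshCache>();

    // Second level of `entry`: look up / load the mesh. The remaining arguments
    // are not visible in this excerpt, so the call is only sketched:
    // let loaded_mesh = mesh_cache.entry(name, mesh, /* render context, … */);
    let _ = (mesh_cache, name, mesh);
}
```
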
130 changes: 6 additions & 124 deletions crates/re_viewer/src/misc/caches/mod.rs
@@ -1,127 +1,9 @@
mod mesh_cache;
mod tensor_decode_cache;
mod tensor_stats;
mod tensor_stats_cache;

use re_log_types::component_types;

/// Does memoization of different things for the immediate mode UI.
#[derive(Default)]
pub struct Caches {
/// Cached decoded tensors.
pub decode: tensor_decode_cache::DecodeCache,

/// Cached loaded meshes (from file or converted from user data).
pub mesh: mesh_cache::MeshCache,

tensor_stats: nohash_hasher::IntMap<component_types::TensorId, TensorStats>,
}

impl Caches {
/// Call once per frame to potentially flush the cache(s).
pub fn begin_frame(&mut self) {
#[cfg(not(target_arch = "wasm32"))]
let max_decode_cache_use = 4_000_000_000;

#[cfg(target_arch = "wasm32")]
let max_decode_cache_use = 1_000_000_000;

self.decode.begin_frame(max_decode_cache_use);
}

pub fn purge_memory(&mut self) {
let Self {
decode,
tensor_stats,
mesh: _, // TODO(emilk)
} = self;
decode.purge_memory();
tensor_stats.clear();
}

pub fn tensor_stats(&mut self, tensor: &re_log_types::component_types::Tensor) -> &TensorStats {
self.tensor_stats
.entry(tensor.tensor_id)
.or_insert_with(|| TensorStats::new(tensor))
}
}

#[derive(Clone, Copy, Debug)]
pub struct TensorStats {
/// This will currently only be `None` for jpeg-encoded tensors.
pub range: Option<(f64, f64)>,
}

impl TensorStats {
fn new(tensor: &re_log_types::component_types::Tensor) -> Self {
use half::f16;
use ndarray::ArrayViewD;
use re_log_types::TensorDataType;

macro_rules! declare_tensor_range_int {
($name: ident, $typ: ty) => {
fn $name(tensor: ndarray::ArrayViewD<'_, $typ>) -> (f64, f64) {
crate::profile_function!();
let (min, max) = tensor
.fold((<$typ>::MAX, <$typ>::MIN), |(min, max), &value| {
(min.min(value), max.max(value))
});
(min as f64, max as f64)
}
};
}

macro_rules! declare_tensor_range_float {
($name: ident, $typ: ty) => {
fn $name(tensor: ndarray::ArrayViewD<'_, $typ>) -> (f64, f64) {
crate::profile_function!();
let (min, max) = tensor.fold(
(<$typ>::INFINITY, <$typ>::NEG_INFINITY),
|(min, max), &value| (min.min(value), max.max(value)),
);
#[allow(trivial_numeric_casts)]
(min as f64, max as f64)
}
};
}

declare_tensor_range_int!(tensor_range_u8, u8);
declare_tensor_range_int!(tensor_range_u16, u16);
declare_tensor_range_int!(tensor_range_u32, u32);
declare_tensor_range_int!(tensor_range_u64, u64);

declare_tensor_range_int!(tensor_range_i8, i8);
declare_tensor_range_int!(tensor_range_i16, i16);
declare_tensor_range_int!(tensor_range_i32, i32);
declare_tensor_range_int!(tensor_range_i64, i64);

// declare_tensor_range_float!(tensor_range_f16, half::f16);
declare_tensor_range_float!(tensor_range_f32, f32);
declare_tensor_range_float!(tensor_range_f64, f64);

#[allow(clippy::needless_pass_by_value)]
fn tensor_range_f16(tensor: ndarray::ArrayViewD<'_, f16>) -> (f64, f64) {
crate::profile_function!();
let (min, max) = tensor
.fold((f16::INFINITY, f16::NEG_INFINITY), |(min, max), &value| {
(min.min(value), max.max(value))
});
(min.to_f64(), max.to_f64())
}

let range = match tensor.dtype() {
TensorDataType::U8 => ArrayViewD::<u8>::try_from(tensor).map(tensor_range_u8),
TensorDataType::U16 => ArrayViewD::<u16>::try_from(tensor).map(tensor_range_u16),
TensorDataType::U32 => ArrayViewD::<u32>::try_from(tensor).map(tensor_range_u32),
TensorDataType::U64 => ArrayViewD::<u64>::try_from(tensor).map(tensor_range_u64),

TensorDataType::I8 => ArrayViewD::<i8>::try_from(tensor).map(tensor_range_i8),
TensorDataType::I16 => ArrayViewD::<i16>::try_from(tensor).map(tensor_range_i16),
TensorDataType::I32 => ArrayViewD::<i32>::try_from(tensor).map(tensor_range_i32),
TensorDataType::I64 => ArrayViewD::<i64>::try_from(tensor).map(tensor_range_i64),
TensorDataType::F16 => ArrayViewD::<f16>::try_from(tensor).map(tensor_range_f16),
TensorDataType::F32 => ArrayViewD::<f32>::try_from(tensor).map(tensor_range_f32),
TensorDataType::F64 => ArrayViewD::<f64>::try_from(tensor).map(tensor_range_f64),
};

Self { range: range.ok() }
}
}
pub use mesh_cache::MeshCache;
pub use tensor_decode_cache::TensorDecodeCache;
pub use tensor_stats::TensorStats;
pub use tensor_stats_cache::TensorStatsCache;
29 changes: 20 additions & 9 deletions crates/re_viewer/src/misc/caches/tensor_decode_cache.rs
@@ -2,6 +2,7 @@ use re_log_types::{
component_types::{Tensor, TensorId, TensorImageLoadError},
DecodedTensor,
};
use re_viewer_context::Cache;

// ----------------------------------------------------------------------------

@@ -12,26 +13,26 @@ struct DecodedTensorResult {
/// Total memory used by this `Tensor`.
memory_used: u64,

/// Which [`DecodeCache::generation`] was this `Tensor` last used?
/// Which [`TensorDecodeCache::generation`] was this `Tensor` last used?
last_use_generation: u64,
}

/// A cache of decoded [`Tensor`] entities, indexed by `TensorId`.
#[derive(Default)]
pub struct DecodeCache {
pub struct TensorDecodeCache {
cache: nohash_hasher::IntMap<TensorId, DecodedTensorResult>,
memory_used: u64,
generation: u64,
}

#[allow(clippy::map_err_ignore)]
impl DecodeCache {
impl TensorDecodeCache {
/// Decode a [`Tensor`] if necessary and cache the result.
///
/// This is a no-op for Tensors that are not compressed.
///
/// Currently supports JPEG encoded tensors.
pub fn try_decode_tensor_if_necessary(
pub fn entry(
&mut self,
maybe_encoded_tensor: Tensor,
) -> Result<DecodedTensor, TensorImageLoadError> {
@@ -60,21 +61,27 @@ impl DecodeCache {
}
}
}
}

impl Cache for TensorDecodeCache {
fn begin_frame(&mut self) {
#[cfg(not(target_arch = "wasm32"))]
let max_decode_cache_use = 4_000_000_000;

#[cfg(target_arch = "wasm32")]
let max_decode_cache_use = 1_000_000_000;

/// Call once per frame to (potentially) flush the cache.
pub fn begin_frame(&mut self, max_memory_use: u64) {
// TODO(jleibs): a more incremental purging mechanism, maybe switching to an LRU Cache
// would likely improve the behavior.

if self.memory_used > max_memory_use {
if self.memory_used > max_decode_cache_use {
self.purge_memory();
}

self.generation += 1;
}

/// Attempt to free up memory.
pub fn purge_memory(&mut self) {
fn purge_memory(&mut self) {
crate::profile_function!();

// Very aggressively flush everything not used in this frame
Expand All @@ -95,4 +102,8 @@ impl DecodeCache {
self.memory_used as f64 / 1e9,
);
}

fn as_any_mut(&mut self) -> &mut dyn std::any::Any {
self
}
}
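
The memory budget that the old `Caches::begin_frame` used to pass into this cache (4 GB natively, 1 GB on the web) now lives inside the trait impl itself, since the generic `Cache::begin_frame` takes no arguments. Below is a hypothetical call site, assuming the registry sketched after the file list near the top; the inner `entry` signature (tensor by value, `Result` out) is as shown in this hunk, but the wrapper function itself is invented.

```rust
fn decoded_tensor_example(
    caches: &mut Caches,
    tensor: re_log_types::component_types::Tensor,
) -> Result<
    re_log_types::DecodedTensor,
    re_log_types::component_types::TensorImageLoadError,
> {
    // JPEG-encoded tensors get decoded and cached; uncompressed tensors pass through.
    caches.entry::<TensorDecodeCache>().entry(tensor)
}
```
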
81 changes: 81 additions & 0 deletions crates/re_viewer/src/misc/caches/tensor_stats.rs
@@ -0,0 +1,81 @@
#[derive(Clone, Copy, Debug)]
pub struct TensorStats {
/// This will currently only be `None` for jpeg-encoded tensors.
pub range: Option<(f64, f64)>,
}

impl TensorStats {
pub fn new(tensor: &re_log_types::component_types::Tensor) -> Self {
use half::f16;
use ndarray::ArrayViewD;
use re_log_types::TensorDataType;

macro_rules! declare_tensor_range_int {
($name: ident, $typ: ty) => {
fn $name(tensor: ndarray::ArrayViewD<'_, $typ>) -> (f64, f64) {
crate::profile_function!();
let (min, max) = tensor
.fold((<$typ>::MAX, <$typ>::MIN), |(min, max), &value| {
(min.min(value), max.max(value))
});
(min as f64, max as f64)
}
};
}

macro_rules! declare_tensor_range_float {
($name: ident, $typ: ty) => {
fn $name(tensor: ndarray::ArrayViewD<'_, $typ>) -> (f64, f64) {
crate::profile_function!();
let (min, max) = tensor.fold(
(<$typ>::INFINITY, <$typ>::NEG_INFINITY),
|(min, max), &value| (min.min(value), max.max(value)),
);
#[allow(trivial_numeric_casts)]
(min as f64, max as f64)
}
};
}

declare_tensor_range_int!(tensor_range_u8, u8);
declare_tensor_range_int!(tensor_range_u16, u16);
declare_tensor_range_int!(tensor_range_u32, u32);
declare_tensor_range_int!(tensor_range_u64, u64);

declare_tensor_range_int!(tensor_range_i8, i8);
declare_tensor_range_int!(tensor_range_i16, i16);
declare_tensor_range_int!(tensor_range_i32, i32);
declare_tensor_range_int!(tensor_range_i64, i64);

// declare_tensor_range_float!(tensor_range_f16, half::f16);
declare_tensor_range_float!(tensor_range_f32, f32);
declare_tensor_range_float!(tensor_range_f64, f64);

#[allow(clippy::needless_pass_by_value)]
fn tensor_range_f16(tensor: ndarray::ArrayViewD<'_, f16>) -> (f64, f64) {
crate::profile_function!();
let (min, max) = tensor
.fold((f16::INFINITY, f16::NEG_INFINITY), |(min, max), &value| {
(min.min(value), max.max(value))
});
(min.to_f64(), max.to_f64())
}

let range = match tensor.dtype() {
TensorDataType::U8 => ArrayViewD::<u8>::try_from(tensor).map(tensor_range_u8),
TensorDataType::U16 => ArrayViewD::<u16>::try_from(tensor).map(tensor_range_u16),
TensorDataType::U32 => ArrayViewD::<u32>::try_from(tensor).map(tensor_range_u32),
TensorDataType::U64 => ArrayViewD::<u64>::try_from(tensor).map(tensor_range_u64),

TensorDataType::I8 => ArrayViewD::<i8>::try_from(tensor).map(tensor_range_i8),
TensorDataType::I16 => ArrayViewD::<i16>::try_from(tensor).map(tensor_range_i16),
TensorDataType::I32 => ArrayViewD::<i32>::try_from(tensor).map(tensor_range_i32),
TensorDataType::I64 => ArrayViewD::<i64>::try_from(tensor).map(tensor_range_i64),
TensorDataType::F16 => ArrayViewD::<f16>::try_from(tensor).map(tensor_range_f16),
TensorDataType::F32 => ArrayViewD::<f32>::try_from(tensor).map(tensor_range_f32),
TensorDataType::F64 => ArrayViewD::<f64>::try_from(tensor).map(tensor_range_f64),
};

Self { range: range.ok() }
}
}
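
The stats computation folds over an `ndarray` view of the tensor to find its minimum and maximum and then widens them to `f64`; `f16` gets a hand-written variant because `half::f16` is not a primitive type you can `as`-cast. A standalone illustration of that fold (the array contents are made up for the example):

```rust
fn range_example() {
    // Fold over an ndarray view to get (min, max), as the macros above do.
    let data: Vec<u8> = vec![3, 250, 17, 42];
    let array = ndarray::ArrayD::from_shape_vec(ndarray::IxDyn(&[2, 2]), data).unwrap();

    let (min, max) = array
        .view()
        .fold((u8::MAX, u8::MIN), |(min, max), &value| {
            (min.min(value), max.max(value))
        });

    assert_eq!((min as f64, max as f64), (3.0, 250.0));
}
```
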
27 changes: 27 additions & 0 deletions crates/re_viewer/src/misc/caches/tensor_stats_cache.rs
@@ -0,0 +1,27 @@
use re_log_types::{component_types, Tensor};
use re_viewer_context::Cache;

use super::TensorStats;

#[derive(Default)]
pub struct TensorStatsCache(nohash_hasher::IntMap<component_types::TensorId, TensorStats>);

impl TensorStatsCache {
pub fn entry(&mut self, tensor: &Tensor) -> &TensorStats {
self.0
.entry(tensor.tensor_id)
.or_insert_with(|| TensorStats::new(tensor))
}
}

impl Cache for TensorStatsCache {
fn begin_frame(&mut self) {}

fn purge_memory(&mut self) {
// Purging the tensor stats is not worth it - these are very small objects!
}

fn as_any_mut(&mut self) -> &mut dyn std::any::Any {
self
}
}
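
As the comment in `purge_memory` notes, the stats entries are so small that purging them is not worth it, so the impl deliberately keeps them across purges. A hypothetical call site, again assuming the registry sketched near the top (`tensor_range_example` is invented for illustration):

```rust
fn tensor_range_example(caches: &mut Caches, tensor: &re_log_types::Tensor) {
    let stats = caches.entry::<TensorStatsCache>().entry(tensor);

    // `range` is currently `None` only for JPEG-encoded tensors.
    if let Some((min, max)) = stats.range {
        println!("tensor value range: [{min}, {max}]");
    }
}
```
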
2 changes: 0 additions & 2 deletions crates/re_viewer/src/misc/mod.rs
@@ -9,8 +9,6 @@ pub(crate) mod time_control_ui;
mod transform_cache;
mod viewer_context;

pub use caches::Caches;

pub mod instance_hash_conversions;

#[cfg(not(target_arch = "wasm32"))]
4 changes: 2 additions & 2 deletions crates/re_viewer/src/misc/viewer_context.rs
@@ -1,8 +1,8 @@
use re_data_store::log_db::LogDb;
use re_viewer_context::{AppOptions, Item, ItemCollection, SelectionState};
use re_viewer_context::{AppOptions, Caches, Item, ItemCollection, SelectionState};

// TODO(andreas): Either viewer_context independent of these or move to re_viewer_context crate.
use super::{Caches, TimeControl};
use super::TimeControl;
use crate::ui::data_ui::ComponentUiRegistry;

/// Common things needed by many parts of the viewer.
(Diffs for the remaining 8 changed files are not shown in this excerpt.)
