# Make sure `re_analytics` never logs higher than debug level (#3014)
* Closes #3012

### Checklist
* [x] I have read and agree to [Contributor Guide](https://github.com/rerun-io/rerun/blob/main/CONTRIBUTING.md) and the [Code of Conduct](https://github.com/rerun-io/rerun/blob/main/CODE_OF_CONDUCT.md)
* [x] I've included a screenshot or gif (if applicable)
* [x] I have tested [demo.rerun.io](https://demo.rerun.io/pr/3014) (if
applicable)

- [PR Build Summary](https://build.rerun.io/pr/3014)
- [Docs
preview](https://rerun.io/preview/pr%3Aemilk%2Flower-analytics-log-levels/docs)
- [Examples
preview](https://rerun.io/preview/pr%3Aemilk%2Flower-analytics-log-levels/examples)

emilk committed Aug 17, 2023 · 1 parent 13706cc · commit 771d2de

Showing 2 changed files with 29 additions and 27 deletions.
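
The gist of the change, as a minimal sketch before the file-by-file diff (illustration only, not code from this commit; the function name and the bare `log` facade are stand-ins for the real `re_log` calls):

```rust
// Hypothetical sketch of the policy: inside re_analytics, nothing is ever
// logged above `debug`, because analytics must never demand user attention.
fn append_event_sketch(event: &serde_json::Value) {
    match serde_json::to_string(event) {
        Ok(json) => log::trace!("appending analytics event: {json}"),
        // Before this PR this was an `error!`; analytics is not important
        // enough for that, so it is demoted to `debug!`.
        Err(err) => log::debug!("corrupt analytics event: discarding ({err})"),
    }
}
```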

crates/re_analytics/src/lib.rs (8 changes: 5 additions, 3 deletions)

```diff
@@ -5,6 +5,9 @@
 //! All the data we collect can be found in
 //! <https://github.com/rerun-io/rerun/blob/latest/crates/re_viewer/src/viewer_analytics.rs>.
+// We never use any log levels other than `trace` and `debug` because analytics is not important
+// enough to require the attention of our users.
+
 #[cfg(not(target_arch = "wasm32"))]
 mod config_native;
 #[cfg(not(target_arch = "wasm32"))]
@@ -47,7 +50,6 @@ use std::collections::HashMap;
 use std::sync::atomic::{AtomicU64, Ordering};
 use std::time::Duration;

-use re_log::trace;
 use time::OffsetDateTime;

 // ----------------------------------------------------------------------------
@@ -253,13 +255,13 @@ pub struct Analytics {
 impl Analytics {
     pub fn new(tick: Duration) -> Result<Self, AnalyticsError> {
         let config = Config::load()?;
-        trace!(?config, ?tick, "loaded analytics config");
+        re_log::trace!(?config, ?tick, "loaded analytics config");

         if config.is_first_run() {
             eprintln!("{DISCLAIMER}");

             config.save()?;
-            trace!(?config, ?tick, "saved analytics config");
+            re_log::trace!(?config, ?tick, "saved analytics config");
         }

         let sink = PostHogSink::default();
```

crates/re_analytics/src/pipeline_native.rs (48 changes: 24 additions, 24 deletions)

```diff
@@ -9,8 +9,6 @@ use crossbeam::{
     select,
 };

-use re_log::{error, trace, warn, warn_once};
-
 use crate::{Config, Event, PostHogSink, SinkError};

 // TODO(cmc): abstract away the concept of a `Pipeline` behind an actual trait when comes the time
@@ -81,13 +79,13 @@ impl Pipeline {
                     let analytics_id = &config.analytics_id;
                     let session_id = &config.session_id.to_string();

-                    trace!(%analytics_id, %session_id, "pipeline catchup thread started");
+                    re_log::trace!(%analytics_id, %session_id, "pipeline catchup thread started");
                     let res = flush_pending_events(&config, &sink);
-                    trace!(%analytics_id, %session_id, ?res, "pipeline catchup thread shut down");
+                    re_log::trace!(%analytics_id, %session_id, ?res, "pipeline catchup thread shut down");
                 }
             })
         {
-            re_log::warn!("Failed to spawn analytics thread: {err}");
+            re_log::debug!("Failed to spawn analytics thread: {err}");
         }

         if let Err(err) = std::thread::Builder::new().name("pipeline".into()).spawn({
@@ -97,13 +95,13 @@
                 let analytics_id = &config.analytics_id;
                 let session_id = &config.session_id.to_string();

-                trace!(%analytics_id, %session_id, "pipeline thread started");
+                re_log::trace!(%analytics_id, %session_id, "pipeline thread started");
                 let res =
                     realtime_pipeline(&config, &sink, session_file, tick, &event_tx, &event_rx);
-                trace!(%analytics_id, %session_id, ?res, "pipeline thread shut down");
+                re_log::trace!(%analytics_id, %session_id, ?res, "pipeline thread shut down");
             }
         }) {
-            re_log::warn!("Failed to spawn analytics thread: {err}");
+            re_log::debug!("Failed to spawn analytics thread: {err}");
         }

         Ok(Some(Self { event_tx }))
@@ -120,15 +118,15 @@ fn try_send_event(event_tx: &channel::Sender<Result<Event, RecvError>>, event: E
     match event_tx.try_send(Ok(event)) {
         Ok(_) => {}
         Err(channel::TrySendError::Full(_)) => {
-            trace!("dropped event, analytics channel is full");
+            re_log::trace!("dropped event, analytics channel is full");
         }
         Err(channel::TrySendError::Disconnected(_)) => {
             // The only way this can happen is if the other end of the channel was previously
             // closed, which we _never_ do.
             // Technically, we should call `.unwrap()` here, but analytics _must never_ be the
             // cause of a crash, so let's not take any unnecessary risk and just ignore the
             // error instead.
-            warn_once!("dropped event, analytics channel is disconnected");
+            re_log::debug_once!("dropped event, analytics channel is disconnected");
         }
     }
 }
@@ -157,18 +155,20 @@ fn flush_pending_events(config: &Config, sink: &PostHogSink) -> anyhow::Result<(
         let Ok(mut session_file) = File::open(&path) else { continue; };
         match flush_events(&mut session_file, &analytics_id, session_id, sink) {
             Ok(_) => {
-                trace!(%analytics_id, %session_id, ?path, "flushed pending events");
+                re_log::trace!(%analytics_id, %session_id, ?path, "flushed pending events");
                 match std::fs::remove_file(&path) {
-                    Ok(_) => trace!(%analytics_id, %session_id, ?path, "removed session file"),
+                    Ok(_) => {
+                        re_log::trace!(%analytics_id, %session_id, ?path, "removed session file");
+                    }
                     Err(err) => {
                         // NOTE: this will eventually lead to duplicated data, though we'll be
                         // able to deduplicate it at query time.
-                        trace!(%analytics_id, %session_id, ?path, %err,
+                        re_log::trace!(%analytics_id, %session_id, ?path, %err,
                             "failed to remove session file");
                     }
                 }
             }
-            Err(err) => trace!(%analytics_id, %session_id, ?path, %err,
+            Err(err) => re_log::trace!(%analytics_id, %session_id, ?path, %err,
                 "failed to flush pending events"),
         }
     }
@@ -205,7 +205,7 @@ fn realtime_pipeline(
         }

         if let Err(err) = session_file.set_len(0) {
-            re_log::warn_once!("couldn't truncate analytics data file: {err}");
+            re_log::debug_once!("couldn't truncate analytics data file: {err}");
            // We couldn't truncate the session file: we'll have to keep it intact for now, which
            // will result in duplicated data that we'll be able to deduplicate at query time.
            return;
@@ -214,12 +214,12 @@
            // We couldn't reset the session file... That one is a bit messy and will likely break
            // analytics for the entire duration of this session, but that really _really_ should
            // never happen.
-            re_log::warn_once!("couldn't seek into analytics data file: {err}");
+            re_log::debug_once!("couldn't seek into analytics data file: {err}");
        }
    };

    let on_event = |session_file: &mut _, event| {
-        trace!(
+        re_log::trace!(
            %analytics_id, %session_id,
            "appending event to current session file..."
        );
@@ -257,7 +257,7 @@ fn append_event(
     let mut event_str = match serde_json::to_string(&event) {
         Ok(event_str) => event_str,
         Err(err) => {
-            error!(%err, %analytics_id, %session_id, "corrupt analytics event: discarding");
+            re_log::debug!(%err, %analytics_id, %session_id, "corrupt analytics event: discarding");
             return Ok(());
         }
     };
@@ -272,7 +272,7 @@
         // We'll try to write a linefeed one more time, just in case, to avoid potentially
         // impacting other events.
         session_file.write_all(b"\n").ok();
-        warn!(%err, %analytics_id, %session_id, "couldn't write to analytics data file");
+        re_log::debug!(%err, %analytics_id, %session_id, "couldn't write to analytics data file");
         return Err(event);
     }

@@ -287,7 +287,7 @@ fn flush_events(
     sink: &PostHogSink,
 ) -> Result<(), SinkError> {
     if let Err(err) = session_file.rewind() {
-        warn!(%err, %analytics_id, %session_id, "couldn't seek into analytics data file");
+        re_log::debug!(%err, %analytics_id, %session_id, "couldn't seek into analytics data file");
         return Err(SinkError::FileSeek(err));
     }

@@ -299,14 +299,14 @@
                     Ok(event) => Some(event),
                     Err(err) => {
                         // NOTE: This is effectively where we detect possible half-writes.
-                        error!(%err, %analytics_id, %session_id,
+                        re_log::debug!(%err, %analytics_id, %session_id,
                             "couldn't deserialize event from analytics data file: dropping it");
                         None
                     }
                 }
             }
             Err(err) => {
-                error!(%err, %analytics_id, %session_id,
+                re_log::debug!(%err, %analytics_id, %session_id,
                     "couldn't read line from analytics data file: dropping event");
                 None
             }
@@ -326,15 +326,15 @@
             );
         } else {
             // A more unusual error. Show it to the user.
-            re_log::warn_once!(
+            re_log::debug_once!(
                 "Failed to send analytics down the sink, will try again later.\n{err}
                 "
             );
         }
         return Err(err);
     }

-    trace!(
+    re_log::trace!(
         %analytics_id,
         %session_id,
         num_events = events.len(),
```
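
The `try_send_event` change above also shows the crate's crash-safety stance: a disconnected channel would normally warrant an `unwrap()` or at least a `warn_once!`, but analytics must never panic or nag. A standalone sketch of that pattern (assumed `crossbeam-channel` and `log` dependencies, with plain `String` events instead of the real `Event` type):

```rust
use crossbeam_channel as channel;

// Hypothetical, simplified send path: both failure modes are swallowed,
// and only ever mentioned at trace/debug level.
fn try_send_event(event_tx: &channel::Sender<String>, event: String) {
    match event_tx.try_send(event) {
        Ok(_) => {}
        Err(channel::TrySendError::Full(_)) => {
            log::trace!("dropped event, analytics channel is full");
        }
        Err(channel::TrySendError::Disconnected(_)) => {
            // `.unwrap()` would be technically correct here, but analytics
            // is never worth a panic, so the event is silently dropped.
            log::debug!("dropped event, analytics channel is disconnected");
        }
    }
}
```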

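To see the user-facing effect of the demotion (an assumed setup using the standard `log` and `env_logger` crates, not rerun's actual `re_log` initialization, whose defaults may differ):

```rust
// Hypothetical demo: with env_logger's default filter (`error`), the
// demoted analytics messages are dropped unless the user opts in,
// e.g. with `RUST_LOG=debug cargo run`.
fn main() {
    env_logger::init();

    // Silent by default after this PR:
    log::debug!("Failed to spawn analytics thread: <err>");

    // What the old `warn!`/`error!` calls did: always reach the terminal.
    log::error!("this would always demand the user's attention");
}
```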