Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Make sure re_analytics never logs higher than debug level #3014

Merged
merged 1 commit into from
Aug 17, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 5 additions & 3 deletions crates/re_analytics/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,9 @@
//! All the data we collect can be found in
//! <https://github.com/rerun-io/rerun/blob/latest/crates/re_viewer/src/viewer_analytics.rs>.

// We never use any log levels other than `trace` and `debug` because analytics is not important
// enough to require the attention of our users.

#[cfg(not(target_arch = "wasm32"))]
mod config_native;
#[cfg(not(target_arch = "wasm32"))]
Expand Down Expand Up @@ -47,7 +50,6 @@ use std::collections::HashMap;
use std::sync::atomic::{AtomicU64, Ordering};
use std::time::Duration;

use re_log::trace;
use time::OffsetDateTime;

// ----------------------------------------------------------------------------
Expand Down Expand Up @@ -253,13 +255,13 @@ pub struct Analytics {
impl Analytics {
pub fn new(tick: Duration) -> Result<Self, AnalyticsError> {
let config = Config::load()?;
trace!(?config, ?tick, "loaded analytics config");
re_log::trace!(?config, ?tick, "loaded analytics config");

if config.is_first_run() {
eprintln!("{DISCLAIMER}");

config.save()?;
trace!(?config, ?tick, "saved analytics config");
re_log::trace!(?config, ?tick, "saved analytics config");
}

let sink = PostHogSink::default();
Expand Down
48 changes: 24 additions & 24 deletions crates/re_analytics/src/pipeline_native.rs
Original file line number Diff line number Diff line change
Expand Up @@ -9,8 +9,6 @@ use crossbeam::{
select,
};

use re_log::{error, trace, warn, warn_once};

use crate::{Config, Event, PostHogSink, SinkError};

// TODO(cmc): abstract away the concept of a `Pipeline` behind an actual trait when comes the time
Expand Down Expand Up @@ -83,13 +81,13 @@ impl Pipeline {
let analytics_id = &config.analytics_id;
let session_id = &config.session_id.to_string();

trace!(%analytics_id, %session_id, "pipeline catchup thread started");
re_log::trace!(%analytics_id, %session_id, "pipeline catchup thread started");
let res = flush_pending_events(&config, &sink);
trace!(%analytics_id, %session_id, ?res, "pipeline catchup thread shut down");
re_log::trace!(%analytics_id, %session_id, ?res, "pipeline catchup thread shut down");
}
})
{
re_log::warn!("Failed to spawn analytics thread: {err}");
re_log::debug!("Failed to spawn analytics thread: {err}");
}

if let Err(err) = std::thread::Builder::new().name("pipeline".into()).spawn({
Expand All @@ -99,13 +97,13 @@ impl Pipeline {
let analytics_id = &config.analytics_id;
let session_id = &config.session_id.to_string();

trace!(%analytics_id, %session_id, "pipeline thread started");
re_log::trace!(%analytics_id, %session_id, "pipeline thread started");
let res =
realtime_pipeline(&config, &sink, session_file, tick, &event_tx, &event_rx);
trace!(%analytics_id, %session_id, ?res, "pipeline thread shut down");
re_log::trace!(%analytics_id, %session_id, ?res, "pipeline thread shut down");
}
}) {
re_log::warn!("Failed to spawn analytics thread: {err}");
re_log::debug!("Failed to spawn analytics thread: {err}");
}

Ok(Some(Self { event_tx }))
Expand All @@ -122,15 +120,15 @@ fn try_send_event(event_tx: &channel::Sender<Result<Event, RecvError>>, event: E
match event_tx.try_send(Ok(event)) {
Ok(_) => {}
Err(channel::TrySendError::Full(_)) => {
trace!("dropped event, analytics channel is full");
re_log::trace!("dropped event, analytics channel is full");
}
Err(channel::TrySendError::Disconnected(_)) => {
// The only way this can happen is if the other end of the channel was previously
// closed, which we _never_ do.
// Technically, we should call `.unwrap()` here, but analytics _must never_ be the
// cause of a crash, so let's not take any unnecessary risk and just ignore the
// error instead.
warn_once!("dropped event, analytics channel is disconnected");
re_log::debug_once!("dropped event, analytics channel is disconnected");
}
}
}
Expand Down Expand Up @@ -159,18 +157,20 @@ fn flush_pending_events(config: &Config, sink: &PostHogSink) -> anyhow::Result<(
let Ok(mut session_file) = File::open(&path) else { continue; };
match flush_events(&mut session_file, &analytics_id, session_id, sink) {
Ok(_) => {
trace!(%analytics_id, %session_id, ?path, "flushed pending events");
re_log::trace!(%analytics_id, %session_id, ?path, "flushed pending events");
match std::fs::remove_file(&path) {
Ok(_) => trace!(%analytics_id, %session_id, ?path, "removed session file"),
Ok(_) => {
re_log::trace!(%analytics_id, %session_id, ?path, "removed session file");
}
Err(err) => {
// NOTE: this will eventually lead to duplicated data, though we'll be
// able to deduplicate it at query time.
trace!(%analytics_id, %session_id, ?path, %err,
re_log::trace!(%analytics_id, %session_id, ?path, %err,
"failed to remove session file");
}
}
}
Err(err) => trace!(%analytics_id, %session_id, ?path, %err,
Err(err) => re_log::trace!(%analytics_id, %session_id, ?path, %err,
"failed to flush pending events"),
}
}
Expand Down Expand Up @@ -207,7 +207,7 @@ fn realtime_pipeline(
}

if let Err(err) = session_file.set_len(0) {
re_log::warn_once!("couldn't truncate analytics data file: {err}");
re_log::debug_once!("couldn't truncate analytics data file: {err}");
// We couldn't truncate the session file: we'll have to keep it intact for now, which
// will result in duplicated data that we'll be able to deduplicate at query time.
return;
Expand All @@ -216,12 +216,12 @@ fn realtime_pipeline(
// We couldn't reset the session file... That one is a bit messy and will likely break
// analytics for the entire duration of this session, but that really _really_ should
// never happen.
re_log::warn_once!("couldn't seek into analytics data file: {err}");
re_log::debug_once!("couldn't seek into analytics data file: {err}");
}
};

let on_event = |session_file: &mut _, event| {
trace!(
re_log::trace!(
%analytics_id, %session_id,
"appending event to current session file..."
);
Expand Down Expand Up @@ -259,7 +259,7 @@ fn append_event(
let mut event_str = match serde_json::to_string(&event) {
Ok(event_str) => event_str,
Err(err) => {
error!(%err, %analytics_id, %session_id, "corrupt analytics event: discarding");
re_log::debug!(%err, %analytics_id, %session_id, "corrupt analytics event: discarding");
return Ok(());
}
};
Expand All @@ -274,7 +274,7 @@ fn append_event(
// We'll try to write a linefeed one more time, just in case, to avoid potentially
// impacting other events.
session_file.write_all(b"\n").ok();
warn!(%err, %analytics_id, %session_id, "couldn't write to analytics data file");
re_log::debug!(%err, %analytics_id, %session_id, "couldn't write to analytics data file");
return Err(event);
}

Expand All @@ -289,7 +289,7 @@ fn flush_events(
sink: &PostHogSink,
) -> Result<(), SinkError> {
if let Err(err) = session_file.rewind() {
warn!(%err, %analytics_id, %session_id, "couldn't seek into analytics data file");
re_log::debug!(%err, %analytics_id, %session_id, "couldn't seek into analytics data file");
return Err(SinkError::FileSeek(err));
}

Expand All @@ -301,14 +301,14 @@ fn flush_events(
Ok(event) => Some(event),
Err(err) => {
// NOTE: This is effectively where we detect possible half-writes.
error!(%err, %analytics_id, %session_id,
re_log::debug!(%err, %analytics_id, %session_id,
"couldn't deserialize event from analytics data file: dropping it");
None
}
}
}
Err(err) => {
error!(%err, %analytics_id, %session_id,
re_log::debug!(%err, %analytics_id, %session_id,
"couldn't read line from analytics data file: dropping event");
None
}
Expand All @@ -328,15 +328,15 @@ fn flush_events(
);
} else {
// A more unusual error. Show it to the user.
re_log::warn_once!(
re_log::debug_once!(
"Failed to send analytics down the sink, will try again later.\n{err}
"
);
}
return Err(err);
}

trace!(
re_log::trace!(
%analytics_id,
%session_id,
num_events = events.len(),
Expand Down