diff --git a/src/function.rs b/src/function.rs
index 891e3dbad..e9ab57939 100644
--- a/src/function.rs
+++ b/src/function.rs
@@ -311,7 +311,7 @@ where
     fn wait_for<'me>(&'me self, zalsa: &'me Zalsa, key_index: Id) -> WaitForResult<'me> {
         match self.sync_table.try_claim(zalsa, key_index) {
             ClaimResult::Running(blocked_on) => WaitForResult::Running(blocked_on),
-            ClaimResult::Cycle { same_thread } => WaitForResult::Cycle { same_thread },
+            ClaimResult::Cycle => WaitForResult::Cycle,
             ClaimResult::Claimed(_) => WaitForResult::Available,
         }
     }
diff --git a/src/function/maybe_changed_after.rs b/src/function/maybe_changed_after.rs
index ac96edade..c5422ffeb 100644
--- a/src/function/maybe_changed_after.rs
+++ b/src/function/maybe_changed_after.rs
@@ -267,7 +267,7 @@ where
     ///   cycle heads have all been finalized.
     /// * provisional memos that have been created in the same revision and iteration and are part of the same cycle.
     #[inline]
-    pub(super) fn validate_may_be_provisional(
+    fn validate_may_be_provisional(
        &self,
        zalsa: &Zalsa,
        zalsa_local: &ZalsaLocal,
@@ -342,7 +342,7 @@ where
     /// If this is a provisional memo, validate that it was cached in the same iteration of the
     /// same cycle(s) that we are still executing. If so, it is valid for reuse. This avoids
     /// runaway re-execution of the same queries within a fixpoint iteration.
-    pub(super) fn validate_same_iteration(
+    fn validate_same_iteration(
        &self,
        zalsa: &Zalsa,
        zalsa_local: &ZalsaLocal,
@@ -369,15 +369,42 @@ where
                .find(|query| query.database_key_index == cycle_head.database_key_index)
                .map(|query| query.iteration_count())
                .or_else(|| {
-                    // If this is a cycle head is owned by another thread that is blocked by this ingredient,
-                    // check if it has the same iteration count.
+                    // The cycle head may not be on our stack because:
+                    //
+                    // * another thread holds the lock on the cycle head (but it waits for the current query to complete), or
+                    // * we're in `maybe_changed_after`, which doesn't modify the cycle stack.
+                    //
+                    // In either case, check whether the latest memo has the same iteration count.
+
+                    // However, we have to be careful to skip over fixpoint initial values:
+                    // if the head is the memo we're trying to validate, always return `None`
+                    // to force a re-execution of the query, because the query obviously
+                    // hasn't completed its iteration yet.
+                    //
+                    // This should be rare, but the `cycle_panic` test fails on some platforms (mainly GitHub Actions)
+                    // without this check. What happens there is that:
+                    //
+                    // * query `a` blocks on query `b`
+                    // * query `b` tries to claim `a`, fails to do so, and inserts the fixpoint initial value
+                    // * query `b` completes with `a` as its head and returns its query result; Salsa blocks query `b` from
+                    //   exiting inside `block_on` (or the thread would complete before the cycle iteration is complete)
+                    // * query `a` resumes but panics because of the fixpoint iteration function
+                    // * query `b` resumes and re-executes its own query, which then tries to fetch `a` (which depends on itself because it's a fixpoint initial value).
+                    //   Without this check, `validate_same_iteration` would return `true` because the latest memo for `a` is the fixpoint initial value.
+                    //   But it should return `false` so that query `b`'s thread re-executes `a` (which then also causes the panic).
+                    //
+                    // That's why we always return `None` if the cycle head is the same as the current database key index.
+                    if cycle_head.database_key_index == database_key_index {
+                        return None;
+                    }
+
                    let ingredient = zalsa.lookup_ingredient(
                        cycle_head.database_key_index.ingredient_index(),
                    );
                    let wait_result = ingredient
                        .wait_for(zalsa, cycle_head.database_key_index.key_index());
-                    if !wait_result.is_cycle_with_other_thread() {
+                    if !wait_result.is_cycle() {
                        return None;
                    }
diff --git a/src/function/sync.rs b/src/function/sync.rs
index 28f088af4..bb514e114 100644
--- a/src/function/sync.rs
+++ b/src/function/sync.rs
@@ -20,7 +20,7 @@ pub(crate) enum ClaimResult<'a> {
     /// Can't claim the query because it is running on another thread.
     Running(Running<'a>),
     /// Claiming the query results in a cycle.
-    Cycle { same_thread: bool },
+    Cycle,
     /// Successfully claimed the query.
     Claimed(ClaimGuard<'a>),
 }
@@ -62,7 +62,7 @@ impl SyncTable {
                write,
            ) {
                BlockResult::Running(blocked_on) => ClaimResult::Running(blocked_on),
-                BlockResult::Cycle { same_thread } => ClaimResult::Cycle { same_thread },
+                BlockResult::Cycle => ClaimResult::Cycle,
            }
        }
        std::collections::hash_map::Entry::Vacant(vacant_entry) => {
diff --git a/src/ingredient.rs b/src/ingredient.rs
index 2ad7bb8ee..f117cb696 100644
--- a/src/ingredient.rs
+++ b/src/ingredient.rs
@@ -250,12 +250,11 @@ pub(crate) fn fmt_index(debug_name: &str, id: Id, fmt: &mut fmt::Formatter<'_>)
 pub enum WaitForResult<'me> {
     Running(Running<'me>),
     Available,
-    Cycle { same_thread: bool },
+    Cycle,
 }
 
 impl WaitForResult<'_> {
-    /// Returns `true` if waiting for this input results in a cycle with another thread.
-    pub const fn is_cycle_with_other_thread(&self) -> bool {
-        matches!(self, WaitForResult::Cycle { same_thread: false })
+    pub const fn is_cycle(&self) -> bool {
+        matches!(self, WaitForResult::Cycle)
     }
 }
diff --git a/src/runtime.rs b/src/runtime.rs
index bc2859a7e..6a4d1e8b8 100644
--- a/src/runtime.rs
+++ b/src/runtime.rs
@@ -51,7 +51,7 @@ pub(crate) enum BlockResult<'me> {
     ///
     /// The lock is held by the current thread, or there's another thread that is waiting on the current thread,
     /// and blocking this thread on the other thread would result in a deadlock/cycle.
-    Cycle { same_thread: bool },
+    Cycle,
 }
 
 pub struct Running<'me>(Box<BlockedOnInner<'me>>);
@@ -230,14 +230,14 @@ impl Runtime {
        let thread_id = thread::current().id();
        // Cycle in the same thread.
        if thread_id == other_id {
-            return BlockResult::Cycle { same_thread: true };
+            return BlockResult::Cycle;
        }
 
        let dg = self.dependency_graph.lock();
 
        if dg.depends_on(other_id, thread_id) {
            crate::tracing::debug!("block_on: cycle detected for {database_key:?} in thread {thread_id:?} on {other_id:?}");
-            return BlockResult::Cycle { same_thread: false };
+            return BlockResult::Cycle;
        }
 
        BlockResult::Running(Running(Box::new(BlockedOnInner {
diff --git a/tests/common/mod.rs b/tests/common/mod.rs
index f7aa79b31..df3fba477 100644
--- a/tests/common/mod.rs
+++ b/tests/common/mod.rs
@@ -34,6 +34,10 @@ pub trait LogDatabase: HasLogger + Database {
        self.logger().logs.lock().unwrap().push(string);
    }
 
+    fn clear_logs(&self) {
+        std::mem::take(&mut *self.logger().logs.lock().unwrap());
+    }
+
    /// Asserts what the (formatted) logs should look like,
    /// clearing the logged events. This takes `&mut self` because
    /// it is meant to be run from outside any tracked functions.
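
Before the test changes, a brief illustration of the simplification above. The following is a minimal, self-contained Rust sketch, not salsa's actual implementation: the `u64` thread ids, the toy wait-graph, and the function names here are hypothetical stand-ins for `ThreadId`, `DependencyGraph::depends_on`, and `Runtime::block_on`. It shows why the `same_thread` flag could be dropped: both the same-thread case and the cross-thread deadlock case now produce the same `Cycle` variant, and no caller distinguishes them anymore.

use std::collections::HashMap;

#[derive(Debug, PartialEq)]
enum BlockResult {
    Running,
    Cycle, // previously `Cycle { same_thread: bool }`
}

/// Hypothetical stand-in for the dependency graph's `depends_on`: walks the
/// "thread X waits on thread Y" edges to see if `from` transitively waits on `to`.
fn depends_on(edges: &HashMap<u64, u64>, from: u64, to: u64) -> bool {
    let mut current = from;
    while let Some(&next) = edges.get(&current) {
        if next == to {
            return true;
        }
        current = next;
    }
    false
}

fn block_on(edges: &HashMap<u64, u64>, this_thread: u64, other_thread: u64) -> BlockResult {
    // With the `same_thread` flag gone, both cycle flavors are reported
    // identically; callers no longer care which thread closed the loop.
    if this_thread == other_thread || depends_on(edges, other_thread, this_thread) {
        BlockResult::Cycle
    } else {
        BlockResult::Running
    }
}

fn main() {
    // Same-thread cycle: thread 1 blocks on a query it itself holds.
    assert_eq!(block_on(&HashMap::new(), 1, 1), BlockResult::Cycle);

    // Cross-thread cycle: thread 2 already waits on thread 1, so thread 1
    // blocking on thread 2 would deadlock.
    let edges = HashMap::from([(2, 1)]);
    assert_eq!(block_on(&edges, 1, 2), BlockResult::Cycle);

    // No cycle: thread 3 is independent, so thread 1 can simply wait.
    assert_eq!(block_on(&edges, 1, 3), BlockResult::Running);
}

Since both arms collapse to the same variant, the flag carried no information the remaining caller (`is_cycle`) could act on, which is what makes the removals in `src/runtime.rs`, `src/function/sync.rs`, and `src/ingredient.rs` safe.
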
diff --git a/tests/cycle.rs b/tests/cycle.rs
index 28266f2c5..3c3687f3d 100644
--- a/tests/cycle.rs
+++ b/tests/cycle.rs
@@ -1025,3 +1025,42 @@ fn repeat_provisional_query() {
        "salsa_event(WillExecute { database_key: min_panic(Id(2)) })",
    ]"#]]);
 }
+
+#[test]
+fn repeat_provisional_query_incremental() {
+    let mut db = ExecuteValidateLoggerDatabase::default();
+    let a_in = Inputs::new(&db, vec![]);
+    let b_in = Inputs::new(&db, vec![]);
+    let c_in = Inputs::new(&db, vec![]);
+    let a = Input::MinIterate(a_in);
+    let b = Input::MinPanic(b_in);
+    let c = Input::MinPanic(c_in);
+    a_in.set_inputs(&mut db).to(vec![value(59), b.clone()]);
+    b_in.set_inputs(&mut db)
+        .to(vec![value(60), c.clone(), c.clone(), c]);
+    c_in.set_inputs(&mut db).to(vec![a.clone()]);
+
+    a.assert_value(&db, 59);
+
+    db.clear_logs();
+
+    c_in.set_inputs(&mut db).to(vec![a.clone()]);
+
+    a.assert_value(&db, 59);
+
+    // `min_panic(Id(2))` should only run twice:
+    // * Once before iterating
+    // * Once as part of iterating
+    //
+    // If it runs more than once before iterating, then this suggests that
+    // `validate_same_iteration` incorrectly returns `false`.
+    db.assert_logs(expect![[r#"
+        [
+            "salsa_event(WillExecute { database_key: min_panic(Id(2)) })",
+            "salsa_event(WillExecute { database_key: min_panic(Id(1)) })",
+            "salsa_event(WillExecute { database_key: min_iterate(Id(0)) })",
+            "salsa_event(WillIterateCycle { database_key: min_iterate(Id(0)), iteration_count: IterationCount(1), fell_back: false })",
+            "salsa_event(WillExecute { database_key: min_panic(Id(1)) })",
+            "salsa_event(WillExecute { database_key: min_panic(Id(2)) })",
+        ]"#]]);
+}
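
Finally, to make the invariant behind the new guard in `validate_same_iteration` concrete: the sketch below is a minimal, hypothetical model, not salsa's API. `DatabaseKeyIndex` here is a toy newtype and `iteration_count_for_head` is an invented helper mirroring the shape of the check. The rule it encodes is that a memo's own fixpoint initial value must never count as evidence that its cycle iteration already completed.

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct DatabaseKeyIndex(u32);

/// Hypothetical helper: decide whether the latest memo for `cycle_head`
/// can be trusted while validating `memo_under_validation`.
fn iteration_count_for_head(
    cycle_head: DatabaseKeyIndex,
    memo_under_validation: DatabaseKeyIndex,
    latest_memo_iteration: u32,
) -> Option<u32> {
    // If the head *is* the memo we're validating, its latest memo is just
    // the fixpoint initial value; returning `None` forces a re-execution
    // instead of treating the initial value as a completed iteration.
    if cycle_head == memo_under_validation {
        return None;
    }
    Some(latest_memo_iteration)
}

fn main() {
    let a = DatabaseKeyIndex(0);
    let b = DatabaseKeyIndex(1);

    // Self-referential head (the `cycle_panic` scenario): never reuse.
    assert_eq!(iteration_count_for_head(a, a, 0), None);

    // Head owned by another query: its iteration count may be compared
    // against ours, as `validate_same_iteration` does after the guard.
    assert_eq!(iteration_count_for_head(a, b, 2), Some(2));
}

The `repeat_provisional_query_incremental` test above exercises the other direction of the same check: after an incremental change, a provisional memo from the same iteration should still be reused, so `min_panic(Id(2))` executes exactly once per iteration rather than being re-run for every consumer.
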