diff --git a/compiler/rustc_type_ir/src/search_graph/mod.rs b/compiler/rustc_type_ir/src/search_graph/mod.rs index 7c58cd7303ebc..e0c6c777c09af 100644 --- a/compiler/rustc_type_ir/src/search_graph/mod.rs +++ b/compiler/rustc_type_ir/src/search_graph/mod.rs @@ -247,6 +247,8 @@ impl CandidateHeadUsages { pub fn merge_usages(&mut self, other: CandidateHeadUsages) { if let Some(other_usages) = other.usages { if let Some(ref mut self_usages) = self.usages { + // Each head is merged independently, so the final usage counts are the same + // regardless of hash iteration order. #[allow(rustc::potential_query_instability)] for (head_index, head) in other_usages.into_iter() { let HeadUsages { inductive, unknown, coinductive, forced_ambiguity } = head; @@ -501,6 +503,8 @@ impl NestedGoals { /// to all nested goals of that nested goal are also inductive. Otherwise the paths are /// the same as for the child. fn extend_from_child(&mut self, step_kind: PathKind, nested_goals: &NestedGoals) { + // Each nested goal is updated independently, and `insert` only unions paths for that + // goal, so traversal order cannot affect the result. #[allow(rustc::potential_query_instability)] for (input, paths_to_nested) in nested_goals.iter() { let paths_to_nested = paths_to_nested.extend_with(step_kind); @@ -508,6 +512,8 @@ impl NestedGoals { } } + // This helper intentionally exposes unstable hash iteration so each caller must opt in + // locally and justify why its traversal is order-insensitive. 
#[cfg_attr(feature = "nightly", rustc_lint_query_instability)] #[allow(rustc::potential_query_instability)] fn iter(&self) -> impl Iterator + '_ { @@ -710,6 +716,8 @@ impl, X: Cx> SearchGraph { pub fn ignore_candidate_head_usages(&mut self, usages: CandidateHeadUsages) { if let Some(usages) = usages.usages { let (entry_index, entry) = self.stack.last_mut_with_index().unwrap(); + // Ignoring usages only mutates the state for the current `head_index`, so the + // resulting per-head state is unchanged by iteration order. #[allow(rustc::potential_query_instability)] for (head_index, usages) in usages.into_iter() { if head_index == entry_index { @@ -901,6 +909,8 @@ impl, X: Cx> SearchGraph { /// don't depend on its value. fn clear_dependent_provisional_results_for_rerun(&mut self) { let rerun_index = self.stack.next_index(); + // Each cached entry is filtered independently based on whether it depends on + // `rerun_index`, so bucket traversal order does not matter. #[allow(rustc::potential_query_instability)] self.provisional_cache.retain(|_, entries| { entries.retain(|entry| { @@ -958,6 +968,8 @@ impl, X: Cx> SearchGraph { rebase_reason: RebaseReason, ) { let popped_head_index = self.stack.next_index(); + // Rebasing decisions depend only on each provisional entry and the current stack state, + // so traversing the cache in hash order cannot change the final cache contents. #[allow(rustc::potential_query_instability)] self.provisional_cache.retain(|&input, entries| { entries.retain_mut(|entry| { @@ -1139,6 +1151,8 @@ impl, X: Cx> SearchGraph { // The global cache entry is also invalid if there's a provisional cache entry // that would apply for any of its nested goals. + // Any matching provisional entry rejects the candidate, + // so iteration order only affects when we return `false`, not the final answer. #[allow(rustc::potential_query_instability)] for (input, path_from_global_entry) in nested_goals.iter() { let Some(entries) = self.provisional_cache.get(&input) else {