diff --git a/compiler/rustc_feature/src/builtin_attrs.rs b/compiler/rustc_feature/src/builtin_attrs.rs index 72ea55d5999a2..d593f05c8c6db 100644 --- a/compiler/rustc_feature/src/builtin_attrs.rs +++ b/compiler/rustc_feature/src/builtin_attrs.rs @@ -643,8 +643,8 @@ pub const BUILTIN_ATTRIBUTES: &[BuiltinAttribute] = &[ through unstable paths" ), rustc_attr!( - rustc_deprecated_safe_2024, Normal, template!(Word), WarnFollowing, - EncodeCrossCrate::Yes, + rustc_deprecated_safe_2024, Normal, template!(List: r#"audit_that = "...""#), + ErrorFollowing, EncodeCrossCrate::Yes, "rustc_deprecated_safe_2024 is supposed to be used in libstd only", ), diff --git a/compiler/rustc_middle/src/arena.rs b/compiler/rustc_middle/src/arena.rs index e3d7dff3c66bb..37c10b14054c5 100644 --- a/compiler/rustc_middle/src/arena.rs +++ b/compiler/rustc_middle/src/arena.rs @@ -61,10 +61,6 @@ macro_rules! arena_types { [] dtorck_constraint: rustc_middle::traits::query::DropckConstraint<'tcx>, [] candidate_step: rustc_middle::traits::query::CandidateStep<'tcx>, [] autoderef_bad_ty: rustc_middle::traits::query::MethodAutoderefBadTy<'tcx>, - [] canonical_goal_evaluation: - rustc_type_ir::solve::inspect::CanonicalGoalEvaluationStep< - rustc_middle::ty::TyCtxt<'tcx> - >, [] query_region_constraints: rustc_middle::infer::canonical::QueryRegionConstraints<'tcx>, [] type_op_subtype: rustc_middle::infer::canonical::Canonical<'tcx, diff --git a/compiler/rustc_middle/src/ty/context.rs b/compiler/rustc_middle/src/ty/context.rs index 971e51be25644..48f936eae4267 100644 --- a/compiler/rustc_middle/src/ty/context.rs +++ b/compiler/rustc_middle/src/ty/context.rs @@ -107,8 +107,6 @@ impl<'tcx> Interner for TyCtxt<'tcx> { self.mk_predefined_opaques_in_body(data) } type DefiningOpaqueTypes = &'tcx ty::List; - type CanonicalGoalEvaluationStepRef = - &'tcx solve::inspect::CanonicalGoalEvaluationStep>; type CanonicalVars = CanonicalVarInfos<'tcx>; fn mk_canonical_var_infos(self, infos: &[ty::CanonicalVarInfo]) -> Self::CanonicalVars { self.mk_canonical_var_infos(infos) @@ -277,13 +275,6 @@ impl<'tcx> Interner for TyCtxt<'tcx> { self.debug_assert_args_compatible(def_id, args); } - fn intern_canonical_goal_evaluation_step( - self, - step: solve::inspect::CanonicalGoalEvaluationStep>, - ) -> &'tcx solve::inspect::CanonicalGoalEvaluationStep> { - self.arena.alloc(step) - } - fn mk_type_list_from_iter(self, args: I) -> T::Output where I: Iterator, diff --git a/compiler/rustc_mir_build/messages.ftl b/compiler/rustc_mir_build/messages.ftl index dda4debecec67..7baf0256dd890 100644 --- a/compiler/rustc_mir_build/messages.ftl +++ b/compiler/rustc_mir_build/messages.ftl @@ -30,7 +30,7 @@ mir_build_call_to_deprecated_safe_fn_requires_unsafe = call to deprecated safe function `{$function}` is unsafe and requires unsafe block .note = consult the function's documentation for information on how to avoid undefined behavior .label = call to unsafe function - .suggestion = you can wrap the call in an `unsafe` block if you can guarantee the code is only ever called from single-threaded code + .suggestion = you can wrap the call in an `unsafe` block if you can guarantee {$guarantee} mir_build_call_to_fn_with_requires_unsafe = call to function `{$function}` with `#[target_feature]` is unsafe and requires unsafe block diff --git a/compiler/rustc_mir_build/src/check_unsafety.rs b/compiler/rustc_mir_build/src/check_unsafety.rs index 54a4204da71e8..9b85ad0ad0891 100644 --- a/compiler/rustc_mir_build/src/check_unsafety.rs +++ 
b/compiler/rustc_mir_build/src/check_unsafety.rs @@ -96,9 +96,32 @@ impl<'tcx> UnsafetyVisitor<'_, 'tcx> { // from an edition before 2024. &UnsafeOpKind::CallToUnsafeFunction(Some(id)) if !span.at_least_rust_2024() - && self.tcx.has_attr(id, sym::rustc_deprecated_safe_2024) => + && let Some(attr) = self.tcx.get_attr(id, sym::rustc_deprecated_safe_2024) => { + let suggestion = attr + .meta_item_list() + .unwrap_or_default() + .into_iter() + .find(|item| item.has_name(sym::audit_that)) + .map(|item| { + item.value_str().expect( + "`#[rustc_deprecated_safe_2024(audit_that)]` must have a string value", + ) + }); + let sm = self.tcx.sess.source_map(); + let guarantee = suggestion + .as_ref() + .map(|suggestion| format!("that {}", suggestion)) + .unwrap_or_else(|| String::from("its unsafe preconditions")); + let suggestion = suggestion + .and_then(|suggestion| { + sm.indentation_before(span).map(|indent| { + format!("{}// TODO: Audit that {}.\n", indent, suggestion) // ignore-tidy-todo + }) + }) + .unwrap_or_default(); + self.tcx.emit_node_span_lint( DEPRECATED_SAFE_2024, self.hir_context, @@ -106,8 +129,9 @@ impl<'tcx> UnsafetyVisitor<'_, 'tcx> { CallToDeprecatedSafeFnRequiresUnsafe { span, function: with_no_trimmed_paths!(self.tcx.def_path_str(id)), + guarantee, sub: CallToDeprecatedSafeFnRequiresUnsafeSub { - indent: sm.indentation_before(span).unwrap_or_default(), + start_of_line_suggestion: suggestion, start_of_line: sm.span_extend_to_line(span).shrink_to_lo(), left: span.shrink_to_lo(), right: span.shrink_to_hi(), diff --git a/compiler/rustc_mir_build/src/errors.rs b/compiler/rustc_mir_build/src/errors.rs index 42eca71ca3f30..34577f102d1c5 100644 --- a/compiler/rustc_mir_build/src/errors.rs +++ b/compiler/rustc_mir_build/src/errors.rs @@ -28,6 +28,7 @@ pub(crate) struct CallToDeprecatedSafeFnRequiresUnsafe { #[label] pub(crate) span: Span, pub(crate) function: String, + pub(crate) guarantee: String, #[subdiagnostic] pub(crate) sub: CallToDeprecatedSafeFnRequiresUnsafeSub, } @@ -35,10 +36,8 @@ pub(crate) struct CallToDeprecatedSafeFnRequiresUnsafe { #[derive(Subdiagnostic)] #[multipart_suggestion(mir_build_suggestion, applicability = "machine-applicable")] pub(crate) struct CallToDeprecatedSafeFnRequiresUnsafeSub { - pub(crate) indent: String, - #[suggestion_part( - code = "{indent}// TODO: Audit that the environment access only happens in single-threaded code.\n" // ignore-tidy-todo - )] + pub(crate) start_of_line_suggestion: String, + #[suggestion_part(code = "{start_of_line_suggestion}")] pub(crate) start_of_line: Span, #[suggestion_part(code = "unsafe {{ ")] pub(crate) left: Span, diff --git a/compiler/rustc_next_trait_solver/src/solve/inspect/build.rs b/compiler/rustc_next_trait_solver/src/solve/inspect/build.rs index a3c21666bd67c..86fb036cd3df8 100644 --- a/compiler/rustc_next_trait_solver/src/solve/inspect/build.rs +++ b/compiler/rustc_next_trait_solver/src/solve/inspect/build.rs @@ -5,11 +5,10 @@ //! see the comment on [ProofTreeBuilder]. 
use std::marker::PhantomData; -use std::mem; use derive_where::derive_where; use rustc_type_ir::inherent::*; -use rustc_type_ir::{self as ty, search_graph, Interner}; +use rustc_type_ir::{self as ty, Interner}; use crate::delegate::SolverDelegate; use crate::solve::eval_ctxt::canonical; @@ -94,31 +93,10 @@ impl WipGoalEvaluation { } } -#[derive_where(PartialEq, Eq; I: Interner)] -pub(in crate::solve) enum WipCanonicalGoalEvaluationKind { - Overflow, - CycleInStack, - ProvisionalCacheHit, - Interned { final_revision: I::CanonicalGoalEvaluationStepRef }, -} - -impl std::fmt::Debug for WipCanonicalGoalEvaluationKind { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - Self::Overflow => write!(f, "Overflow"), - Self::CycleInStack => write!(f, "CycleInStack"), - Self::ProvisionalCacheHit => write!(f, "ProvisionalCacheHit"), - Self::Interned { final_revision: _ } => { - f.debug_struct("Interned").finish_non_exhaustive() - } - } - } -} - #[derive_where(PartialEq, Eq, Debug; I: Interner)] struct WipCanonicalGoalEvaluation { goal: CanonicalInput, - kind: Option>, + encountered_overflow: bool, /// Only used for uncached goals. After we finished evaluating /// the goal, this is interned and moved into `kind`. final_revision: Option>, @@ -127,25 +105,17 @@ struct WipCanonicalGoalEvaluation { impl WipCanonicalGoalEvaluation { fn finalize(self) -> inspect::CanonicalGoalEvaluation { - // We've already interned the final revision in - // `fn finalize_canonical_goal_evaluation`. - assert!(self.final_revision.is_none()); - let kind = match self.kind.unwrap() { - WipCanonicalGoalEvaluationKind::Overflow => { + inspect::CanonicalGoalEvaluation { + goal: self.goal, + kind: if self.encountered_overflow { + assert!(self.final_revision.is_none()); inspect::CanonicalGoalEvaluationKind::Overflow - } - WipCanonicalGoalEvaluationKind::CycleInStack => { - inspect::CanonicalGoalEvaluationKind::CycleInStack - } - WipCanonicalGoalEvaluationKind::ProvisionalCacheHit => { - inspect::CanonicalGoalEvaluationKind::ProvisionalCacheHit - } - WipCanonicalGoalEvaluationKind::Interned { final_revision } => { + } else { + let final_revision = self.final_revision.unwrap().finalize(); inspect::CanonicalGoalEvaluationKind::Evaluation { final_revision } - } - }; - - inspect::CanonicalGoalEvaluation { goal: self.goal, kind, result: self.result.unwrap() } + }, + result: self.result.unwrap(), + } } } @@ -308,7 +278,7 @@ impl, I: Interner> ProofTreeBuilder { ) -> ProofTreeBuilder { self.nested(|| WipCanonicalGoalEvaluation { goal, - kind: None, + encountered_overflow: false, final_revision: None, result: None, }) @@ -329,11 +299,11 @@ impl, I: Interner> ProofTreeBuilder { } } - pub fn canonical_goal_evaluation_kind(&mut self, kind: WipCanonicalGoalEvaluationKind) { + pub fn canonical_goal_evaluation_overflow(&mut self) { if let Some(this) = self.as_mut() { match this { DebugSolver::CanonicalGoalEvaluation(canonical_goal_evaluation) => { - assert_eq!(canonical_goal_evaluation.kind.replace(kind), None); + canonical_goal_evaluation.encountered_overflow = true; } _ => unreachable!(), }; @@ -547,51 +517,3 @@ impl, I: Interner> ProofTreeBuilder { } } } - -impl search_graph::ProofTreeBuilder for ProofTreeBuilder -where - D: SolverDelegate, - I: Interner, -{ - fn try_apply_proof_tree( - &mut self, - proof_tree: Option, - ) -> bool { - if !self.is_noop() { - if let Some(final_revision) = proof_tree { - let kind = WipCanonicalGoalEvaluationKind::Interned { final_revision }; - 
self.canonical_goal_evaluation_kind(kind); - true - } else { - false - } - } else { - true - } - } - - fn on_provisional_cache_hit(&mut self) { - self.canonical_goal_evaluation_kind(WipCanonicalGoalEvaluationKind::ProvisionalCacheHit); - } - - fn on_cycle_in_stack(&mut self) { - self.canonical_goal_evaluation_kind(WipCanonicalGoalEvaluationKind::CycleInStack); - } - - fn finalize_canonical_goal_evaluation( - &mut self, - tcx: I, - ) -> Option { - self.as_mut().map(|this| match this { - DebugSolver::CanonicalGoalEvaluation(evaluation) => { - let final_revision = mem::take(&mut evaluation.final_revision).unwrap(); - let final_revision = - tcx.intern_canonical_goal_evaluation_step(final_revision.finalize()); - let kind = WipCanonicalGoalEvaluationKind::Interned { final_revision }; - assert_eq!(evaluation.kind.replace(kind), None); - final_revision - } - _ => unreachable!(), - }) - } -} diff --git a/compiler/rustc_next_trait_solver/src/solve/search_graph.rs b/compiler/rustc_next_trait_solver/src/solve/search_graph.rs index fe053a506e712..81c89fad8e8a0 100644 --- a/compiler/rustc_next_trait_solver/src/solve/search_graph.rs +++ b/compiler/rustc_next_trait_solver/src/solve/search_graph.rs @@ -1,12 +1,13 @@ +use std::convert::Infallible; use std::marker::PhantomData; use rustc_type_ir::inherent::*; -use rustc_type_ir::search_graph::{self, CycleKind, UsageKind}; +use rustc_type_ir::search_graph::{self, PathKind}; use rustc_type_ir::solve::{CanonicalInput, Certainty, QueryResult}; use rustc_type_ir::Interner; -use super::inspect::{self, ProofTreeBuilder}; -use super::FIXPOINT_STEP_LIMIT; +use super::inspect::ProofTreeBuilder; +use super::{has_no_inference_or_external_constraints, FIXPOINT_STEP_LIMIT}; use crate::delegate::SolverDelegate; /// This type is never constructed. 
We only use it to implement `search_graph::Delegate` @@ -22,43 +23,48 @@ where { type Cx = D::Interner; + const ENABLE_PROVISIONAL_CACHE: bool = true; + type ValidationScope = Infallible; + fn enter_validation_scope( + _cx: Self::Cx, + _input: CanonicalInput, + ) -> Option { + None + } + const FIXPOINT_STEP_LIMIT: usize = FIXPOINT_STEP_LIMIT; type ProofTreeBuilder = ProofTreeBuilder; + fn inspect_is_noop(inspect: &mut Self::ProofTreeBuilder) -> bool { + inspect.is_noop() + } + const DIVIDE_AVAILABLE_DEPTH_ON_OVERFLOW: usize = 4; fn recursion_limit(cx: I) -> usize { cx.recursion_limit() } fn initial_provisional_result( cx: I, - kind: CycleKind, + kind: PathKind, input: CanonicalInput, ) -> QueryResult { match kind { - CycleKind::Coinductive => response_no_constraints(cx, input, Certainty::Yes), - CycleKind::Inductive => response_no_constraints(cx, input, Certainty::overflow(false)), + PathKind::Coinductive => response_no_constraints(cx, input, Certainty::Yes), + PathKind::Inductive => response_no_constraints(cx, input, Certainty::overflow(false)), } } - fn reached_fixpoint( - cx: I, - kind: UsageKind, + fn is_initial_provisional_result( + cx: Self::Cx, + kind: PathKind, input: CanonicalInput, - provisional_result: Option>, result: QueryResult, ) -> bool { - if let Some(r) = provisional_result { - r == result - } else { - match kind { - UsageKind::Single(CycleKind::Coinductive) => { - response_no_constraints(cx, input, Certainty::Yes) == result - } - UsageKind::Single(CycleKind::Inductive) => { - response_no_constraints(cx, input, Certainty::overflow(false)) == result - } - UsageKind::Mixed => false, + match kind { + PathKind::Coinductive => response_no_constraints(cx, input, Certainty::Yes) == result, + PathKind::Inductive => { + response_no_constraints(cx, input, Certainty::overflow(false)) == result } } } @@ -68,7 +74,7 @@ where inspect: &mut ProofTreeBuilder, input: CanonicalInput, ) -> QueryResult { - inspect.canonical_goal_evaluation_kind(inspect::WipCanonicalGoalEvaluationKind::Overflow); + inspect.canonical_goal_evaluation_overflow(); response_no_constraints(cx, input, Certainty::overflow(true)) } @@ -76,6 +82,22 @@ where response_no_constraints(cx, input, Certainty::overflow(false)) } + fn is_ambiguous_result(result: QueryResult) -> bool { + result.is_ok_and(|response| { + has_no_inference_or_external_constraints(response) + && matches!(response.value.certainty, Certainty::Maybe(_)) + }) + } + + fn propagate_ambiguity( + cx: I, + for_input: CanonicalInput, + from_result: QueryResult, + ) -> QueryResult { + let certainty = from_result.unwrap().value.certainty; + response_no_constraints(cx, for_input, certainty) + } + fn step_is_coinductive(cx: I, input: CanonicalInput) -> bool { input.value.goal.predicate.is_coinductive(cx) } diff --git a/compiler/rustc_span/src/symbol.rs b/compiler/rustc_span/src/symbol.rs index 9cb729ec48588..a2e94492f8c23 100644 --- a/compiler/rustc_span/src/symbol.rs +++ b/compiler/rustc_span/src/symbol.rs @@ -472,6 +472,7 @@ symbols! 
{ attr, attr_literals, attributes, + audit_that, augmented_assignments, auto_traits, automatically_derived, diff --git a/compiler/rustc_trait_selection/src/solve/inspect/analyse.rs b/compiler/rustc_trait_selection/src/solve/inspect/analyse.rs index 1a459aa484ff1..51dda25d8adcc 100644 --- a/compiler/rustc_trait_selection/src/solve/inspect/analyse.rs +++ b/compiler/rustc_trait_selection/src/solve/inspect/analyse.rs @@ -334,13 +334,9 @@ impl<'a, 'tcx> InspectGoal<'a, 'tcx> { pub fn candidates(&'a self) -> Vec> { let mut candidates = vec![]; - let last_eval_step = match self.evaluation_kind { - inspect::CanonicalGoalEvaluationKind::Overflow - | inspect::CanonicalGoalEvaluationKind::CycleInStack - | inspect::CanonicalGoalEvaluationKind::ProvisionalCacheHit => { - warn!("unexpected root evaluation: {:?}", self.evaluation_kind); - return vec![]; - } + let last_eval_step = match &self.evaluation_kind { + // An annoying edge case in case the recursion limit is 0. + inspect::CanonicalGoalEvaluationKind::Overflow => return vec![], inspect::CanonicalGoalEvaluationKind::Evaluation { final_revision } => final_revision, }; diff --git a/compiler/rustc_type_ir/src/binder.rs b/compiler/rustc_type_ir/src/binder.rs index c1f6fb36324ed..8797288070e71 100644 --- a/compiler/rustc_type_ir/src/binder.rs +++ b/compiler/rustc_type_ir/src/binder.rs @@ -8,7 +8,7 @@ use derive_where::derive_where; use rustc_macros::{HashStable_NoContext, TyDecodable, TyEncodable}; #[cfg(feature = "nightly")] use rustc_serialize::Decodable; -use tracing::debug; +use tracing::instrument; use crate::data_structures::SsoHashSet; use crate::fold::{FallibleTypeFolder, TypeFoldable, TypeFolder, TypeSuperFoldable}; @@ -831,28 +831,20 @@ impl<'a, I: Interner> ArgFolder<'a, I> { /// As indicated in the diagram, here the same type `&'a i32` is instantiated once, but in the /// first case we do not increase the De Bruijn index and in the second case we do. The reason /// is that only in the second case have we passed through a fn binder. 
+ #[instrument(level = "trace", skip(self), fields(binders_passed = self.binders_passed), ret)] fn shift_vars_through_binders>(&self, val: T) -> T { - debug!( - "shift_vars(val={:?}, binders_passed={:?}, has_escaping_bound_vars={:?})", - val, - self.binders_passed, - val.has_escaping_bound_vars() - ); - if self.binders_passed == 0 || !val.has_escaping_bound_vars() { - return val; + val + } else { + ty::fold::shift_vars(self.cx, val, self.binders_passed) } - - let result = ty::fold::shift_vars(TypeFolder::cx(self), val, self.binders_passed); - debug!("shift_vars: shifted result = {:?}", result); - - result } fn shift_region_through_binders(&self, region: I::Region) -> I::Region { if self.binders_passed == 0 || !region.has_escaping_bound_vars() { - return region; + region + } else { + ty::fold::shift_region(self.cx, region, self.binders_passed) } - ty::fold::shift_region(self.cx, region, self.binders_passed) } } diff --git a/compiler/rustc_type_ir/src/fold.rs b/compiler/rustc_type_ir/src/fold.rs index d37bacc7d359f..8e3534b0e9eb4 100644 --- a/compiler/rustc_type_ir/src/fold.rs +++ b/compiler/rustc_type_ir/src/fold.rs @@ -48,7 +48,7 @@ use std::mem; use rustc_index::{Idx, IndexVec}; -use tracing::debug; +use tracing::instrument; use crate::data_structures::Lrc; use crate::inherent::*; @@ -417,15 +417,14 @@ pub fn shift_region(cx: I, region: I::Region, amount: u32) -> I::Re } } +#[instrument(level = "trace", skip(cx), ret)] pub fn shift_vars(cx: I, value: T, amount: u32) -> T where T: TypeFoldable, { - debug!("shift_vars(value={:?}, amount={})", value, amount); - if amount == 0 || !value.has_escaping_bound_vars() { - return value; + value + } else { + value.fold_with(&mut Shifter::new(cx, amount)) } - - value.fold_with(&mut Shifter::new(cx, amount)) } diff --git a/compiler/rustc_type_ir/src/interner.rs b/compiler/rustc_type_ir/src/interner.rs index c251540c0fc29..f2492ede4f5ea 100644 --- a/compiler/rustc_type_ir/src/interner.rs +++ b/compiler/rustc_type_ir/src/interner.rs @@ -11,7 +11,6 @@ use crate::inherent::*; use crate::ir_print::IrPrint; use crate::lang_items::TraitSolverLangItem; use crate::relate::Relate; -use crate::solve::inspect::CanonicalGoalEvaluationStep; use crate::solve::{ CanonicalInput, ExternalConstraintsData, PredefinedOpaquesData, QueryResult, SolverMode, }; @@ -65,11 +64,6 @@ pub trait Interner: + Eq + TypeVisitable + SliceLike; - type CanonicalGoalEvaluationStepRef: Copy - + Debug - + Hash - + Eq - + Deref>; type CanonicalVars: Copy + Debug @@ -177,11 +171,6 @@ pub trait Interner: fn debug_assert_args_compatible(self, def_id: Self::DefId, args: Self::GenericArgs); - fn intern_canonical_goal_evaluation_step( - self, - step: CanonicalGoalEvaluationStep, - ) -> Self::CanonicalGoalEvaluationStepRef; - fn mk_type_list_from_iter(self, args: I) -> T::Output where I: Iterator, @@ -390,7 +379,6 @@ impl CollectAndApply for Result { } impl search_graph::Cx for I { - type ProofTree = Option; type Input = CanonicalInput; type Result = QueryResult; diff --git a/compiler/rustc_type_ir/src/search_graph/global_cache.rs b/compiler/rustc_type_ir/src/search_graph/global_cache.rs index be4f1069cd167..47f7cefac6ad1 100644 --- a/compiler/rustc_type_ir/src/search_graph/global_cache.rs +++ b/compiler/rustc_type_ir/src/search_graph/global_cache.rs @@ -1,18 +1,17 @@ use derive_where::derive_where; -use rustc_index::IndexVec; -use super::{AvailableDepth, Cx, StackDepth, StackEntry}; -use crate::data_structures::{HashMap, HashSet}; - -#[derive_where(Debug, Clone, Copy; X: Cx)] -struct QueryData { - 
result: X::Result, - proof_tree: X::ProofTree, -} +use super::{AvailableDepth, Cx, NestedGoals}; +use crate::data_structures::HashMap; struct Success { - data: X::Tracked>, additional_depth: usize, + nested_goals: NestedGoals, + result: X::Tracked, +} + +struct WithOverflow { + nested_goals: NestedGoals, + result: X::Tracked, } /// The cache entry for a given input. @@ -23,24 +22,15 @@ struct Success { #[derive_where(Default; X: Cx)] struct CacheEntry { success: Option>, - /// We have to be careful when caching roots of cycles. - /// - /// See the doc comment of `StackEntry::cycle_participants` for more - /// details. - nested_goals: HashSet, - with_overflow: HashMap>>, + with_overflow: HashMap>, } #[derive_where(Debug; X: Cx)] pub(super) struct CacheData<'a, X: Cx> { pub(super) result: X::Result, - pub(super) proof_tree: X::ProofTree, pub(super) additional_depth: usize, pub(super) encountered_overflow: bool, - // FIXME: This is currently unused, but impacts the design - // by requiring a closure for `Cx::with_global_cache`. - #[allow(dead_code)] - pub(super) nested_goals: &'a HashSet, + pub(super) nested_goals: &'a NestedGoals, } #[derive_where(Default; X: Cx)] pub struct GlobalCache { @@ -55,20 +45,21 @@ impl GlobalCache { input: X::Input, result: X::Result, - proof_tree: X::ProofTree, dep_node: X::DepNodeIndex, additional_depth: usize, encountered_overflow: bool, - nested_goals: &HashSet, + nested_goals: NestedGoals, ) { - let data = cx.mk_tracked(QueryData { result, proof_tree }, dep_node); + let result = cx.mk_tracked(result, dep_node); let entry = self.map.entry(input).or_default(); - entry.nested_goals.extend(nested_goals); if encountered_overflow { - entry.with_overflow.insert(additional_depth, data); + let with_overflow = WithOverflow { nested_goals, result }; + let prev = entry.with_overflow.insert(additional_depth, with_overflow); + assert!(prev.is_none()); } else { - entry.success = Some(Success { data, additional_depth }); + let prev = entry.success.replace(Success { additional_depth, nested_goals, result }); + assert!(prev.is_none()); } } @@ -80,36 +71,37 @@ impl GlobalCache { &'a self, cx: X, input: X::Input, - stack: &IndexVec>, available_depth: AvailableDepth, + mut candidate_is_applicable: impl FnMut(&NestedGoals) -> bool, ) -> Option> { let entry = self.map.get(&input)?; - if stack.iter().any(|e| entry.nested_goals.contains(&e.input)) { - return None; - } - - if let Some(ref success) = entry.success { - if available_depth.cache_entry_is_applicable(success.additional_depth) { - let QueryData { result, proof_tree } = cx.get_tracked(&success.data); + if let Some(Success { additional_depth, ref nested_goals, ref result }) = entry.success { + if available_depth.cache_entry_is_applicable(additional_depth) + && candidate_is_applicable(nested_goals) + { return Some(CacheData { - result, - proof_tree, - additional_depth: success.additional_depth, + result: cx.get_tracked(&result), + additional_depth, encountered_overflow: false, - nested_goals: &entry.nested_goals, + nested_goals, }); } } - entry.with_overflow.get(&available_depth.0).map(|e| { - let QueryData { result, proof_tree } = cx.get_tracked(e); - CacheData { - result, - proof_tree, - additional_depth: available_depth.0, - encountered_overflow: true, - nested_goals: &entry.nested_goals, + let additional_depth = available_depth.0; + if let Some(WithOverflow { nested_goals, result }) = + entry.with_overflow.get(&additional_depth) + { + if candidate_is_applicable(nested_goals) { + return Some(CacheData { + result: 
cx.get_tracked(result), + additional_depth, + encountered_overflow: true, + nested_goals, + }); } - }) + } + + None } } diff --git a/compiler/rustc_type_ir/src/search_graph/mod.rs b/compiler/rustc_type_ir/src/search_graph/mod.rs index 4abf99b1ded8a..d47c9e725f350 100644 --- a/compiler/rustc_type_ir/src/search_graph/mod.rs +++ b/compiler/rustc_type_ir/src/search_graph/mod.rs @@ -1,19 +1,32 @@ +/// The search graph is responsible for caching and cycle detection in the trait +/// solver. Making sure that caching doesn't result in soundness bugs or unstable +/// query results is very challenging and makes this one of the most-involved +/// self-contained components of the compiler. +/// +/// We added fuzzing support to test its correctness. The fuzzers used to verify +/// the current implementation can be found in https://github.com/lcnr/search_graph_fuzz. +/// +/// This is just a quick overview of the general design, please check out the relevant +/// [rustc-dev-guide chapter](https://rustc-dev-guide.rust-lang.org/solve/caching.html) for +/// more details. Caching is split between a global cache and the per-cycle `provisional_cache`. +/// The global cache has to be completely unobservable, while the per-cycle cache may impact +/// behavior as long as the resulting behavior is still correct. +use std::cmp::Ordering; +use std::collections::BTreeSet; use std::fmt::Debug; use std::hash::Hash; use std::marker::PhantomData; -use std::mem; use derive_where::derive_where; use rustc_index::{Idx, IndexVec}; use tracing::debug; -use crate::data_structures::{HashMap, HashSet}; +use crate::data_structures::HashMap; use crate::solve::SolverMode; mod global_cache; use global_cache::CacheData; pub use global_cache::GlobalCache; -mod validate; /// The search graph does not simply use `Interner` directly /// to enable its fuzzing without having to stub the rest of @@ -22,7 +35,6 @@ mod validate; /// about `Input` and `Result` as they are implementation details /// of the search graph. pub trait Cx: Copy { - type ProofTree: Debug + Copy; type Input: Debug + Eq + Hash + Copy; type Result: Debug + Eq + Hash + Copy; @@ -43,30 +55,41 @@ pub trait Cx: Copy { ) -> R; } -pub trait ProofTreeBuilder { - fn try_apply_proof_tree(&mut self, proof_tree: X::ProofTree) -> bool; - fn on_provisional_cache_hit(&mut self); - fn on_cycle_in_stack(&mut self); - fn finalize_canonical_goal_evaluation(&mut self, cx: X) -> X::ProofTree; -} - pub trait Delegate { type Cx: Cx; + /// Whether to use the provisional cache. Set to `false` by a fuzzer when + /// validating the search graph. + const ENABLE_PROVISIONAL_CACHE: bool; + type ValidationScope; + /// Returning `Some` disables the global cache for the current goal. + /// + /// The `ValidationScope` is used when fuzzing the search graph to track + /// for which goals the global cache has been disabled. This is necessary + /// as we may otherwise ignore the global cache entry for some goal `G` + /// only to later use it, failing to detect a cycle goal and potentially + /// changing the result. 
+ fn enter_validation_scope( + cx: Self::Cx, + input: ::Input, + ) -> Option; + const FIXPOINT_STEP_LIMIT: usize; - type ProofTreeBuilder: ProofTreeBuilder; + type ProofTreeBuilder; + fn inspect_is_noop(inspect: &mut Self::ProofTreeBuilder) -> bool; + + const DIVIDE_AVAILABLE_DEPTH_ON_OVERFLOW: usize; fn recursion_limit(cx: Self::Cx) -> usize; fn initial_provisional_result( cx: Self::Cx, - kind: CycleKind, + kind: PathKind, input: ::Input, ) -> ::Result; - fn reached_fixpoint( + fn is_initial_provisional_result( cx: Self::Cx, - kind: UsageKind, + kind: PathKind, input: ::Input, - provisional_result: Option<::Result>, result: ::Result, ) -> bool; fn on_stack_overflow( @@ -79,6 +102,13 @@ pub trait Delegate { input: ::Input, ) -> ::Result; + fn is_ambiguous_result(result: ::Result) -> bool; + fn propagate_ambiguity( + cx: Self::Cx, + for_input: ::Input, + from_result: ::Result, + ) -> ::Result; + fn step_is_coinductive(cx: Self::Cx, input: ::Input) -> bool; } @@ -86,19 +116,20 @@ pub trait Delegate { /// result. In the case we return an initial provisional result depending /// on the kind of cycle. #[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub enum CycleKind { +pub enum PathKind { Coinductive, Inductive, } #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum UsageKind { - Single(CycleKind), + Single(PathKind), Mixed, } impl UsageKind { fn merge(self, other: Self) -> Self { match (self, other) { + (UsageKind::Mixed, _) | (_, UsageKind::Mixed) => UsageKind::Mixed, (UsageKind::Single(lhs), UsageKind::Single(rhs)) => { if lhs == rhs { UsageKind::Single(lhs) @@ -106,11 +137,11 @@ impl UsageKind { UsageKind::Mixed } } - (UsageKind::Mixed, UsageKind::Mixed) - | (UsageKind::Mixed, UsageKind::Single(_)) - | (UsageKind::Single(_), UsageKind::Mixed) => UsageKind::Mixed, } } + fn and_merge(&mut self, other: Self) { + *self = self.merge(other); + } } #[derive(Debug, Clone, Copy)] @@ -132,7 +163,7 @@ impl AvailableDepth { } Some(if last.encountered_overflow { - AvailableDepth(last.available_depth.0 / 2) + AvailableDepth(last.available_depth.0 / D::DIVIDE_AVAILABLE_DEPTH_ON_OVERFLOW) } else { AvailableDepth(last.available_depth.0 - 1) }) @@ -148,97 +179,181 @@ impl AvailableDepth { } } +/// All cycle heads a given goal depends on, ordered by their stack depth. +/// +/// We therefore pop the cycle heads from highest to lowest. +#[derive(Clone, Debug, PartialEq, Eq, Default)] +struct CycleHeads { + heads: BTreeSet, +} + +impl CycleHeads { + fn is_empty(&self) -> bool { + self.heads.is_empty() + } + + fn highest_cycle_head(&self) -> StackDepth { + *self.heads.last().unwrap() + } + + fn opt_highest_cycle_head(&self) -> Option { + self.heads.last().copied() + } + + fn opt_lowest_cycle_head(&self) -> Option { + self.heads.first().copied() + } + + fn remove_highest_cycle_head(&mut self) { + let last = self.heads.pop_last(); + debug_assert_ne!(last, None); + } + + fn insert(&mut self, head: StackDepth) { + self.heads.insert(head); + } + + fn merge(&mut self, heads: &CycleHeads) { + for &head in heads.heads.iter() { + self.insert(head); + } + } + + /// Update the cycle heads of a goal at depth `this` given the cycle heads + /// of a nested goal. This merges the heads after filtering the parent goal + /// itself. 
+    fn extend_from_child(&mut self, this: StackDepth, child: &CycleHeads) {
+        for &head in child.heads.iter() {
+            match head.cmp(&this) {
+                Ordering::Less => {}
+                Ordering::Equal => continue,
+                Ordering::Greater => unreachable!(),
+            }
+
+            self.insert(head);
+        }
+    }
+}
+
+/// The nested goals of each stack entry and the path from the
+/// stack entry to that nested goal.
+///
+/// We only start tracking nested goals once we've either encountered
+/// overflow or a solver cycle. This is a performance optimization to
+/// avoid tracking nested goals on the happy path.
+///
+/// We use nested goals for two reasons:
+/// - when rebasing provisional cache entries
+/// - when checking whether we have to ignore a global cache entry as reevaluating
+///   it would encounter a cycle or use a provisional cache entry.
+///
+/// We need to disable the global cache if using it would hide a cycle, as
+/// cycles can impact behavior. The cycle ABA may have different final
+/// results from the cycle BAB depending on the cycle root.
+#[derive_where(Debug, Default; X: Cx)]
+struct NestedGoals {
+    nested_goals: HashMap,
+}
+impl NestedGoals {
+    fn is_empty(&self) -> bool {
+        self.nested_goals.is_empty()
+    }
+
+    fn insert(&mut self, input: X::Input, path_from_entry: UsageKind) {
+        self.nested_goals.entry(input).or_insert(path_from_entry).and_merge(path_from_entry);
+    }
+
+    fn merge(&mut self, nested_goals: &NestedGoals) {
+        #[allow(rustc::potential_query_instability)]
+        for (input, path_from_entry) in nested_goals.iter() {
+            self.insert(input, path_from_entry);
+        }
+    }
+
+    /// Adds the nested goals of a nested goal, given the path `step_kind` from this goal
+    /// to that nested goal.
+    ///
+    /// If the path from this goal to the nested goal is inductive, the paths from this goal
+    /// to all nested goals of that nested goal are also inductive. Otherwise the paths are
+    /// the same as for the child.
+    fn extend_from_child(&mut self, step_kind: PathKind, nested_goals: &NestedGoals) {
+        #[allow(rustc::potential_query_instability)]
+        for (input, path_from_entry) in nested_goals.iter() {
+            let path_from_entry = match step_kind {
+                PathKind::Coinductive => path_from_entry,
+                PathKind::Inductive => UsageKind::Single(PathKind::Inductive),
+            };
+            self.insert(input, path_from_entry);
+        }
+    }
+
+    #[rustc_lint_query_instability]
+    #[allow(rustc::potential_query_instability)]
+    fn iter(&self) -> impl Iterator + '_ {
+        self.nested_goals.iter().map(|(i, p)| (*i, *p))
+    }
+
+    fn get(&self, input: X::Input) -> Option {
+        self.nested_goals.get(&input).copied()
+    }
+
+    fn contains(&self, input: X::Input) -> bool {
+        self.nested_goals.contains_key(&input)
+    }
+}
+
 rustc_index::newtype_index! {
     #[orderable]
     #[gate_rustc_only]
     pub struct StackDepth {}
 }

+/// Stack entries of the evaluation stack. Its fields tend to be lazily updated
+/// when popping a child goal or completely immutable.
 #[derive_where(Debug; X: Cx)]
 struct StackEntry {
     input: X::Input,
+    /// The available depth of a given goal, immutable.
     available_depth: AvailableDepth,
     /// The maximum depth reached by this stack entry, only up-to date
     /// for the top of the stack and lazily updated for the rest.
     reached_depth: StackDepth,
-    /// Whether this entry is a non-root cycle participant.
-    ///
-    /// We must not move the result of non-root cycle participants to the
-    /// global cache. We store the highest stack depth of a head of a cycle
-    /// this goal is involved in. This necessary to soundly cache its
-    /// provisional result.
- non_root_cycle_participant: Option, + /// All cycle heads this goal depends on. Lazily updated and only + /// up-to date for the top of the stack. + heads: CycleHeads, + /// Whether evaluating this goal encountered overflow. Lazily updated. encountered_overflow: bool, + /// Whether this goal has been used as the root of a cycle. This gets + /// eagerly updated when encountering a cycle. has_been_used: Option, - /// We put only the root goal of a coinductive cycle into the global cache. - /// - /// If we were to use that result when later trying to prove another cycle - /// participant, we can end up with unstable query results. - /// - /// See tests/ui/next-solver/coinduction/incompleteness-unstable-result.rs for - /// an example of where this is needed. - /// - /// There can be multiple roots on the same stack, so we need to track - /// cycle participants per root: - /// ```plain - /// A :- B - /// B :- A, C - /// C :- D - /// D :- C - /// ``` - nested_goals: HashSet, + /// The nested goals of this goal, see the doc comment of the type. + nested_goals: NestedGoals, + /// Starts out as `None` and gets set when rerunning this /// goal in case we encounter a cycle. provisional_result: Option, } -/// The provisional result for a goal which is not on the stack. -#[derive(Debug)] -struct DetachedEntry { - /// The head of the smallest non-trivial cycle involving this entry. - /// - /// Given the following rules, when proving `A` the head for - /// the provisional entry of `C` would be `B`. - /// ```plain - /// A :- B - /// B :- C - /// C :- A + B + C - /// ``` - head: StackDepth, - result: X::Result, -} - -/// Stores the stack depth of a currently evaluated goal *and* already -/// computed results for goals which depend on other goals still on the stack. -/// -/// The provisional result may depend on whether the stack above it is inductive -/// or coinductive. Because of this, we store separate provisional results for -/// each case. If an provisional entry is not applicable, it may be the case -/// that we already have provisional result while computing a goal. In this case -/// we prefer the provisional result to potentially avoid fixpoint iterations. -/// See tests/ui/traits/next-solver/cycles/mixed-cycles-2.rs for an example. -/// -/// The provisional cache can theoretically result in changes to the observable behavior, -/// see tests/ui/traits/next-solver/cycles/provisional-cache-impacts-behavior.rs. -#[derive_where(Default; X: Cx)] +/// A provisional result of an already computed goals which depends on other +/// goals still on the stack. +#[derive_where(Debug; X: Cx)] struct ProvisionalCacheEntry { - stack_depth: Option, - with_inductive_stack: Option>, - with_coinductive_stack: Option>, -} - -impl ProvisionalCacheEntry { - fn is_empty(&self) -> bool { - self.stack_depth.is_none() - && self.with_inductive_stack.is_none() - && self.with_coinductive_stack.is_none() - } + /// Whether evaluating the goal encountered overflow. This is used to + /// disable the cache entry except if the last goal on the stack is + /// already involved in this cycle. + encountered_overflow: bool, + /// All cycle heads this cache entry depends on. + heads: CycleHeads, + /// The path from the highest cycle head to this goal. + path_from_head: PathKind, + nested_goals: NestedGoals, + result: X::Result, } pub struct SearchGraph, X: Cx = ::Cx> { @@ -247,7 +362,11 @@ pub struct SearchGraph, X: Cx = ::Cx> { /// /// An element is *deeper* in the stack if its index is *lower*. 
    stack: IndexVec>,
-    provisional_cache: HashMap>,
+    /// The provisional cache contains entries for already computed goals which
+    /// still depend on goals higher-up in the stack. We don't move them to the
+    /// global cache and track them locally instead. A provisional cache entry
+    /// is only valid until the result of one of its cycle heads changes.
+    provisional_cache: HashMap>>,
     _marker: PhantomData,
 }
@@ -266,77 +385,66 @@ impl, X: Cx> SearchGraph {
         self.mode
     }

-    fn update_parent_goal(&mut self, reached_depth: StackDepth, encountered_overflow: bool) {
-        if let Some(parent) = self.stack.raw.last_mut() {
+    /// Lazily update the stack entry for the parent goal.
+    /// This behavior is shared between actually evaluating goals
+    /// and using existing global cache entries to make sure they
+    /// have the same impact on the remaining evaluation.
+    fn update_parent_goal(
+        cx: X,
+        stack: &mut IndexVec>,
+        reached_depth: StackDepth,
+        heads: &CycleHeads,
+        encountered_overflow: bool,
+        nested_goals: &NestedGoals,
+    ) {
+        if let Some(parent_index) = stack.last_index() {
+            let parent = &mut stack[parent_index];
             parent.reached_depth = parent.reached_depth.max(reached_depth);
             parent.encountered_overflow |= encountered_overflow;
+
+            parent.heads.extend_from_child(parent_index, heads);
+            let step_kind = Self::step_kind(cx, parent.input);
+            parent.nested_goals.extend_from_child(step_kind, nested_goals);
+            // Once we've got goals which encountered overflow or a cycle,
+            // we track all goals whose behavior may depend on these
+            // goals as this change may cause them to now depend on additional
+            // goals, resulting in new cycles. See the dev-guide for examples.
+            if !nested_goals.is_empty() {
+                parent.nested_goals.insert(parent.input, UsageKind::Single(PathKind::Coinductive))
+            }
         }
     }

     pub fn is_empty(&self) -> bool {
-        self.stack.is_empty()
+        if self.stack.is_empty() {
+            debug_assert!(self.provisional_cache.is_empty());
+            true
+        } else {
+            false
+        }
     }

-    fn stack_coinductive_from(
-        cx: X,
-        stack: &IndexVec>,
-        head: StackDepth,
-    ) -> bool {
-        stack.raw[head.index()..].iter().all(|entry| D::step_is_coinductive(cx, entry.input))
-    }
-
-    // When encountering a solver cycle, the result of the current goal
-    // depends on goals lower on the stack.
-    //
-    // We have to therefore be careful when caching goals. Only the final result
-    // of the cycle root, i.e. the lowest goal on the stack involved in this cycle,
-    // is moved to the global cache while all others are stored in a provisional cache.
-    //
-    // We update both the head of this cycle to rerun its evaluation until
-    // we reach a fixpoint and all other cycle participants to make sure that
-    // their result does not get moved to the global cache.
-    fn tag_cycle_participants(
-        stack: &mut IndexVec>,
-        usage_kind: Option,
-        head: StackDepth,
-    ) {
-        if let Some(usage_kind) = usage_kind {
-            stack[head].has_been_used =
-                Some(stack[head].has_been_used.map_or(usage_kind, |prev| prev.merge(usage_kind)));
-        }
-        debug_assert!(stack[head].has_been_used.is_some());
-
-        // The current root of these cycles. Note that this may not be the final
-        // root in case a later goal depends on a goal higher up the stack.
-        let mut current_root = head;
-        while let Some(parent) = stack[current_root].non_root_cycle_participant {
-            current_root = parent;
-            debug_assert!(stack[current_root].has_been_used.is_some());
-        }
+    /// The number of goals currently in the search graph. This should only be
+    /// used for debugging purposes.
+ pub fn debug_current_depth(&self) -> usize { + self.stack.len() + } - let (stack, cycle_participants) = stack.raw.split_at_mut(head.index() + 1); - let current_cycle_root = &mut stack[current_root.as_usize()]; - for entry in cycle_participants { - entry.non_root_cycle_participant = entry.non_root_cycle_participant.max(Some(head)); - current_cycle_root.nested_goals.insert(entry.input); - current_cycle_root.nested_goals.extend(mem::take(&mut entry.nested_goals)); - } + fn step_kind(cx: X, input: X::Input) -> PathKind { + if D::step_is_coinductive(cx, input) { PathKind::Coinductive } else { PathKind::Inductive } } - fn clear_dependent_provisional_results( - provisional_cache: &mut HashMap>, + /// Whether the path from `head` to the current stack entry is inductive or coinductive. + fn stack_path_kind( + cx: X, + stack: &IndexVec>, head: StackDepth, - ) { - #[allow(rustc::potential_query_instability)] - provisional_cache.retain(|_, entry| { - if entry.with_coinductive_stack.as_ref().is_some_and(|p| p.head == head) { - entry.with_coinductive_stack.take(); - } - if entry.with_inductive_stack.as_ref().is_some_and(|p| p.head == head) { - entry.with_inductive_stack.take(); - } - !entry.is_empty() - }); + ) -> PathKind { + if stack.raw[head.index()..].iter().all(|entry| D::step_is_coinductive(cx, entry.input)) { + PathKind::Coinductive + } else { + PathKind::Inductive + } } /// Probably the most involved method of the whole solver. @@ -348,89 +456,65 @@ impl, X: Cx> SearchGraph { cx: X, input: X::Input, inspect: &mut D::ProofTreeBuilder, - mut prove_goal: impl FnMut(&mut Self, &mut D::ProofTreeBuilder) -> X::Result, + mut evaluate_goal: impl FnMut(&mut Self, &mut D::ProofTreeBuilder) -> X::Result, ) -> X::Result { - self.check_invariants(); - // Check for overflow. let Some(available_depth) = AvailableDepth::allowed_depth_for_nested::(cx, &self.stack) else { - if let Some(last) = self.stack.raw.last_mut() { - last.encountered_overflow = true; - } - - debug!("encountered stack overflow"); - return D::on_stack_overflow(cx, inspect, input); + return self.handle_overflow(cx, input, inspect); }; - if let Some(result) = self.lookup_global_cache(cx, input, available_depth, inspect) { + // We check the provisional cache before checking the global cache. This simplifies + // the implementation as we can avoid worrying about cases where both the global and + // provisional cache may apply, e.g. consider the following example + // + // - xxBA overflow + // - A + // - BA cycle + // - CB :x: + if let Some(result) = self.lookup_provisional_cache(cx, input) { return result; } - // Check whether the goal is in the provisional cache. - // The provisional result may rely on the path to its cycle roots, - // so we have to check the path of the current goal matches that of - // the cache entry. - let cache_entry = self.provisional_cache.entry(input).or_default(); - if let Some(entry) = cache_entry - .with_coinductive_stack - .as_ref() - .filter(|p| Self::stack_coinductive_from(cx, &self.stack, p.head)) - .or_else(|| { - cache_entry - .with_inductive_stack - .as_ref() - .filter(|p| !Self::stack_coinductive_from(cx, &self.stack, p.head)) - }) - { - debug!("provisional cache hit"); - // We have a nested goal which is already in the provisional cache, use - // its result. We do not provide any usage kind as that should have been - // already set correctly while computing the cache entry. 
-            inspect.on_provisional_cache_hit();
-            Self::tag_cycle_participants(&mut self.stack, None, entry.head);
-            return entry.result;
-        } else if let Some(stack_depth) = cache_entry.stack_depth {
-            debug!("encountered cycle with depth {stack_depth:?}");
-            // We have a nested goal which directly relies on a goal deeper in the stack.
-            //
-            // We start by tagging all cycle participants, as that's necessary for caching.
-            //
-            // Finally we can return either the provisional response or the initial response
-            // in case we're in the first fixpoint iteration for this goal.
-            inspect.on_cycle_in_stack();
-
-            let is_coinductive_cycle = Self::stack_coinductive_from(cx, &self.stack, stack_depth);
-            let cycle_kind =
-                if is_coinductive_cycle { CycleKind::Coinductive } else { CycleKind::Inductive };
-            Self::tag_cycle_participants(
-                &mut self.stack,
-                Some(UsageKind::Single(cycle_kind)),
-                stack_depth,
-            );
-
-            // Return the provisional result or, if we're in the first iteration,
-            // start with no constraints.
-            return if let Some(result) = self.stack[stack_depth].provisional_result {
-                result
-            } else {
-                D::initial_provisional_result(cx, cycle_kind, input)
-            };
+        // Lookup the global cache unless we're building proof trees or are currently
+        // fuzzing.
+        let validate_cache = if !D::inspect_is_noop(inspect) {
+            None
+        } else if let Some(scope) = D::enter_validation_scope(cx, input) {
+            // When validating the global cache we need to track the goals for which the
+            // global cache has been disabled as it may otherwise change the result for
+            // cyclic goals. We don't care about goals which are not on the current stack
+            // so it's fine to drop their scope eagerly.
+            self.lookup_global_cache_untracked(cx, input, available_depth)
+                .inspect(|expected| debug!(?expected, "validate cache entry"))
+                .map(|r| (scope, r))
+        } else if let Some(result) = self.lookup_global_cache(cx, input, available_depth) {
+            return result;
         } else {
-            // No entry, we push this goal on the stack and try to prove it.
-            let depth = self.stack.next_index();
-            let entry = StackEntry {
-                input,
-                available_depth,
-                reached_depth: depth,
-                non_root_cycle_participant: None,
-                encountered_overflow: false,
-                has_been_used: None,
-                nested_goals: Default::default(),
-                provisional_result: None,
-            };
-            assert_eq!(self.stack.push(entry), depth);
-            cache_entry.stack_depth = Some(depth);
+            None
+        };
+
+        // Detect cycles on the stack. We do this after the global cache lookup to
+        // avoid iterating over the stack in case a goal has already been computed.
+        // This may not have an actual performance impact and we could reorder them
+        // as it may reduce the number of `nested_goals` we need to track.
+        if let Some(result) = self.check_cycle_on_stack(cx, input) {
+            debug_assert!(validate_cache.is_none(), "global cache and cycle on stack");
+            return result;
+        }
+
+        // Unfortunately, it looks like we actually have to compute this goal.
+        let depth = self.stack.next_index();
+        let entry = StackEntry {
+            input,
+            available_depth,
+            reached_depth: depth,
+            heads: Default::default(),
+            encountered_overflow: false,
+            has_been_used: None,
+            nested_goals: Default::default(),
+            provisional_result: None,
         };
+        assert_eq!(self.stack.push(entry), depth);

         // This is for global caching, so we properly track query dependencies.
         // Everything that affects the `result` should be performed within this
         // must not be added to the global cache. Notably, this is the case for
         // trait solver cycles participants.
let ((final_entry, result), dep_node) = cx.with_cached_task(|| { - for _ in 0..D::FIXPOINT_STEP_LIMIT { - match self.fixpoint_step_in_task(cx, input, inspect, &mut prove_goal) { - StepResult::Done(final_entry, result) => return (final_entry, result), - StepResult::HasChanged => debug!("fixpoint changed provisional results"), - } + self.evaluate_goal_in_task(cx, input, inspect, &mut evaluate_goal) + }); + + // We've finished computing the goal and have popped it from the stack, + // lazily update its parent goal. + Self::update_parent_goal( + cx, + &mut self.stack, + final_entry.reached_depth, + &final_entry.heads, + final_entry.encountered_overflow, + &final_entry.nested_goals, + ); + + // We're now done with this goal. We only add the root of cycles to the global cache. + // In case this goal is involved in a larger cycle add it to the provisional cache. + if final_entry.heads.is_empty() { + if let Some((_scope, expected)) = validate_cache { + // Do not try to move a goal into the cache again if we're testing + // the global cache. + assert_eq!(result, expected, "input={input:?}"); + } else if D::inspect_is_noop(inspect) { + self.insert_global_cache(cx, input, final_entry, result, dep_node) } + } else if D::ENABLE_PROVISIONAL_CACHE { + debug_assert!(validate_cache.is_none()); + let entry = self.provisional_cache.entry(input).or_default(); + let StackEntry { heads, nested_goals, encountered_overflow, .. } = final_entry; + let path_from_head = Self::stack_path_kind(cx, &self.stack, heads.highest_cycle_head()); + entry.push(ProvisionalCacheEntry { + encountered_overflow, + heads, + path_from_head, + nested_goals, + result, + }); + } else { + debug_assert!(validate_cache.is_none()); + } + + result + } + + fn handle_overflow( + &mut self, + cx: X, + input: X::Input, + inspect: &mut D::ProofTreeBuilder, + ) -> X::Result { + if let Some(last) = self.stack.raw.last_mut() { + last.encountered_overflow = true; + // If computing a goal `B` depends on another goal `A` and + // `A` has a nested goal which overflows, then computing `B` + // at the same depth, but with `A` already on the stack, + // would encounter a solver cycle instead, potentially + // changing the result. + // + // We must therefore not use the global cache entry for `B` in that case. + // See tests/ui/traits/next-solver/cycles/hidden-by-overflow.rs + last.nested_goals.insert(last.input, UsageKind::Single(PathKind::Coinductive)); + } - debug!("canonical cycle overflow"); - let current_entry = self.stack.pop().unwrap(); - debug_assert!(current_entry.has_been_used.is_none()); - let result = D::on_fixpoint_overflow(cx, input); - (current_entry, result) + debug!("encountered stack overflow"); + D::on_stack_overflow(cx, inspect, input) + } + + /// When reevaluating a goal with a changed provisional result, all provisional cache entry + /// which depend on this goal get invalidated. + fn clear_dependent_provisional_results(&mut self) { + let head = self.stack.next_index(); + #[allow(rustc::potential_query_instability)] + self.provisional_cache.retain(|_, entries| { + entries.retain(|entry| entry.heads.highest_cycle_head() != head); + !entries.is_empty() }); + } - let proof_tree = inspect.finalize_canonical_goal_evaluation(cx); + /// A necessary optimization to handle complex solver cycles. A provisional cache entry + /// relies on a set of cycle heads and the path towards these heads. 
When popping a cycle + /// head from the stack after we've finished computing it, we can't be sure that the + /// provisional cache entry is still applicable. We need to keep the cache entries to + /// prevent hangs. + /// + /// What we therefore do is check whether the cycle kind of all cycles the goal of a + /// provisional cache entry is involved in would stay the same when computing the + /// goal without its cycle head on the stack. For more details, see the relevant + /// [rustc-dev-guide chapter](https://rustc-dev-guide.rust-lang.org/solve/caching.html). + /// + /// This can be thought of rotating the sub-tree of this provisional result and changing + /// its entry point while making sure that all paths through this sub-tree stay the same. + /// + /// + /// In case the popped cycle head failed to reach a fixpoint anything which depends on + /// its provisional result is invalid. Actually discarding provisional cache entries in + /// this case would cause hangs, so we instead change the result of dependant provisional + /// cache entries to also be ambiguous. This causes some undesirable ambiguity for nested + /// goals whose result doesn't actually depend on this cycle head, but that's acceptable + /// to me. + fn rebase_provisional_cache_entries( + &mut self, + cx: X, + stack_entry: &StackEntry, + mut mutate_result: impl FnMut(X::Input, X::Result) -> X::Result, + ) { + let head = self.stack.next_index(); + #[allow(rustc::potential_query_instability)] + self.provisional_cache.retain(|&input, entries| { + entries.retain_mut(|entry| { + let ProvisionalCacheEntry { + encountered_overflow: _, + heads, + path_from_head, + nested_goals, + result, + } = entry; + if heads.highest_cycle_head() != head { + return true; + } - self.update_parent_goal(final_entry.reached_depth, final_entry.encountered_overflow); + // We don't try rebasing if the path from the current head + // to the cache entry is not coinductive or if the path from + // the cache entry to the current head is not coinductive. + // + // Both of these constraints could be weakened, but by only + // accepting coinductive paths we don't have to worry about + // changing the cycle kind of the remaining cycles. We can + // extend this in the future once there's a known issue + // caused by it. + if *path_from_head != PathKind::Coinductive + || nested_goals.get(stack_entry.input).unwrap() + != UsageKind::Single(PathKind::Coinductive) + { + return false; + } - // We're now done with this goal. In case this goal is involved in a larger cycle - // do not remove it from the provisional cache and update its provisional result. - // We only add the root of cycles to the global cache. - if let Some(head) = final_entry.non_root_cycle_participant { - let coinductive_stack = Self::stack_coinductive_from(cx, &self.stack, head); + // Merge the cycle heads of the provisional cache entry and the + // popped head. If the popped cycle head was a root, discard all + // provisional cache entries which depend on it. 
+ heads.remove_highest_cycle_head(); + heads.merge(&stack_entry.heads); + let Some(head) = heads.opt_highest_cycle_head() else { + return false; + }; - let entry = self.provisional_cache.get_mut(&input).unwrap(); - entry.stack_depth = None; - if coinductive_stack { - entry.with_coinductive_stack = Some(DetachedEntry { head, result }); - } else { - entry.with_inductive_stack = Some(DetachedEntry { head, result }); + // As we've made sure that the path from the new highest cycle + // head to the uses of the popped cycle head are fully coinductive, + // we can be sure that the paths to all nested goals of the popped + // cycle head remain the same. We can simply merge them. + nested_goals.merge(&stack_entry.nested_goals); + // We now care about the path from the next highest cycle head to the + // provisional cache entry. + *path_from_head = Self::stack_path_kind(cx, &self.stack, head); + // Mutate the result of the provisional cache entry in case we did + // not reach a fixpoint. + *result = mutate_result(input, *result); + true + }); + !entries.is_empty() + }); + } + + fn lookup_provisional_cache(&mut self, cx: X, input: X::Input) -> Option { + if !D::ENABLE_PROVISIONAL_CACHE { + return None; + } + + let entries = self.provisional_cache.get(&input)?; + for &ProvisionalCacheEntry { + encountered_overflow, + ref heads, + path_from_head, + ref nested_goals, + result, + } in entries + { + let head = heads.highest_cycle_head(); + if encountered_overflow { + // This check is overly strict and very subtle. We need to make sure that if + // a global cache entry depends on some goal without adding it to its + // `nested_goals`, that goal must never have an applicable provisional + // cache entry to avoid incorrectly applying the cache entry. + // + // As we'd have to otherwise track literally all nested goals, we only + // apply provisional cache entries which encountered overflow once the + // current goal is already part of the same cycle. This check could be + // improved but seems to be good enough for now. + let last = self.stack.raw.last().unwrap(); + if !last.heads.opt_lowest_cycle_head().is_some_and(|lowest| lowest <= head) { + continue; + } } - } else { - // When encountering a cycle, both inductive and coinductive, we only - // move the root into the global cache. We also store all other cycle - // participants involved. - // - // We must not use the global cache entry of a root goal if a cycle - // participant is on the stack. This is necessary to prevent unstable - // results. See the comment of `StackEntry::nested_goals` for - // more details. - self.provisional_cache.remove(&input); - let additional_depth = final_entry.reached_depth.as_usize() - self.stack.len(); - cx.with_global_cache(self.mode, |cache| { - cache.insert( + + // A provisional cache entry is only valid if the current path from its + // highest cycle head to the goal is the same. 
+ if path_from_head == Self::stack_path_kind(cx, &self.stack, head) { + // While we don't have to track the full depth of the provisional cache entry, + // we do have to increment the required depth by one as we'd have already failed + // with overflow otherwise + let next_index = self.stack.next_index(); + let last = &mut self.stack.raw.last_mut().unwrap(); + let path_from_entry = Self::step_kind(cx, last.input); + last.nested_goals.insert(input, UsageKind::Single(path_from_entry)); + + Self::update_parent_goal( cx, - input, - result, - proof_tree, - dep_node, - additional_depth, - final_entry.encountered_overflow, - &final_entry.nested_goals, - ) - }) + &mut self.stack, + next_index, + heads, + false, + nested_goals, + ); + debug_assert!(self.stack[head].has_been_used.is_some()); + debug!(?head, ?path_from_head, "provisional cache hit"); + return Some(result); + } } - self.check_invariants(); + None + } - result + /// Even if there is a global cache entry for a given goal, we need to make sure + /// evaluating this entry would not have ended up depending on either a goal + /// already on the stack or a provisional cache entry. + fn candidate_is_applicable( + cx: X, + stack: &IndexVec>, + provisional_cache: &HashMap>>, + nested_goals: &NestedGoals, + ) -> bool { + // If the global cache entry didn't depend on any nested goals, it always + // applies. + if nested_goals.is_empty() { + return true; + } + + // If a nested goal of the global cache entry is on the stack, we would + // definitely encounter a cycle. + if stack.iter().any(|e| nested_goals.contains(e.input)) { + debug!("cache entry not applicable due to stack"); + return false; + } + + // The global cache entry is also invalid if there's a provisional cache entry + // would apply for any of its nested goals. + #[allow(rustc::potential_query_instability)] + for (input, path_from_global_entry) in nested_goals.iter() { + let Some(entries) = provisional_cache.get(&input) else { + continue; + }; + + debug!(?input, ?path_from_global_entry, ?entries, "candidate_is_applicable"); + // A provisional cache entry is applicable if the path to + // its highest cycle head is equal to the expected path. + for &ProvisionalCacheEntry { + encountered_overflow, + ref heads, + path_from_head, + nested_goals: _, + result: _, + } in entries.iter() + { + // We don't have to worry about provisional cache entries which encountered + // overflow, see the relevant comment in `lookup_provisional_cache`. + if encountered_overflow { + continue; + } + + // A provisional cache entry only applies if the path from its highest head + // matches the path when encountering the goal. + let head = heads.highest_cycle_head(); + let full_path = match Self::stack_path_kind(cx, stack, head) { + PathKind::Coinductive => path_from_global_entry, + PathKind::Inductive => UsageKind::Single(PathKind::Inductive), + }; + + match (full_path, path_from_head) { + (UsageKind::Mixed, _) + | (UsageKind::Single(PathKind::Coinductive), PathKind::Coinductive) + | (UsageKind::Single(PathKind::Inductive), PathKind::Inductive) => { + debug!( + ?full_path, + ?path_from_head, + "cache entry not applicable due to matching paths" + ); + return false; + } + _ => debug!(?full_path, ?path_from_head, "paths don't match"), + } + } + } + + true + } + + /// Used when fuzzing the global cache. Accesses the global cache without + /// updating the state of the search graph. 
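As a side note for readers following the applicability checks above: the way `UsageKind` degrades from `Single(PathKind)` to `Mixed` is what makes a cache entry inapplicable once a goal is reached along both cycle kinds. The snippet below is a minimal, illustrative model only; the real `PathKind`/`UsageKind` definitions live in `rustc_type_ir` and carry more machinery than shown here.

```rust
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum PathKind {
    Coinductive,
    Inductive,
}

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum UsageKind {
    Single(PathKind),
    Mixed,
}

impl UsageKind {
    // Merging two usages keeps `Single` only if both sides agree on the path kind.
    fn merge(self, other: Self) -> Self {
        match (self, other) {
            (UsageKind::Single(lhs), UsageKind::Single(rhs)) if lhs == rhs => self,
            _ => UsageKind::Mixed,
        }
    }
}

fn main() {
    let co = UsageKind::Single(PathKind::Coinductive);
    let ind = UsageKind::Single(PathKind::Inductive);
    // A goal only ever reached along coinductive paths stays `Single(Coinductive)`.
    assert_eq!(co.merge(co), co);
    // Once it is also reached along an inductive path it becomes `Mixed`, and a
    // cache entry that expects a single path kind no longer applies.
    assert_eq!(co.merge(ind), UsageKind::Mixed);
}
```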
+ fn lookup_global_cache_untracked( + &self, + cx: X, + input: X::Input, + available_depth: AvailableDepth, + ) -> Option { + cx.with_global_cache(self.mode, |cache| { + cache + .get(cx, input, available_depth, |nested_goals| { + Self::candidate_is_applicable( + cx, + &self.stack, + &self.provisional_cache, + nested_goals, + ) + }) + .map(|c| c.result) + }) } /// Try to fetch a previously computed result from the global cache, @@ -508,97 +847,206 @@ impl, X: Cx> SearchGraph { cx: X, input: X::Input, available_depth: AvailableDepth, - inspect: &mut D::ProofTreeBuilder, ) -> Option { cx.with_global_cache(self.mode, |cache| { - let CacheData { - result, - proof_tree, - additional_depth, - encountered_overflow, - nested_goals: _, // FIXME: consider nested goals here. - } = cache.get(cx, input, &self.stack, available_depth)?; - - // If we're building a proof tree and the current cache entry does not - // contain a proof tree, we do not use the entry but instead recompute - // the goal. We simply overwrite the existing entry once we're done, - // caching the proof tree. - if !inspect.try_apply_proof_tree(proof_tree) { - return None; - } + let CacheData { result, additional_depth, encountered_overflow, nested_goals } = cache + .get(cx, input, available_depth, |nested_goals| { + Self::candidate_is_applicable( + cx, + &self.stack, + &self.provisional_cache, + nested_goals, + ) + })?; // Update the reached depth of the current goal to make sure // its state is the same regardless of whether we've used the // global cache or not. let reached_depth = self.stack.next_index().plus(additional_depth); - self.update_parent_goal(reached_depth, encountered_overflow); + // We don't move cycle participants to the global cache, so the + // cycle heads are always empty. + let heads = Default::default(); + Self::update_parent_goal( + cx, + &mut self.stack, + reached_depth, + &heads, + encountered_overflow, + nested_goals, + ); - debug!("global cache hit"); + debug!(?additional_depth, "global cache hit"); Some(result) }) } -} -enum StepResult { - Done(StackEntry, X::Result), - HasChanged, -} + fn check_cycle_on_stack(&mut self, cx: X, input: X::Input) -> Option { + let (head, _stack_entry) = self.stack.iter_enumerated().find(|(_, e)| e.input == input)?; + debug!("encountered cycle with depth {head:?}"); + // We have a nested goal which directly relies on a goal deeper in the stack. + // + // We start by tagging all cycle participants, as that's necessary for caching. + // + // Finally we can return either the provisional response or the initial response + // in case we're in the first fixpoint iteration for this goal. + let path_kind = Self::stack_path_kind(cx, &self.stack, head); + let usage_kind = UsageKind::Single(path_kind); + self.stack[head].has_been_used = + Some(self.stack[head].has_been_used.map_or(usage_kind, |prev| prev.merge(usage_kind))); + + // Subtle: when encountering a cyclic goal, we still first checked for overflow, + // so we have to update the reached depth. 
+ let next_index = self.stack.next_index(); + let last_index = self.stack.last_index().unwrap(); + let last = &mut self.stack[last_index]; + last.reached_depth = last.reached_depth.max(next_index); + + let path_from_entry = Self::step_kind(cx, last.input); + last.nested_goals.insert(input, UsageKind::Single(path_from_entry)); + last.nested_goals.insert(last.input, UsageKind::Single(PathKind::Coinductive)); + if last_index != head { + last.heads.insert(head); + } + + // Return the provisional result or, if we're in the first iteration, + // start with no constraints. + if let Some(result) = self.stack[head].provisional_result { + Some(result) + } else { + Some(D::initial_provisional_result(cx, path_kind, input)) + } + } + + /// Whether we've reached a fixpoint when evaluating a cycle head. + fn reached_fixpoint( + &mut self, + cx: X, + stack_entry: &StackEntry, + usage_kind: UsageKind, + result: X::Result, + ) -> bool { + if let Some(prev) = stack_entry.provisional_result { + prev == result + } else if let UsageKind::Single(kind) = usage_kind { + D::is_initial_provisional_result(cx, kind, stack_entry.input, result) + } else { + false + } + } -impl, X: Cx> SearchGraph { /// When we encounter a coinductive cycle, we have to fetch the /// result of that cycle while we are still computing it. Because /// of this we continuously recompute the cycle until the result /// of the previous iteration is equal to the final result, at which /// point we are done. - fn fixpoint_step_in_task( + fn evaluate_goal_in_task( &mut self, cx: X, input: X::Input, inspect: &mut D::ProofTreeBuilder, - prove_goal: &mut F, - ) -> StepResult - where - F: FnMut(&mut Self, &mut D::ProofTreeBuilder) -> X::Result, - { - let result = prove_goal(self, inspect); - let stack_entry = self.stack.pop().unwrap(); - debug_assert_eq!(stack_entry.input, input); - - // If the current goal is not the root of a cycle, we are done. - let Some(usage_kind) = stack_entry.has_been_used else { - return StepResult::Done(stack_entry, result); - }; + mut evaluate_goal: impl FnMut(&mut Self, &mut D::ProofTreeBuilder) -> X::Result, + ) -> (StackEntry, X::Result) { + let mut i = 0; + loop { + let result = evaluate_goal(self, inspect); + let stack_entry = self.stack.pop().unwrap(); + debug_assert_eq!(stack_entry.input, input); - // If it is a cycle head, we have to keep trying to prove it until - // we reach a fixpoint. We need to do so for all cycle heads, - // not only for the root. - // - // See tests/ui/traits/next-solver/cycles/fixpoint-rerun-all-cycle-heads.rs - // for an example. - - // Start by clearing all provisional cache entries which depend on this - // the current goal. - Self::clear_dependent_provisional_results( - &mut self.provisional_cache, - self.stack.next_index(), - ); + // If the current goal is not the root of a cycle, we are done. + // + // There are no provisional cache entries which depend on this goal. + let Some(usage_kind) = stack_entry.has_been_used else { + return (stack_entry, result); + }; - // Check whether we reached a fixpoint, either because the final result - // is equal to the provisional result of the previous iteration, or because - // this was only the root of either coinductive or inductive cycles, and the - // final result is equal to the initial response for that case. - // - // If we did not reach a fixpoint, update the provisional result and reevaluate. 
- if D::reached_fixpoint(cx, usage_kind, input, stack_entry.provisional_result, result) { - StepResult::Done(stack_entry, result) - } else { - let depth = self.stack.push(StackEntry { + // If it is a cycle head, we have to keep trying to prove it until + // we reach a fixpoint. We need to do so for all cycle heads, + // not only for the root. + // + // See tests/ui/traits/next-solver/cycles/fixpoint-rerun-all-cycle-heads.rs + // for an example. + // + // Check whether we reached a fixpoint, either because the final result + // is equal to the provisional result of the previous iteration, or because + // this was only the root of either coinductive or inductive cycles, and the + // final result is equal to the initial response for that case. + if self.reached_fixpoint(cx, &stack_entry, usage_kind, result) { + self.rebase_provisional_cache_entries(cx, &stack_entry, |_, result| result); + return (stack_entry, result); + } + + // If computing this goal results in ambiguity with no constraints, + // we do not rerun it. It's incredibly difficult to get a different + // response in the next iteration in this case. These changes would + // likely either be caused by incompleteness or can change the maybe + // cause from ambiguity to overflow. Returning ambiguity always + // preserves soundness and completeness even if the goal is be known + // to succeed or fail. + // + // This prevents exponential blowup affecting multiple major crates. + // As we only get to this branch if we haven't yet reached a fixpoint, + // we also taint all provisional cache entries which depend on the + // current goal. + if D::is_ambiguous_result(result) { + self.rebase_provisional_cache_entries(cx, &stack_entry, |input, _| { + D::propagate_ambiguity(cx, input, result) + }); + return (stack_entry, result); + }; + + // If we've reached the fixpoint step limit, we bail with overflow and taint all + // provisional cache entries which depend on the current goal. + i += 1; + if i >= D::FIXPOINT_STEP_LIMIT { + debug!("canonical cycle overflow"); + let result = D::on_fixpoint_overflow(cx, input); + self.rebase_provisional_cache_entries(cx, &stack_entry, |input, _| { + D::on_fixpoint_overflow(cx, input) + }); + return (stack_entry, result); + } + + // Clear all provisional cache entries which depend on a previous provisional + // result of this goal and rerun. + self.clear_dependent_provisional_results(); + + debug!(?result, "fixpoint changed provisional results"); + self.stack.push(StackEntry { has_been_used: None, provisional_result: Some(result), ..stack_entry }); - debug_assert_eq!(self.provisional_cache[&input].stack_depth, Some(depth)); - StepResult::HasChanged } } + + /// When encountering a cycle, both inductive and coinductive, we only + /// move the root into the global cache. We also store all other cycle + /// participants involved. + /// + /// We must not use the global cache entry of a root goal if a cycle + /// participant is on the stack. This is necessary to prevent unstable + /// results. See the comment of `StackEntry::nested_goals` for + /// more details. 
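The fixpoint handling above is easier to follow with the loop shape pulled out on its own. The sketch below is a simplified analogy under the assumption that results are plain integers; it keeps only the "rerun with the previous provisional result until it stabilises, give up at the step limit" part and omits the ambiguity short-circuit and the rebasing of provisional cache entries.

```rust
/// Toy analogue of `evaluate_goal_in_task`: rerun `evaluate` with the previous
/// provisional result until the result stops changing, or return `None`
/// ("overflow") once the step limit is exceeded.
fn fixpoint(mut evaluate: impl FnMut(u32) -> u32, initial: u32, step_limit: usize) -> Option<u32> {
    let mut provisional = initial;
    for _ in 0..step_limit {
        let result = evaluate(provisional);
        if result == provisional {
            return Some(result); // reached a fixpoint
        }
        provisional = result; // update the provisional result and rerun
    }
    None // fixpoint step limit exceeded
}

fn main() {
    // Converges: 0 -> 3 -> 4 -> 5 -> 5.
    assert_eq!(fixpoint(|x| x / 2 + 3, 0, 16), Some(5));
    // Never stabilises within the step limit.
    assert_eq!(fixpoint(|x| x + 1, 0, 16), None);
}
```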
+ fn insert_global_cache( + &mut self, + cx: X, + input: X::Input, + final_entry: StackEntry, + result: X::Result, + dep_node: X::DepNodeIndex, + ) { + let additional_depth = final_entry.reached_depth.as_usize() - self.stack.len(); + debug!(?final_entry, ?result, "insert global cache"); + cx.with_global_cache(self.mode, |cache| { + cache.insert( + cx, + input, + result, + dep_node, + additional_depth, + final_entry.encountered_overflow, + final_entry.nested_goals, + ) + }) + } } diff --git a/compiler/rustc_type_ir/src/search_graph/validate.rs b/compiler/rustc_type_ir/src/search_graph/validate.rs deleted file mode 100644 index 1ae806834ba7d..0000000000000 --- a/compiler/rustc_type_ir/src/search_graph/validate.rs +++ /dev/null @@ -1,75 +0,0 @@ -use super::*; - -impl, X: Cx> SearchGraph { - #[allow(rustc::potential_query_instability)] - pub(super) fn check_invariants(&self) { - if !cfg!(debug_assertions) { - return; - } - - let SearchGraph { mode: _, stack, provisional_cache, _marker } = self; - if stack.is_empty() { - assert!(provisional_cache.is_empty()); - } - - for (depth, entry) in stack.iter_enumerated() { - let StackEntry { - input, - available_depth: _, - reached_depth: _, - non_root_cycle_participant, - encountered_overflow: _, - has_been_used, - ref nested_goals, - provisional_result, - } = *entry; - let cache_entry = provisional_cache.get(&entry.input).unwrap(); - assert_eq!(cache_entry.stack_depth, Some(depth)); - if let Some(head) = non_root_cycle_participant { - assert!(head < depth); - assert!(nested_goals.is_empty()); - assert_ne!(stack[head].has_been_used, None); - - let mut current_root = head; - while let Some(parent) = stack[current_root].non_root_cycle_participant { - current_root = parent; - } - assert!(stack[current_root].nested_goals.contains(&input)); - } - - if !nested_goals.is_empty() { - assert!(provisional_result.is_some() || has_been_used.is_some()); - for entry in stack.iter().take(depth.as_usize()) { - assert_eq!(nested_goals.get(&entry.input), None); - } - } - } - - for (&input, entry) in &self.provisional_cache { - let ProvisionalCacheEntry { stack_depth, with_coinductive_stack, with_inductive_stack } = - entry; - assert!( - stack_depth.is_some() - || with_coinductive_stack.is_some() - || with_inductive_stack.is_some() - ); - - if let &Some(stack_depth) = stack_depth { - assert_eq!(stack[stack_depth].input, input); - } - - let check_detached = |detached_entry: &DetachedEntry| { - let DetachedEntry { head, result: _ } = *detached_entry; - assert_ne!(stack[head].has_been_used, None); - }; - - if let Some(with_coinductive_stack) = with_coinductive_stack { - check_detached(with_coinductive_stack); - } - - if let Some(with_inductive_stack) = with_inductive_stack { - check_detached(with_inductive_stack); - } - } - } -} diff --git a/compiler/rustc_type_ir/src/solve/inspect.rs b/compiler/rustc_type_ir/src/solve/inspect.rs index 47d5e0dace71f..099c66f6bdc81 100644 --- a/compiler/rustc_type_ir/src/solve/inspect.rs +++ b/compiler/rustc_type_ir/src/solve/inspect.rs @@ -69,9 +69,7 @@ pub struct CanonicalGoalEvaluation { #[derive_where(PartialEq, Eq, Hash, Debug; I: Interner)] pub enum CanonicalGoalEvaluationKind { Overflow, - CycleInStack, - ProvisionalCacheHit, - Evaluation { final_revision: I::CanonicalGoalEvaluationStepRef }, + Evaluation { final_revision: CanonicalGoalEvaluationStep }, } #[derive_where(PartialEq, Eq, Hash, Debug; I: Interner)] diff --git a/compiler/rustc_type_ir/src/solve/mod.rs b/compiler/rustc_type_ir/src/solve/mod.rs index 
444fd01f01281..00fc6ba1c5c8f 100644 --- a/compiler/rustc_type_ir/src/solve/mod.rs +++ b/compiler/rustc_type_ir/src/solve/mod.rs @@ -340,11 +340,3 @@ impl MaybeCause { } } } - -#[derive_where(PartialEq, Eq, Debug; I: Interner)] -pub struct CacheData { - pub result: QueryResult, - pub proof_tree: Option, - pub additional_depth: usize, - pub encountered_overflow: bool, -} diff --git a/library/Cargo.lock b/library/Cargo.lock index b36399d880e5d..815f5bb1385bf 100644 --- a/library/Cargo.lock +++ b/library/Cargo.lock @@ -339,6 +339,7 @@ dependencies = [ "std_detect", "unwind", "wasi", + "windows-targets 0.0.0", ] [[package]] @@ -421,9 +422,13 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets", + "windows-targets 0.52.5", ] +[[package]] +name = "windows-targets" +version = "0.0.0" + [[package]] name = "windows-targets" version = "0.52.5" diff --git a/library/Cargo.toml b/library/Cargo.toml index c4513b4c127d8..d8ece6b0ebd3e 100644 --- a/library/Cargo.toml +++ b/library/Cargo.toml @@ -8,6 +8,7 @@ members = [ exclude = [ # stdarch has its own Cargo workspace "stdarch", + "windows_targets" ] [profile.release.package.compiler_builtins] diff --git a/library/core/src/ops/coroutine.rs b/library/core/src/ops/coroutine.rs index 13df888d24c5c..c7d596d74c383 100644 --- a/library/core/src/ops/coroutine.rs +++ b/library/core/src/ops/coroutine.rs @@ -69,6 +69,7 @@ pub enum CoroutineState { #[lang = "coroutine"] #[unstable(feature = "coroutine_trait", issue = "43122")] #[fundamental] +#[must_use = "coroutines are lazy and do nothing unless resumed"] pub trait Coroutine { /// The type of value this coroutine yields. /// diff --git a/library/std/Cargo.toml b/library/std/Cargo.toml index 2ce284c85e2d6..7f23681ee45fc 100644 --- a/library/std/Cargo.toml +++ b/library/std/Cargo.toml @@ -57,6 +57,9 @@ object = { version = "0.36.0", default-features = false, optional = true, featur 'archive', ] } +[target.'cfg(windows)'.dependencies.windows-targets] +path = "../windows_targets" + [dev-dependencies] rand = { version = "0.8.5", default-features = false, features = ["alloc"] } rand_xorshift = "0.3.0" @@ -116,7 +119,7 @@ std_detect_env_override = ["std_detect/std_detect_env_override"] # Enable using raw-dylib for Windows imports. # This will eventually be the default. 
-windows_raw_dylib = [] +windows_raw_dylib = ["windows-targets/windows_raw_dylib"] [package.metadata.fortanix-sgx] # Maximum possible number of threads when testing diff --git a/library/std/src/env.rs b/library/std/src/env.rs index 50ae83090c7e1..80890e61471c6 100644 --- a/library/std/src/env.rs +++ b/library/std/src/env.rs @@ -355,7 +355,13 @@ impl Error for VarError { /// } /// assert_eq!(env::var(key), Ok("VALUE".to_string())); /// ``` -#[rustc_deprecated_safe_2024] +#[cfg_attr(bootstrap, rustc_deprecated_safe_2024)] +#[cfg_attr( + not(bootstrap), + rustc_deprecated_safe_2024( + audit_that = "the environment access only happens in single-threaded code" + ) +)] #[stable(feature = "env", since = "1.0.0")] pub unsafe fn set_var, V: AsRef>(key: K, value: V) { let (key, value) = (key.as_ref(), value.as_ref()); @@ -419,7 +425,13 @@ pub unsafe fn set_var, V: AsRef>(key: K, value: V) { /// } /// assert!(env::var(key).is_err()); /// ``` -#[rustc_deprecated_safe_2024] +#[cfg_attr(bootstrap, rustc_deprecated_safe_2024)] +#[cfg_attr( + not(bootstrap), + rustc_deprecated_safe_2024( + audit_that = "the environment access only happens in single-threaded code" + ) +)] #[stable(feature = "env", since = "1.0.0")] pub unsafe fn remove_var>(key: K) { let key = key.as_ref(); diff --git a/library/std/src/sys/pal/windows/alloc.rs b/library/std/src/sys/pal/windows/alloc.rs index 92b68b26032c6..2205885687dea 100644 --- a/library/std/src/sys/pal/windows/alloc.rs +++ b/library/std/src/sys/pal/windows/alloc.rs @@ -4,7 +4,7 @@ use crate::alloc::{GlobalAlloc, Layout, System}; use crate::ffi::c_void; use crate::ptr; use crate::sync::atomic::{AtomicPtr, Ordering}; -use crate::sys::c::{self, windows_targets}; +use crate::sys::c; use crate::sys::common::alloc::{realloc_fallback, MIN_ALIGN}; #[cfg(test)] diff --git a/library/std/src/sys/pal/windows/c.rs b/library/std/src/sys/pal/windows/c.rs index 08b75186aef90..2f5d75dc4bc23 100644 --- a/library/std/src/sys/pal/windows/c.rs +++ b/library/std/src/sys/pal/windows/c.rs @@ -8,8 +8,6 @@ use core::ffi::{c_uint, c_ulong, c_ushort, c_void, CStr}; use core::{mem, ptr}; -pub(super) mod windows_targets; - mod windows_sys; pub use windows_sys::*; diff --git a/library/std/src/sys/pal/windows/c/windows_sys.rs b/library/std/src/sys/pal/windows/c/windows_sys.rs index 9f22f54819509..529c96a0e1e6b 100644 --- a/library/std/src/sys/pal/windows/c/windows_sys.rs +++ b/library/std/src/sys/pal/windows/c/windows_sys.rs @@ -3317,4 +3317,3 @@ pub struct WSADATA { #[cfg(target_arch = "arm")] pub enum CONTEXT {} // ignore-tidy-filelength -use super::windows_targets; diff --git a/library/windows_targets/Cargo.toml b/library/windows_targets/Cargo.toml new file mode 100644 index 0000000000000..94d7c8210647c --- /dev/null +++ b/library/windows_targets/Cargo.toml @@ -0,0 +1,10 @@ +[package] +name = "windows-targets" +description = "A drop-in replacement for the real windows-targets crate for use in std only." +version = "0.0.0" +edition = "2021" + +[features] +# Enable using raw-dylib for Windows imports. +# This will eventually be the default. +windows_raw_dylib = [] diff --git a/library/std/src/sys/pal/windows/c/windows_targets.rs b/library/windows_targets/src/lib.rs similarity index 95% rename from library/std/src/sys/pal/windows/c/windows_targets.rs rename to library/windows_targets/src/lib.rs index 252bceb70942b..1965b6cf4ce8f 100644 --- a/library/std/src/sys/pal/windows/c/windows_targets.rs +++ b/library/windows_targets/src/lib.rs @@ -2,6 +2,10 @@ //! //! 
This is a simple wrapper around an `extern` block with a `#[link]` attribute. //! It's very roughly equivalent to the windows-targets crate. +#![no_std] +#![no_core] +#![feature(decl_macro)] +#![feature(no_core)] #[cfg(feature = "windows_raw_dylib")] pub macro link { diff --git a/src/librustdoc/config.rs b/src/librustdoc/config.rs index 9c7a9f8467f5a..d599fead2668a 100644 --- a/src/librustdoc/config.rs +++ b/src/librustdoc/config.rs @@ -733,9 +733,11 @@ impl Options { let html_no_source = matches.opt_present("html-no-source"); if generate_link_to_definition && (show_coverage || output_format != OutputFormat::Html) { - dcx.fatal( - "--generate-link-to-definition option can only be used with HTML output format", - ); + dcx.struct_warn( + "`--generate-link-to-definition` option can only be used with HTML output format", + ) + .with_note("`--generate-link-to-definition` option will be ignored") + .emit(); } let scrape_examples_options = ScrapeExamplesOptions::new(matches, dcx); diff --git a/src/tools/compiletest/src/json.rs b/src/tools/compiletest/src/json.rs index 76b83f02b149a..0da93dcafa20b 100644 --- a/src/tools/compiletest/src/json.rs +++ b/src/tools/compiletest/src/json.rs @@ -1,5 +1,4 @@ //! These structs are a subset of the ones found in `rustc_errors::json`. -//! They are only used for deserialization of JSON output provided by libtest. use std::path::{Path, PathBuf}; use std::str::FromStr; @@ -127,11 +126,10 @@ pub fn extract_rendered(output: &str) -> String { // Ignore the notification. None } else { - print!( - "failed to decode compiler output as json: line: {}\noutput: {}", - line, output - ); - panic!() + // This function is called for both compiler and non-compiler output, + // so if the line isn't recognized as JSON from the compiler then + // just print it as-is. + Some(format!("{line}\n")) } } else { // preserve non-JSON lines, such as ICEs diff --git a/src/tools/generate-windows-sys/src/main.rs b/src/tools/generate-windows-sys/src/main.rs index fe1b1bd5ceb14..6dbf29d957f12 100644 --- a/src/tools/generate-windows-sys/src/main.rs +++ b/src/tools/generate-windows-sys/src/main.rs @@ -35,7 +35,6 @@ fn main() -> Result<(), Box> { let mut f = std::fs::File::options().append(true).open("windows_sys.rs")?; f.write_all(ARM32_SHIM.as_bytes())?; writeln!(&mut f, "// ignore-tidy-filelength")?; - writeln!(&mut f, "use super::windows_targets;")?; Ok(()) } diff --git a/src/tools/run-make-support/src/external_deps/llvm.rs b/src/tools/run-make-support/src/external_deps/llvm.rs index dc651fdd8205a..7af79443affd6 100644 --- a/src/tools/run-make-support/src/external_deps/llvm.rs +++ b/src/tools/run-make-support/src/external_deps/llvm.rs @@ -48,6 +48,12 @@ pub fn llvm_bcanalyzer() -> LlvmBcanalyzer { LlvmBcanalyzer::new() } +/// Construct a new `llvm-dwarfdump` invocation. This assumes that `llvm-dwarfdump` is available +/// at `$LLVM_BIN_DIR/llvm-dwarfdump`. +pub fn llvm_dwarfdump() -> LlvmDwarfdump { + LlvmDwarfdump::new() +} + /// A `llvm-readobj` invocation builder. #[derive(Debug)] #[must_use] @@ -97,6 +103,13 @@ pub struct LlvmBcanalyzer { cmd: Command, } +/// A `llvm-dwarfdump` invocation builder. 
+#[derive(Debug)] +#[must_use] +pub struct LlvmDwarfdump { + cmd: Command, +} + crate::macros::impl_common_helpers!(LlvmReadobj); crate::macros::impl_common_helpers!(LlvmProfdata); crate::macros::impl_common_helpers!(LlvmFilecheck); @@ -104,6 +117,7 @@ crate::macros::impl_common_helpers!(LlvmObjdump); crate::macros::impl_common_helpers!(LlvmAr); crate::macros::impl_common_helpers!(LlvmNm); crate::macros::impl_common_helpers!(LlvmBcanalyzer); +crate::macros::impl_common_helpers!(LlvmDwarfdump); /// Generate the path to the bin directory of LLVM. #[must_use] @@ -317,3 +331,19 @@ impl LlvmBcanalyzer { self } } + +impl LlvmDwarfdump { + /// Construct a new `llvm-dwarfdump` invocation. This assumes that `llvm-dwarfdump` is available + /// at `$LLVM_BIN_DIR/llvm-dwarfdump`. + pub fn new() -> Self { + let llvm_dwarfdump = llvm_bin_dir().join("llvm-dwarfdump"); + let cmd = Command::new(llvm_dwarfdump); + Self { cmd } + } + + /// Provide an input file. + pub fn input>(&mut self, path: P) -> &mut Self { + self.cmd.arg(path.as_ref()); + self + } +} diff --git a/src/tools/run-make-support/src/lib.rs b/src/tools/run-make-support/src/lib.rs index a44dd00ad7986..4d0c1b0930ced 100644 --- a/src/tools/run-make-support/src/lib.rs +++ b/src/tools/run-make-support/src/lib.rs @@ -49,8 +49,9 @@ pub use c_build::{build_native_dynamic_lib, build_native_static_lib, build_nativ pub use clang::{clang, Clang}; pub use htmldocck::htmldocck; pub use llvm::{ - llvm_ar, llvm_bcanalyzer, llvm_filecheck, llvm_nm, llvm_objdump, llvm_profdata, llvm_readobj, - LlvmAr, LlvmBcanalyzer, LlvmFilecheck, LlvmNm, LlvmObjdump, LlvmProfdata, LlvmReadobj, + llvm_ar, llvm_bcanalyzer, llvm_dwarfdump, llvm_filecheck, llvm_nm, llvm_objdump, llvm_profdata, + llvm_readobj, LlvmAr, LlvmBcanalyzer, LlvmDwarfdump, LlvmFilecheck, LlvmNm, LlvmObjdump, + LlvmProfdata, LlvmReadobj, }; pub use python::python_command; pub use rustc::{aux_build, bare_rustc, rustc, Rustc}; diff --git a/src/tools/tidy/src/allowed_run_make_makefiles.txt b/src/tools/tidy/src/allowed_run_make_makefiles.txt index 14f0a9cd23d21..2d25de46f6e4a 100644 --- a/src/tools/tidy/src/allowed_run_make_makefiles.txt +++ b/src/tools/tidy/src/allowed_run_make_makefiles.txt @@ -16,7 +16,6 @@ run-make/macos-deployment-target/Makefile run-make/min-global-align/Makefile run-make/native-link-modifier-bundle/Makefile run-make/no-alloc-shim/Makefile -run-make/remap-path-prefix-dwarf/Makefile run-make/reproducible-build/Makefile run-make/rlib-format-packed-bundled-libs/Makefile run-make/split-debuginfo/Makefile diff --git a/src/tools/tidy/src/pal.rs b/src/tools/tidy/src/pal.rs index b7ddf47186d99..c650fd0eec6d8 100644 --- a/src/tools/tidy/src/pal.rs +++ b/src/tools/tidy/src/pal.rs @@ -36,6 +36,7 @@ use crate::walk::{filter_dirs, walk}; // Paths that may contain platform-specific code. const EXCEPTION_PATHS: &[&str] = &[ + "library/windows_targets", "library/panic_abort", "library/panic_unwind", "library/unwind", diff --git a/tests/run-make/remap-path-prefix-dwarf/Makefile b/tests/run-make/remap-path-prefix-dwarf/Makefile deleted file mode 100644 index 8905a00ea2878..0000000000000 --- a/tests/run-make/remap-path-prefix-dwarf/Makefile +++ /dev/null @@ -1,112 +0,0 @@ -# This test makes sure that --remap-path-prefix has the expected effects on paths in debuginfo. -# It tests several cases, each of them has a detailed description attached to it. - -# ignore-windows - -include ../tools.mk - -SRC_DIR := $(abspath .) -SRC_DIR_PARENT := $(abspath ..) 
- -ifeq ($(UNAME),Darwin) - DEBUGINFOOPTS := -Csplit-debuginfo=off -else - DEBUGINFOOPTS := -endif - -all: \ - abs_input_outside_working_dir \ - rel_input_remap_working_dir \ - rel_input_remap_working_dir_scope \ - rel_input_remap_working_dir_parent \ - rel_input_remap_working_dir_child \ - rel_input_remap_working_dir_diagnostics \ - abs_input_inside_working_dir \ - abs_input_inside_working_dir_scope \ - abs_input_outside_working_dir - -# The compiler is called with an *ABSOLUTE PATH* as input, and that absolute path *is* within -# the working directory of the compiler. We are remapping the path that contains `src`. -abs_input_inside_working_dir: - # We explicitly switch to a directory that *is* a prefix of the directory our - # source code is contained in. - cd $(SRC_DIR) && $(RUSTC) $(SRC_DIR)/src/quux.rs -o "$(TMPDIR)/abs_input_inside_working_dir.rlib" -Cdebuginfo=2 --remap-path-prefix $(SRC_DIR)=REMAPPED - # We expect the path to the main source file to be remapped. - "$(LLVM_BIN_DIR)"/llvm-dwarfdump $(TMPDIR)/abs_input_inside_working_dir.rlib | $(CGREP) "REMAPPED/src/quux.rs" - # No weird duplication of remapped components (see #78479) - "$(LLVM_BIN_DIR)"/llvm-dwarfdump $(TMPDIR)/abs_input_inside_working_dir.rlib | $(CGREP) -v "REMAPPED/REMAPPED" - -# The compiler is called with an *ABSOLUTE PATH* as input, and that absolute path *is* within -# the working directory of the compiler. We are remapping the path that contains `src`. -abs_input_inside_working_dir_scope: - # We explicitly switch to a directory that *is* a prefix of the directory our - # source code is contained in. - cd $(SRC_DIR) && $(RUSTC) $(SRC_DIR)/src/quux.rs -o "$(TMPDIR)/abs_input_inside_working_dir_scope.rlib" -Cdebuginfo=2 --remap-path-prefix $(SRC_DIR)=REMAPPED -Zremap-path-scope=object $(DEBUGINFOOPTS) - # We expect the path to the main source file to be remapped. - "$(LLVM_BIN_DIR)"/llvm-dwarfdump $(TMPDIR)/abs_input_inside_working_dir_scope.rlib | $(CGREP) "REMAPPED/src/quux.rs" - # No weird duplication of remapped components (see #78479) - "$(LLVM_BIN_DIR)"/llvm-dwarfdump $(TMPDIR)/abs_input_inside_working_dir_scope.rlib | $(CGREP) -v "REMAPPED/REMAPPED" - -# The compiler is called with an *ABSOLUTE PATH* as input, and that absolute path is *not* within -# the working directory of the compiler. We are remapping both the path that contains `src` and -# the working directory to the same thing. This setup corresponds to a workaround that is needed -# when trying to remap everything to something that looks like a local path. -# Relative paths are interpreted as relative to the compiler's working directory (e.g. in -# debuginfo). If we also remap the working directory, the compiler strip it from other paths so -# that the final outcome is the desired one again. -abs_input_outside_working_dir: - # We explicitly switch to a directory that is *not* a prefix of the directory our - # source code is contained in. - cd $(TMPDIR) && $(RUSTC) $(SRC_DIR)/src/quux.rs -o "$(TMPDIR)/abs_input_outside_working_dir.rlib" -Cdebuginfo=2 --remap-path-prefix $(SRC_DIR)=REMAPPED --remap-path-prefix $(TMPDIR)=REMAPPED - "$(LLVM_BIN_DIR)"/llvm-dwarfdump $(TMPDIR)/abs_input_outside_working_dir.rlib | $(CGREP) "REMAPPED/src/quux.rs" - # No weird duplication of remapped components (see #78479) - "$(LLVM_BIN_DIR)"/llvm-dwarfdump $(TMPDIR)/abs_input_outside_working_dir.rlib | $(CGREP) -v "REMAPPED/REMAPPED" - -# The compiler is called with a *RELATIVE PATH* as input. 
We are remapping the working directory of -# the compiler, which naturally is an implicit prefix of our relative input path. Debuginfo will -# expand the relative path to an absolute path and we expect the working directory to be remapped -# in that expansion. -rel_input_remap_working_dir: - cd $(SRC_DIR) && $(RUSTC) src/quux.rs -o "$(TMPDIR)/rel_input_remap_working_dir.rlib" -Cdebuginfo=2 --remap-path-prefix "$(SRC_DIR)=REMAPPED" - "$(LLVM_BIN_DIR)"/llvm-dwarfdump "$(TMPDIR)/rel_input_remap_working_dir.rlib" | $(CGREP) "REMAPPED/src/quux.rs" - # No weird duplication of remapped components (see #78479) - "$(LLVM_BIN_DIR)"/llvm-dwarfdump "$(TMPDIR)/rel_input_remap_working_dir.rlib" | $(CGREP) -v "REMAPPED/REMAPPED" - -# The compiler is called with a *RELATIVE PATH* as input. We are remapping the working directory of -# the compiler, which naturally is an implicit prefix of our relative input path. Debuginfo will -# expand the relative path to an absolute path and we expect the working directory to be remapped -# in that expansion. -rel_input_remap_working_dir_scope: - cd $(SRC_DIR) && $(RUSTC) src/quux.rs -o "$(TMPDIR)/rel_input_remap_working_dir_scope.rlib" -Cdebuginfo=2 --remap-path-prefix "$(SRC_DIR)=REMAPPED" -Zremap-path-scope=object $(DEBUGINFOOPTS) - "$(LLVM_BIN_DIR)"/llvm-dwarfdump "$(TMPDIR)/rel_input_remap_working_dir_scope.rlib" | $(CGREP) "REMAPPED/src/quux.rs" - # No weird duplication of remapped components (see #78479) - "$(LLVM_BIN_DIR)"/llvm-dwarfdump "$(TMPDIR)/rel_input_remap_working_dir_scope.rlib" | $(CGREP) -v "REMAPPED/REMAPPED" - -rel_input_remap_working_dir_diagnostics: - cd $(SRC_DIR) && $(RUSTC) src/quux.rs -o "$(TMPDIR)/rel_input_remap_working_dir_scope.rlib" -Cdebuginfo=2 --remap-path-prefix "$(SRC_DIR)=REMAPPED" -Zremap-path-scope=diagnostics $(DEBUGINFOOPTS) - "$(LLVM_BIN_DIR)"/llvm-dwarfdump "$(TMPDIR)/rel_input_remap_working_dir_scope.rlib" | $(CGREP) -v "REMAPPED/src/quux.rs" - "$(LLVM_BIN_DIR)"/llvm-dwarfdump "$(TMPDIR)/rel_input_remap_working_dir_scope.rlib" | $(CGREP) -v "REMAPPED/REMAPPED" - -# The compiler is called with a *RELATIVE PATH* as input. We are remapping a *SUB-DIRECTORY* of the -# compiler's working directory. This test makes sure that that directory is remapped even though it -# won't actually show up in this form in the compiler's SourceMap and instead is only constructed -# on demand during debuginfo generation. -rel_input_remap_working_dir_child: - cd $(SRC_DIR) && $(RUSTC) src/quux.rs -o "$(TMPDIR)/rel_input_remap_working_dir_child.rlib" -Cdebuginfo=2 --remap-path-prefix "$(SRC_DIR)/src=REMAPPED" - # We expect `src/quux.rs` to have been remapped to `REMAPPED/quux.rs`. - "$(LLVM_BIN_DIR)"/llvm-dwarfdump "$(TMPDIR)/rel_input_remap_working_dir_child.rlib" | $(CGREP) "REMAPPED/quux.rs" - # We don't want to find the path that we just remapped anywhere in the DWARF - "$(LLVM_BIN_DIR)"/llvm-dwarfdump "$(TMPDIR)/rel_input_remap_working_dir_child.rlib" | $(CGREP) -v "$(SRC_DIR)/src" - # No weird duplication of remapped components (see #78479) - "$(LLVM_BIN_DIR)"/llvm-dwarfdump "$(TMPDIR)/rel_input_remap_working_dir_child.rlib" | $(CGREP) -v "REMAPPED/REMAPPED" - -# The compiler is called with a *RELATIVE PATH* as input. We are remapping a *PARENT DIRECTORY* of -# the compiler's working directory. 
-rel_input_remap_working_dir_parent: - cd $(SRC_DIR) && $(RUSTC) src/quux.rs -o "$(TMPDIR)/rel_input_remap_working_dir_parent.rlib" -Cdebuginfo=2 --remap-path-prefix "$(SRC_DIR_PARENT)=REMAPPED" - # We expect `src/quux.rs` to have been remapped to `REMAPPED/remap-path-prefix-dwarf/src/quux.rs`. - "$(LLVM_BIN_DIR)"/llvm-dwarfdump "$(TMPDIR)/rel_input_remap_working_dir_parent.rlib" | $(CGREP) "REMAPPED/remap-path-prefix-dwarf/src/quux.rs" - # We don't want to find the path that we just remapped anywhere in the DWARF - "$(LLVM_BIN_DIR)"/llvm-dwarfdump "$(TMPDIR)/rel_input_remap_working_dir_parent.rlib" | $(CGREP) -v "$(SRC_DIR_PARENT)" - # No weird duplication of remapped components (see #78479) - "$(LLVM_BIN_DIR)"/llvm-dwarfdump "$(TMPDIR)/rel_input_remap_working_dir_parent.rlib" | $(CGREP) -v "REMAPPED/REMAPPED" diff --git a/tests/run-make/remap-path-prefix-dwarf/rmake.rs b/tests/run-make/remap-path-prefix-dwarf/rmake.rs new file mode 100644 index 0000000000000..ede1d61574257 --- /dev/null +++ b/tests/run-make/remap-path-prefix-dwarf/rmake.rs @@ -0,0 +1,202 @@ +// This test makes sure that --remap-path-prefix has the expected effects on paths in debuginfo. +// We explicitly switch to a directory that *is* a prefix of the directory our +// source code is contained in. +// It tests several cases, each of them has a detailed description attached to it. +// See https://github.com/rust-lang/rust/pull/96867 + +//@ ignore-windows +// Reason: the remap path prefix is not printed in the dwarf dump. + +use run_make_support::{cwd, is_darwin, llvm_dwarfdump, rust_lib_name, rustc}; + +fn main() { + // The compiler is called with an *ABSOLUTE PATH* as input, and that absolute path *is* within + // the working directory of the compiler. We are remapping the path that contains `src`. + check_dwarf(DwarfTest { + lib_name: "abs_input_inside_working_dir", + input_path: PathType::Absolute, + scope: None, + remap_path_prefix: PrefixType::Regular(format!("{}=REMAPPED", cwd().display())), + dwarf_test: DwarfDump::ContainsSrcPath, + }); + check_dwarf(DwarfTest { + lib_name: "abs_input_inside_working_dir_scope", + input_path: PathType::Absolute, + scope: Some(ScopeType::Object), + remap_path_prefix: PrefixType::Regular(format!("{}=REMAPPED", cwd().display())), + dwarf_test: DwarfDump::ContainsSrcPath, + }); + // The compiler is called with an *ABSOLUTE PATH* as input, and that absolute path is *not* + // within the working directory of the compiler. We are remapping both the path that contains + // `src` and the working directory to the same thing. This setup corresponds to a workaround + // that is needed when trying to remap everything to something that looks like a local + // path. Relative paths are interpreted as relative to the compiler's working directory (e.g. + // in debuginfo). If we also remap the working directory, the compiler strip it from other + // paths so that the final outcome is the desired one again. + check_dwarf(DwarfTest { + lib_name: "abs_input_outside_working_dir", + input_path: PathType::Absolute, + scope: None, + remap_path_prefix: PrefixType::Dual(( + format!("{}=REMAPPED", cwd().display()), + "rmake_out=REMAPPED".to_owned(), + )), + dwarf_test: DwarfDump::ContainsSrcPath, + }); + // The compiler is called with a *RELATIVE PATH* as input. We are remapping the working + // directory of the compiler, which naturally is an implicit prefix of our relative input path. 
+ // Debuginfo will expand the relative path to an absolute path and we expect the working + // directory to be remapped in that expansion. + check_dwarf(DwarfTest { + lib_name: "rel_input_remap_working_dir", + input_path: PathType::Relative, + scope: None, + remap_path_prefix: PrefixType::Regular(format!("{}=REMAPPED", cwd().display())), + dwarf_test: DwarfDump::ContainsSrcPath, + }); + check_dwarf(DwarfTest { + lib_name: "rel_input_remap_working_dir_scope", + input_path: PathType::Relative, + scope: Some(ScopeType::Object), + remap_path_prefix: PrefixType::Regular(format!("{}=REMAPPED", cwd().display())), + dwarf_test: DwarfDump::ContainsSrcPath, + }); + check_dwarf(DwarfTest { + lib_name: "rel_input_remap_working_dir_scope", + input_path: PathType::Relative, + scope: Some(ScopeType::Diagnostics), + remap_path_prefix: PrefixType::Regular(format!("{}=REMAPPED", cwd().display())), + dwarf_test: DwarfDump::AvoidSrcPath, + }); + // The compiler is called with a *RELATIVE PATH* as input. We are remapping a *SUB-DIRECTORY* + // of the compiler's working directory. This test makes sure that that directory is remapped + // even though it won't actually show up in this form in the compiler's SourceMap and instead + // is only constructed on demand during debuginfo generation. + check_dwarf(DwarfTest { + lib_name: "rel_input_remap_working_dir_child", + input_path: PathType::Relative, + scope: None, + remap_path_prefix: PrefixType::Regular(format!("{}=REMAPPED", cwd().join("src").display())), + dwarf_test: DwarfDump::ChildTest, + }); + // The compiler is called with a *RELATIVE PATH* as input. We are remapping a + // *PARENT DIRECTORY* of the compiler's working directory. + check_dwarf(DwarfTest { + lib_name: "rel_input_remap_working_dir_parent", + input_path: PathType::Relative, + scope: None, + remap_path_prefix: PrefixType::Regular(format!( + "{}=REMAPPED", + cwd().parent().unwrap().display() + )), + dwarf_test: DwarfDump::ParentTest, + }); +} + +#[track_caller] +fn check_dwarf(test: DwarfTest) { + let mut rustc = rustc(); + match test.input_path { + PathType::Absolute => rustc.input(cwd().join("src/quux.rs")), + PathType::Relative => rustc.input("src/quux.rs"), + }; + rustc.output(rust_lib_name(test.lib_name)); + rustc.arg("-Cdebuginfo=2"); + if let Some(scope) = test.scope { + match scope { + ScopeType::Object => rustc.arg("-Zremap-path-scope=object"), + ScopeType::Diagnostics => rustc.arg("-Zremap-path-scope=diagnostics"), + }; + if is_darwin() { + rustc.arg("-Csplit-debuginfo=off"); + } + } + match test.remap_path_prefix { + PrefixType::Regular(prefix) => { + // We explicitly switch to a directory that *is* a prefix of the directory our + // source code is contained in. + rustc.arg("--remap-path-prefix"); + rustc.arg(prefix); + } + PrefixType::Dual((prefix1, prefix2)) => { + // We explicitly switch to a directory that is *not* a prefix of the directory our + // source code is contained in. + rustc.arg("--remap-path-prefix"); + rustc.arg(prefix1); + rustc.arg("--remap-path-prefix"); + rustc.arg(prefix2); + } + } + rustc.run(); + match test.dwarf_test { + DwarfDump::ContainsSrcPath => { + llvm_dwarfdump() + .input(rust_lib_name(test.lib_name)) + .run() + // We expect the path to the main source file to be remapped. 
+ .assert_stdout_contains("REMAPPED/src/quux.rs") + // No weird duplication of remapped components (see #78479) + .assert_stdout_not_contains("REMAPPED/REMAPPED"); + } + DwarfDump::AvoidSrcPath => { + llvm_dwarfdump() + .input(rust_lib_name(test.lib_name)) + .run() + .assert_stdout_not_contains("REMAPPED/src/quux.rs") + .assert_stdout_not_contains("REMAPPED/REMAPPED"); + } + DwarfDump::ChildTest => { + llvm_dwarfdump() + .input(rust_lib_name(test.lib_name)) + .run() + // We expect `src/quux.rs` to have been remapped to `REMAPPED/quux.rs`. + .assert_stdout_contains("REMAPPED/quux.rs") + // We don't want to find the path that we just remapped anywhere in the DWARF + .assert_stdout_not_contains(cwd().join("src").to_str().unwrap()) + // No weird duplication of remapped components (see #78479) + .assert_stdout_not_contains("REMAPPED/REMAPPED"); + } + DwarfDump::ParentTest => { + llvm_dwarfdump() + .input(rust_lib_name(test.lib_name)) + .run() + // We expect `src/quux.rs` to have been remapped to + // `REMAPPED/remap-path-prefix-dwarf/src/quux.rs`. + .assert_stdout_contains("REMAPPED/rmake_out/src/quux.rs") + // We don't want to find the path that we just remapped anywhere in the DWARF + .assert_stdout_not_contains(cwd().parent().unwrap().to_str().unwrap()) + // No weird duplication of remapped components (see #78479) + .assert_stdout_not_contains("REMAPPED/REMAPPED"); + } + }; +} + +struct DwarfTest { + lib_name: &'static str, + input_path: PathType, + scope: Option, + remap_path_prefix: PrefixType, + dwarf_test: DwarfDump, +} + +enum PathType { + Absolute, + Relative, +} + +enum ScopeType { + Object, + Diagnostics, +} + +enum DwarfDump { + ContainsSrcPath, + AvoidSrcPath, + ChildTest, + ParentTest, +} + +enum PrefixType { + Regular(String), + Dual((String, String)), +} diff --git a/tests/rustdoc-ui/generate-link-to-definition/generate-link-to-definition-opt.rs b/tests/rustdoc-ui/generate-link-to-definition/generate-link-to-definition-opt.rs index f11b94bb036c6..babdbd0a69216 100644 --- a/tests/rustdoc-ui/generate-link-to-definition/generate-link-to-definition-opt.rs +++ b/tests/rustdoc-ui/generate-link-to-definition/generate-link-to-definition-opt.rs @@ -2,5 +2,6 @@ // option can only be used with HTML generation. //@ compile-flags: -Zunstable-options --generate-link-to-definition --output-format json +//@ check-pass pub fn f() {} diff --git a/tests/rustdoc-ui/generate-link-to-definition/generate-link-to-definition-opt.stderr b/tests/rustdoc-ui/generate-link-to-definition/generate-link-to-definition-opt.stderr index 4c8c607e7da23..62b0e3ce408b7 100644 --- a/tests/rustdoc-ui/generate-link-to-definition/generate-link-to-definition-opt.stderr +++ b/tests/rustdoc-ui/generate-link-to-definition/generate-link-to-definition-opt.stderr @@ -1,2 +1,4 @@ -error: --generate-link-to-definition option can only be used with HTML output format +warning: `--generate-link-to-definition` option can only be used with HTML output format + | + = note: `--generate-link-to-definition` option will be ignored diff --git a/tests/rustdoc-ui/generate-link-to-definition/generate-link-to-definition-opt2.rs b/tests/rustdoc-ui/generate-link-to-definition/generate-link-to-definition-opt2.rs deleted file mode 100644 index 718522059799e..0000000000000 --- a/tests/rustdoc-ui/generate-link-to-definition/generate-link-to-definition-opt2.rs +++ /dev/null @@ -1,6 +0,0 @@ -// This test purpose is to check that the "--generate-link-to-definition" -// option can only be used with HTML generation. 
- -//@ compile-flags: -Zunstable-options --generate-link-to-definition --show-coverage - -pub fn f() {} diff --git a/tests/rustdoc-ui/generate-link-to-definition/generate-link-to-definition-opt2.stderr b/tests/rustdoc-ui/generate-link-to-definition/generate-link-to-definition-opt2.stderr deleted file mode 100644 index 4c8c607e7da23..0000000000000 --- a/tests/rustdoc-ui/generate-link-to-definition/generate-link-to-definition-opt2.stderr +++ /dev/null @@ -1,2 +0,0 @@ -error: --generate-link-to-definition option can only be used with HTML output format - diff --git a/tests/ui/coroutine/issue-58888.rs b/tests/ui/coroutine/issue-58888.rs index 6266f97ce8c46..e4fada0cd432e 100644 --- a/tests/ui/coroutine/issue-58888.rs +++ b/tests/ui/coroutine/issue-58888.rs @@ -13,7 +13,8 @@ impl Database { } fn check_connection(&self) -> impl Coroutine + '_ { - #[coroutine] move || { + #[coroutine] + move || { let iter = self.get_connection(); for i in iter { yield i @@ -23,5 +24,5 @@ impl Database { } fn main() { - Database.check_connection(); + let _ = Database.check_connection(); } diff --git a/tests/ui/rust-2024/unsafe-env-suggestion.stderr b/tests/ui/rust-2024/unsafe-env-suggestion.stderr index 3aa10a3bed682..1506741f6bc9b 100644 --- a/tests/ui/rust-2024/unsafe-env-suggestion.stderr +++ b/tests/ui/rust-2024/unsafe-env-suggestion.stderr @@ -11,7 +11,7 @@ note: the lint level is defined here | LL | #![deny(deprecated_safe_2024)] | ^^^^^^^^^^^^^^^^^^^^ -help: you can wrap the call in an `unsafe` block if you can guarantee the code is only ever called from single-threaded code +help: you can wrap the call in an `unsafe` block if you can guarantee that the environment access only happens in single-threaded code | LL + // TODO: Audit that the environment access only happens in single-threaded code. LL ~ unsafe { env::set_var("FOO", "BAR") }; @@ -25,7 +25,7 @@ LL | env::remove_var("FOO"); | = warning: this is accepted in the current edition (Rust 2015) but is a hard error in Rust 2024! = note: for more information, see issue #27970 -help: you can wrap the call in an `unsafe` block if you can guarantee the code is only ever called from single-threaded code +help: you can wrap the call in an `unsafe` block if you can guarantee that the environment access only happens in single-threaded code | LL + // TODO: Audit that the environment access only happens in single-threaded code. LL ~ unsafe { env::remove_var("FOO") };
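For reference, applying the machine-applicable suggestion shown in the updated `unsafe-env-suggestion.stderr` above leaves a call site looking like this on a pre-2024 edition (the `audit_that` string from the attribute supplies both the help text and the inserted `// TODO` comment):

```rust
use std::env;

fn main() {
    // TODO: Audit that the environment access only happens in single-threaded code.
    unsafe { env::set_var("FOO", "BAR") };
}
```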
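Separately, the new `#[must_use]` on `Coroutine` (see the `library/core/src/ops/coroutine.rs` hunk and the `let _ =` change in `issue-58888.rs`) means that building a coroutine and never resuming it now warns. A nightly-only sketch, assuming the unstable `coroutines`, `coroutine_trait`, and `stmt_expr_attributes` features:

```rust
#![feature(coroutines, coroutine_trait, stmt_expr_attributes)]

use std::ops::{Coroutine, CoroutineState};
use std::pin::pin;

fn numbers() -> impl Coroutine<Yield = u32, Return = ()> {
    #[coroutine]
    || {
        yield 1;
        yield 2;
    }
}

fn main() {
    numbers(); // warns: unused implementer of `Coroutine` that must be used
    let _ = numbers(); // explicitly discarded, mirroring the updated test

    // Resuming the coroutine uses it, so no warning here.
    let mut c = pin!(numbers());
    assert!(matches!(c.as_mut().resume(()), CoroutineState::Yielded(1)));
    assert!(matches!(c.as_mut().resume(()), CoroutineState::Yielded(2)));
    assert!(matches!(c.as_mut().resume(()), CoroutineState::Complete(())));
}
```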