From d51f5cfc1caee90317739c45d0c9993f547c22c9 Mon Sep 17 00:00:00 2001 From: Camille GILLOT Date: Sun, 12 Mar 2023 10:55:52 +0000 Subject: [PATCH 1/8] Verify dep-graph consistency when loading it. Always recover from duplicate DepNode. --- .../src/dep_graph/serialized.rs | 21 ++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/compiler/rustc_query_system/src/dep_graph/serialized.rs b/compiler/rustc_query_system/src/dep_graph/serialized.rs index a4fb0a5b07220..b91c9a346f60c 100644 --- a/compiler/rustc_query_system/src/dep_graph/serialized.rs +++ b/compiler/rustc_query_system/src/dep_graph/serialized.rs @@ -252,8 +252,27 @@ impl SerializedDepGraph { .map(|_| UnhashMap::with_capacity_and_hasher(d.read_u32() as usize, Default::default())) .collect(); + let mut duplicates = Vec::new(); for (idx, node) in nodes.iter_enumerated() { - index[node.kind.as_usize()].insert(node.hash, idx); + if index[node.kind.as_usize()].insert(node.hash, idx).is_some() { + duplicates.push(node); + } + } + + // Creating the index detected a duplicated DepNode. + // + // If the new session presents us with a DepNode among those, we have no + // way to know which SerializedDepNodeIndex it corresponds to. To avoid + // making the wrong connection between a DepNodeIndex and a SerializedDepNodeIndex, + // we remove all the duplicates from the index. + // + // This way, when the new session presents us with a DepNode among the duplicates, + // we just create a new node with no counterpart in the previous graph. + // + // Red/green marking still works for those nodes, as that algorithm does not + // need to know about DepNode at all. + for node in duplicates { + index[node.kind.as_usize()].remove(&node.hash); } Arc::new(SerializedDepGraph { From d3bcb8f137bff9aa7717eb0dfbea4277097ab94f Mon Sep 17 00:00:00 2001 From: Camille GILLOT Date: Sun, 12 Mar 2023 10:56:20 +0000 Subject: [PATCH 2/8] Only use the new node hashmap for anonymous nodes. 
--- compiler/rustc_codegen_ssa/src/base.rs | 6 -- .../rustc_query_system/src/dep_graph/graph.rs | 95 +++++++------------ 2 files changed, 33 insertions(+), 68 deletions(-) diff --git a/compiler/rustc_codegen_ssa/src/base.rs b/compiler/rustc_codegen_ssa/src/base.rs index 27c9cb0b31edd..c188be4e416ab 100644 --- a/compiler/rustc_codegen_ssa/src/base.rs +++ b/compiler/rustc_codegen_ssa/src/base.rs @@ -1088,12 +1088,6 @@ pub fn determine_cgu_reuse<'tcx>(tcx: TyCtxt<'tcx>, cgu: &CodegenUnit<'tcx>) -> // know that later). If we are not doing LTO, there is only one optimized // version of each module, so we re-use that. let dep_node = cgu.codegen_dep_node(tcx); - assert!( - !tcx.dep_graph.dep_node_exists(&dep_node), - "CompileCodegenUnit dep-node for CGU `{}` already exists before marking.", - cgu.name() - ); - if tcx.try_mark_green(&dep_node) { // We can re-use either the pre- or the post-thinlto state. If no LTO is // being performed then we can use post-LTO artifacts, otherwise we must diff --git a/compiler/rustc_query_system/src/dep_graph/graph.rs b/compiler/rustc_query_system/src/dep_graph/graph.rs index 9b26a5bbcc6ce..45e554c328d03 100644 --- a/compiler/rustc_query_system/src/dep_graph/graph.rs +++ b/compiler/rustc_query_system/src/dep_graph/graph.rs @@ -345,18 +345,6 @@ impl DepGraphData { task: fn(Ctxt, A) -> R, hash_result: Option, &R) -> Fingerprint>, ) -> (R, DepNodeIndex) { - // If the following assertion triggers, it can have two reasons: - // 1. Something is wrong with DepNode creation, either here or - // in `DepGraph::try_mark_green()`. - // 2. Two distinct query keys get mapped to the same `DepNode` - // (see for example #48923). 
- assert!( - !self.dep_node_exists(&key), - "forcing query with already existing `DepNode`\n\ - - query-key: {arg:?}\n\ - - dep-node: {key:?}" - ); - let with_deps = |task_deps| D::with_deps(task_deps, || task(cx, arg)); let (result, edges) = if cx.dep_context().is_eval_always(key.kind) { (with_deps(TaskDepsRef::EvalAlways), EdgesVec::new()) @@ -443,7 +431,31 @@ impl DepGraphData { hash: self.current.anon_id_seed.combine(hasher.finish()).into(), }; - self.current.intern_new_node(target_dep_node, task_deps, Fingerprint::ZERO) + // The DepNodes generated by the process above are not unique. 2 queries could + // have exactly the same dependencies. However, deserialization does not handle + // duplicated nodes, so we do the deduplication here directly. + // + // As anonymous nodes are a small quantity compared to the full dep-graph, the + // memory impact of this `anon_node_to_index` map remains tolerable, and helps + // us avoid useless growth of the graph with almost-equivalent nodes. + match self + .current + .anon_node_to_index + .get_shard_by_value(&target_dep_node) + .lock() + .entry(target_dep_node) + { + Entry::Occupied(entry) => *entry.get(), + Entry::Vacant(entry) => { + let dep_node_index = self.current.intern_new_node( + target_dep_node, + task_deps, + Fingerprint::ZERO, + ); + entry.insert(dep_node_index); + dep_node_index + } + } } }; @@ -607,20 +619,6 @@ impl DepGraph { } impl DepGraphData { - #[inline] - fn dep_node_index_of_opt(&self, dep_node: &DepNode) -> Option { - if let Some(prev_index) = self.previous.node_to_index_opt(dep_node) { - self.current.prev_index_to_index.lock()[prev_index] - } else { - self.current.new_node_to_index.lock_shard_by_value(dep_node).get(dep_node).copied() - } - } - - #[inline] - fn dep_node_exists(&self, dep_node: &DepNode) -> bool { - self.dep_node_index_of_opt(dep_node).is_some() - } - fn node_color(&self, dep_node: &DepNode) -> Option { if let Some(prev_index) = self.previous.node_to_index_opt(dep_node) { 
self.colors.get(prev_index) @@ -653,11 +651,6 @@ impl DepGraphData { } impl DepGraph { - #[inline] - pub fn dep_node_exists(&self, dep_node: &DepNode) -> bool { - self.data.as_ref().is_some_and(|data| data.dep_node_exists(dep_node)) - } - /// Checks whether a previous work product exists for `v` and, if /// so, return the path that leads to it. Used to skip doing work. pub fn previous_work_product(&self, v: &WorkProductId) -> Option { @@ -1025,24 +1018,24 @@ rustc_index::newtype_index! { /// largest in the compiler. /// /// For this reason, we avoid storing `DepNode`s more than once as map -/// keys. The `new_node_to_index` map only contains nodes not in the previous +/// keys. The `anon_node_to_index` map only contains nodes of anonymous queries not in the previous /// graph, and we map nodes in the previous graph to indices via a two-step /// mapping. `SerializedDepGraph` maps from `DepNode` to `SerializedDepNodeIndex`, /// and the `prev_index_to_index` vector (which is more compact and faster than /// using a map) maps from `SerializedDepNodeIndex` to `DepNodeIndex`. /// -/// This struct uses three locks internally. The `data`, `new_node_to_index`, +/// This struct uses three locks internally. The `data`, `anon_node_to_index`, /// and `prev_index_to_index` fields are locked separately. Operations that take /// a `DepNodeIndex` typically just access the `data` field. /// /// We only need to manipulate at most two locks simultaneously: -/// `new_node_to_index` and `data`, or `prev_index_to_index` and `data`. When -/// manipulating both, we acquire `new_node_to_index` or `prev_index_to_index` +/// `anon_node_to_index` and `data`, or `prev_index_to_index` and `data`. When +/// manipulating both, we acquire `anon_node_to_index` or `prev_index_to_index` /// first, and `data` second. 
pub(super) struct CurrentDepGraph { encoder: GraphEncoder, - new_node_to_index: Sharded>, prev_index_to_index: Lock>>, + anon_node_to_index: Sharded>, /// This is used to verify that fingerprints do not change between the creation of a node /// and its recomputation. @@ -1110,7 +1103,7 @@ impl CurrentDepGraph { profiler, previous, ), - new_node_to_index: Sharded::new(|| { + anon_node_to_index: Sharded::new(|| { FxHashMap::with_capacity_and_hasher( new_node_count_estimate / sharded::shards(), Default::default(), @@ -1145,14 +1138,7 @@ impl CurrentDepGraph { edges: EdgesVec, current_fingerprint: Fingerprint, ) -> DepNodeIndex { - let dep_node_index = match self.new_node_to_index.lock_shard_by_value(&key).entry(key) { - Entry::Occupied(entry) => *entry.get(), - Entry::Vacant(entry) => { - let dep_node_index = self.encoder.send(key, current_fingerprint, edges); - entry.insert(dep_node_index); - dep_node_index - } - }; + let dep_node_index = self.encoder.send(key, current_fingerprint, edges); #[cfg(debug_assertions)] self.record_edge(dep_node_index, key, current_fingerprint); @@ -1222,8 +1208,6 @@ impl CurrentDepGraph { prev_graph: &SerializedDepGraph, prev_index: SerializedDepNodeIndex, ) -> DepNodeIndex { - self.debug_assert_not_in_new_nodes(prev_graph, prev_index); - let mut prev_index_to_index = self.prev_index_to_index.lock(); match prev_index_to_index[prev_index] { @@ -1241,19 +1225,6 @@ impl CurrentDepGraph { } } } - - #[inline] - fn debug_assert_not_in_new_nodes( - &self, - prev_graph: &SerializedDepGraph, - prev_index: SerializedDepNodeIndex, - ) { - let node = &prev_graph.index_to_node(prev_index); - debug_assert!( - !self.new_node_to_index.lock_shard_by_value(node).contains_key(node), - "node from previous graph present in new node collection" - ); - } } #[derive(Debug, Clone, Copy)] @@ -1375,7 +1346,7 @@ fn panic_on_forbidden_read(data: &DepGraphData, dep_node_index: DepN if dep_node.is_none() { // Try to find it among the new nodes - for shard in 
data.current.new_node_to_index.lock_shards() { + for shard in data.current.anon_node_to_index.lock_shards() { if let Some((node, _)) = shard.iter().find(|(_, index)| **index == dep_node_index) { dep_node = Some(*node); break; From db353ca7e4037758e79a5a8937b522cc47f9459f Mon Sep 17 00:00:00 2001 From: Camille GILLOT Date: Sun, 12 Mar 2023 10:07:33 +0000 Subject: [PATCH 3/8] Bless test for constant queries. As the hash function for the input is not complete, those queries can trigger a re-use of a DepNode. As we now tolerate having duplicated DepNode, we effectively remove this ICE. --- tests/incremental/issue-101518.rs | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/tests/incremental/issue-101518.rs b/tests/incremental/issue-101518.rs index 02f4f5d42e74d..d626b4eea1c17 100644 --- a/tests/incremental/issue-101518.rs +++ b/tests/incremental/issue-101518.rs @@ -1,4 +1,11 @@ -//@ revisions: cpass +// This test creates 2 constants that have the same contents but a different AllocId. +// When hashed, those two constants would have the same stable hash. From the point of view +// of the query system, the two calls to `try_destructure_mir_constant` would have the same +// `DepNode` but with different inputs. +// +// This test verifies that the query system does not ICE in such cases. +// +//@ revisions: cpass1 cpass2 #[derive(PartialEq, Eq)] struct Id<'a> { @@ -21,4 +28,7 @@ fn visit_struct2() { } } -fn main() {} +fn main() { + visit_struct(); + visit_struct2(); +} From 545ec634db8a0947ca6f3718da93fe2b703f14a8 Mon Sep 17 00:00:00 2001 From: Camille GILLOT Date: Thu, 13 Apr 2023 16:24:34 +0000 Subject: [PATCH 4/8] Add a flag to check depnodes for collisions.
--- .../rustc_incremental/src/persist/save.rs | 2 +- .../rustc_query_system/src/dep_graph/graph.rs | 30 +++++++++++++++---- compiler/rustc_session/src/options.rs | 2 ++ 3 files changed, 28 insertions(+), 6 deletions(-) diff --git a/compiler/rustc_incremental/src/persist/save.rs b/compiler/rustc_incremental/src/persist/save.rs index 53726df459797..ae4d2c9872762 100644 --- a/compiler/rustc_incremental/src/persist/save.rs +++ b/compiler/rustc_incremental/src/persist/save.rs @@ -173,7 +173,7 @@ pub(crate) fn build_dep_graph( sess.opts.dep_tracking_hash(false).encode(&mut encoder); Some(DepGraph::new( - &sess.prof, + sess, prev_graph, prev_work_products, encoder, diff --git a/compiler/rustc_query_system/src/dep_graph/graph.rs b/compiler/rustc_query_system/src/dep_graph/graph.rs index 45e554c328d03..d3eb7cd10746c 100644 --- a/compiler/rustc_query_system/src/dep_graph/graph.rs +++ b/compiler/rustc_query_system/src/dep_graph/graph.rs @@ -8,7 +8,7 @@ use std::sync::atomic::Ordering; use rustc_data_structures::fingerprint::Fingerprint; use rustc_data_structures::fx::{FxHashMap, FxHashSet}; -use rustc_data_structures::profiling::{QueryInvocationId, SelfProfilerRef}; +use rustc_data_structures::profiling::QueryInvocationId; use rustc_data_structures::sharded::{self, Sharded}; use rustc_data_structures::stable_hasher::{HashStable, StableHasher}; use rustc_data_structures::sync::{AtomicU32, AtomicU64, Lock, Lrc}; @@ -16,6 +16,7 @@ use rustc_data_structures::unord::UnordMap; use rustc_index::IndexVec; use rustc_macros::{Decodable, Encodable}; use rustc_serialize::opaque::{FileEncodeResult, FileEncoder}; +use rustc_session::Session; use tracing::{debug, instrument}; #[cfg(debug_assertions)] use {super::debug::EdgeFilter, std::env}; @@ -119,7 +120,7 @@ where impl DepGraph { pub fn new( - profiler: &SelfProfilerRef, + session: &Session, prev_graph: Arc, prev_work_products: WorkProductMap, encoder: FileEncoder, @@ -129,7 +130,7 @@ impl DepGraph { let prev_graph_node_count = 
prev_graph.node_count(); let current = CurrentDepGraph::new( - profiler, + session, prev_graph_node_count, encoder, record_graph, @@ -1047,6 +1048,11 @@ pub(super) struct CurrentDepGraph { #[cfg(debug_assertions)] forbidden_edge: Option, + /// Used to verify the absence of hash collisions among DepNodes. + /// This field is only `Some` if the `-Z incremental_verify_depnodes` option is present. + #[cfg(debug_assertions)] + seen_dep_nodes: Option>>, + /// Anonymous `DepNode`s are nodes whose IDs we compute from the list of /// their edges. This has the beneficial side-effect that multiple anonymous /// nodes can be coalesced into one without changing the semantics of the @@ -1068,7 +1074,7 @@ pub(super) struct CurrentDepGraph { impl CurrentDepGraph { fn new( - profiler: &SelfProfilerRef, + session: &Session, prev_graph_node_count: usize, encoder: FileEncoder, record_graph: bool, @@ -1100,7 +1106,7 @@ impl CurrentDepGraph { prev_graph_node_count, record_graph, record_stats, - profiler, + &session.prof, previous, ), anon_node_to_index: Sharded::new(|| { @@ -1115,6 +1121,13 @@ impl CurrentDepGraph { forbidden_edge, #[cfg(debug_assertions)] fingerprints: Lock::new(IndexVec::from_elem_n(None, new_node_count_estimate)), + #[cfg(debug_assertions)] + seen_dep_nodes: session.opts.unstable_opts.incremental_verify_depnodes.then(|| { + Lock::new(FxHashSet::with_capacity_and_hasher( + new_node_count_estimate, + Default::default(), + )) + }), total_read_count: AtomicU64::new(0), total_duplicate_read_count: AtomicU64::new(0), } @@ -1143,6 +1156,13 @@ impl CurrentDepGraph { #[cfg(debug_assertions)] self.record_edge(dep_node_index, key, current_fingerprint); + #[cfg(debug_assertions)] + if let Some(ref seen_dep_nodes) = self.seen_dep_nodes { + if !seen_dep_nodes.lock().insert(key) { + panic!("Found duplicate dep-node {key:?}"); + } + } + dep_node_index } diff --git a/compiler/rustc_session/src/options.rs b/compiler/rustc_session/src/options.rs index edee7b4468cd0..10c381452f458 
100644 --- a/compiler/rustc_session/src/options.rs +++ b/compiler/rustc_session/src/options.rs @@ -1820,6 +1820,8 @@ options! { incremental_info: bool = (false, parse_bool, [UNTRACKED], "print high-level information about incremental reuse (or the lack thereof) \ (default: no)"), + incremental_verify_depnodes: bool = (false, parse_bool, [UNTRACKED], + "verify incr. comp. dep-nodes for hash collisions (default: no)"), incremental_verify_ich: bool = (false, parse_bool, [UNTRACKED], "verify extended properties for incr. comp. (default: no): - hashes of green query instances From 1851fc42cc0fb4dade9e269f57ea3f70365acfaa Mon Sep 17 00:00:00 2001 From: Camille GILLOT Date: Thu, 20 Apr 2023 18:13:07 +0000 Subject: [PATCH 5/8] Reintroduce assertion. --- compiler/rustc_codegen_ssa/src/base.rs | 7 ++++ .../rustc_query_system/src/dep_graph/graph.rs | 32 +++++++++++++++++++ 2 files changed, 39 insertions(+) diff --git a/compiler/rustc_codegen_ssa/src/base.rs b/compiler/rustc_codegen_ssa/src/base.rs index c188be4e416ab..d20baff90c195 100644 --- a/compiler/rustc_codegen_ssa/src/base.rs +++ b/compiler/rustc_codegen_ssa/src/base.rs @@ -1088,6 +1088,13 @@ pub fn determine_cgu_reuse<'tcx>(tcx: TyCtxt<'tcx>, cgu: &CodegenUnit<'tcx>) -> // know that later). If we are not doing LTO, there is only one optimized // version of each module, so we re-use that. let dep_node = cgu.codegen_dep_node(tcx); + tcx.dep_graph.assert_nonexistent_node(&dep_node, || { + format!( + "CompileCodegenUnit dep-node for CGU `{}` already exists before marking.", + cgu.name() + ) + }); + if tcx.try_mark_green(&dep_node) { // We can re-use either the pre- or the post-thinlto state. 
If no LTO is // being performed then we can use post-LTO artifacts, otherwise we must diff --git a/compiler/rustc_query_system/src/dep_graph/graph.rs b/compiler/rustc_query_system/src/dep_graph/graph.rs index d3eb7cd10746c..08dcb23ec94e3 100644 --- a/compiler/rustc_query_system/src/dep_graph/graph.rs +++ b/compiler/rustc_query_system/src/dep_graph/graph.rs @@ -346,6 +346,14 @@ impl DepGraphData { task: fn(Ctxt, A) -> R, hash_result: Option, &R) -> Fingerprint>, ) -> (R, DepNodeIndex) { + self.assert_nonexistent_node(&key, || { + format!( + "forcing query with already existing `DepNode`\n\ + - query-key: {arg:?}\n\ + - dep-node: {key:?}" + ) + }); + let with_deps = |task_deps| D::with_deps(task_deps, || task(cx, arg)); let (result, edges) = if cx.dep_context().is_eval_always(key.kind) { (with_deps(TaskDepsRef::EvalAlways), EdgesVec::new()) @@ -620,6 +628,18 @@ impl DepGraph { } impl DepGraphData { + fn assert_nonexistent_node( + &self, + _dep_node: &DepNode, + _msg: impl FnOnce() -> S, + ) { + #[cfg(debug_assertions)] + if let Some(seen_dep_nodes) = &self.current.seen_dep_nodes { + let seen = seen_dep_nodes.lock().contains(_dep_node); + assert!(!seen, "{}", _msg()); + } + } + fn node_color(&self, dep_node: &DepNode) -> Option { if let Some(prev_index) = self.previous.node_to_index_opt(dep_node) { self.colors.get(prev_index) @@ -914,6 +934,18 @@ impl DepGraph { self.node_color(dep_node).is_some_and(|c| c.is_green()) } + pub fn assert_nonexistent_node( + &self, + dep_node: &DepNode, + msg: impl FnOnce() -> S, + ) { + if cfg!(debug_assertions) + && let Some(data) = &self.data + { + data.assert_nonexistent_node(dep_node, msg) + } + } + /// This method loads all on-disk cacheable query results into memory, so /// they can be written out to the new cache file again. 
Most query results /// will already be in memory but in the case where we marked something as From ddcc71b69cc84657cd1ec4f430c8aa7c4f489bec Mon Sep 17 00:00:00 2001 From: Camille GILLOT Date: Sat, 22 Apr 2023 15:09:05 +0000 Subject: [PATCH 6/8] Reuse -Zincremental_verify_ich. --- compiler/rustc_query_system/src/dep_graph/graph.rs | 4 ++-- compiler/rustc_session/src/options.rs | 5 ++--- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/compiler/rustc_query_system/src/dep_graph/graph.rs b/compiler/rustc_query_system/src/dep_graph/graph.rs index 08dcb23ec94e3..2a4d6cf210025 100644 --- a/compiler/rustc_query_system/src/dep_graph/graph.rs +++ b/compiler/rustc_query_system/src/dep_graph/graph.rs @@ -1081,7 +1081,7 @@ pub(super) struct CurrentDepGraph { forbidden_edge: Option, /// Used to verify the absence of hash collisions among DepNodes. - /// This field is only `Some` if the `-Z incremental_verify_depnodes` option is present. + /// This field is only `Some` if the `-Z incremental_verify_ich` option is present. #[cfg(debug_assertions)] seen_dep_nodes: Option>>, @@ -1154,7 +1154,7 @@ impl CurrentDepGraph { #[cfg(debug_assertions)] fingerprints: Lock::new(IndexVec::from_elem_n(None, new_node_count_estimate)), #[cfg(debug_assertions)] - seen_dep_nodes: session.opts.unstable_opts.incremental_verify_depnodes.then(|| { + seen_dep_nodes: session.opts.unstable_opts.incremental_verify_ich.then(|| { Lock::new(FxHashSet::with_capacity_and_hasher( new_node_count_estimate, Default::default(), diff --git a/compiler/rustc_session/src/options.rs b/compiler/rustc_session/src/options.rs index 10c381452f458..5212ed673d498 100644 --- a/compiler/rustc_session/src/options.rs +++ b/compiler/rustc_session/src/options.rs @@ -1820,12 +1820,11 @@ options! 
{ incremental_info: bool = (false, parse_bool, [UNTRACKED], "print high-level information about incremental reuse (or the lack thereof) \ (default: no)"), - incremental_verify_depnodes: bool = (false, parse_bool, [UNTRACKED], - "verify incr. comp. dep-nodes for hash collisions (default: no)"), incremental_verify_ich: bool = (false, parse_bool, [UNTRACKED], "verify extended properties for incr. comp. (default: no): - hashes of green query instances - - hash collisions of query keys"), + - hash collisions of query keys + - hash collisions when creating dep-nodes"), inline_in_all_cgus: Option = (None, parse_opt_bool, [TRACKED], "control whether `#[inline]` functions are in all CGUs"), inline_llvm: bool = (true, parse_bool, [TRACKED], From 694b77342191f59d568f7446849184bede45b1d2 Mon Sep 17 00:00:00 2001 From: Camille GILLOT Date: Sun, 4 Jun 2023 09:16:37 +0000 Subject: [PATCH 7/8] Do not fetch the DepNode to mark nodes green. --- compiler/rustc_codegen_ssa/src/base.rs | 2 +- .../rustc_query_system/src/dep_graph/edges.rs | 7 +++ .../rustc_query_system/src/dep_graph/graph.rs | 50 +++++++------------ 3 files changed, 27 insertions(+), 32 deletions(-) diff --git a/compiler/rustc_codegen_ssa/src/base.rs b/compiler/rustc_codegen_ssa/src/base.rs index d20baff90c195..0c91582e5332b 100644 --- a/compiler/rustc_codegen_ssa/src/base.rs +++ b/compiler/rustc_codegen_ssa/src/base.rs @@ -1088,7 +1088,7 @@ pub fn determine_cgu_reuse<'tcx>(tcx: TyCtxt<'tcx>, cgu: &CodegenUnit<'tcx>) -> // know that later). If we are not doing LTO, there is only one optimized // version of each module, so we re-use that. 
let dep_node = cgu.codegen_dep_node(tcx); - tcx.dep_graph.assert_nonexistent_node(&dep_node, || { + tcx.dep_graph.assert_nonexistent_node(dep_node, || { format!( "CompileCodegenUnit dep-node for CGU `{}` already exists before marking.", cgu.name() diff --git a/compiler/rustc_query_system/src/dep_graph/edges.rs b/compiler/rustc_query_system/src/dep_graph/edges.rs index 9a3763bd4eeb4..41ce64f11b78e 100644 --- a/compiler/rustc_query_system/src/dep_graph/edges.rs +++ b/compiler/rustc_query_system/src/dep_graph/edges.rs @@ -26,6 +26,13 @@ impl EdgesVec { Self::default() } + #[inline] + pub(crate) fn eval_always() -> Self { + let mut vec = EdgesVec::new(); + vec.push(DepNodeIndex::FOREVER_RED_NODE); + vec + } + #[inline] pub(crate) fn push(&mut self, edge: DepNodeIndex) { self.max = self.max.max(edge.as_u32()); diff --git a/compiler/rustc_query_system/src/dep_graph/graph.rs b/compiler/rustc_query_system/src/dep_graph/graph.rs index 2a4d6cf210025..4919ba5e59352 100644 --- a/compiler/rustc_query_system/src/dep_graph/graph.rs +++ b/compiler/rustc_query_system/src/dep_graph/graph.rs @@ -346,7 +346,7 @@ impl DepGraphData { task: fn(Ctxt, A) -> R, hash_result: Option, &R) -> Fingerprint>, ) -> (R, DepNodeIndex) { - self.assert_nonexistent_node(&key, || { + self.assert_nonexistent_node(key, || { format!( "forcing query with already existing `DepNode`\n\ - query-key: {arg:?}\n\ @@ -356,7 +356,7 @@ impl DepGraphData { let with_deps = |task_deps| D::with_deps(task_deps, || task(cx, arg)); let (result, edges) = if cx.dep_context().is_eval_always(key.kind) { - (with_deps(TaskDepsRef::EvalAlways), EdgesVec::new()) + (with_deps(TaskDepsRef::EvalAlways), EdgesVec::eval_always()) } else { let task_deps = Lock::new(TaskDeps { #[cfg(debug_assertions)] @@ -630,12 +630,12 @@ impl DepGraph { impl DepGraphData { fn assert_nonexistent_node( &self, - _dep_node: &DepNode, + _dep_node: DepNode, _msg: impl FnOnce() -> S, ) { #[cfg(debug_assertions)] if let Some(seen_dep_nodes) = 
&self.current.seen_dep_nodes { - let seen = seen_dep_nodes.lock().contains(_dep_node); + let seen = seen_dep_nodes.lock().contains(&_dep_node); assert!(!seen, "{}", _msg()); } } @@ -748,7 +748,7 @@ impl DepGraphData { // in the previous compilation session too, so we can try to // mark it as green by recursively marking all of its // dependencies green. - self.try_mark_previous_green(qcx, prev_index, dep_node, None) + self.try_mark_previous_green(qcx, prev_index, None) .map(|dep_node_index| (prev_index, dep_node_index)) } } @@ -762,14 +762,14 @@ impl DepGraphData { frame: Option<&MarkFrame<'_>>, ) -> Option<()> { let dep_dep_node_color = self.colors.get(parent_dep_node_index); - let dep_dep_node = &self.previous.index_to_node(parent_dep_node_index); + let dep_dep_node = || self.previous.index_to_node(parent_dep_node_index); match dep_dep_node_color { Some(DepNodeColor::Green(_)) => { // This dependency has been marked as green before, we are // still fine and can continue with checking the other // dependencies. - debug!("dependency {dep_dep_node:?} was immediately green"); + debug!("dependency {:?} was immediately green", dep_dep_node()); return Some(()); } Some(DepNodeColor::Red) => { @@ -777,32 +777,25 @@ impl DepGraphData { // compared to the previous compilation session. We cannot // mark the DepNode as green and also don't need to bother // with checking any of the other dependencies. - debug!("dependency {dep_dep_node:?} was immediately red"); + debug!("dependency {:?} was immediately red", dep_dep_node()); return None; } None => {} } - // We don't know the state of this dependency. If it isn't - // an eval_always node, let's try to mark it green recursively. 
- if !qcx.dep_context().is_eval_always(dep_dep_node.kind) { - debug!( - "state of dependency {:?} ({}) is unknown, trying to mark it green", - dep_dep_node, dep_dep_node.hash, - ); - - let node_index = - self.try_mark_previous_green(qcx, parent_dep_node_index, dep_dep_node, frame); + // We don't know the state of this dependency. Let's try to mark it green recursively. + debug!("state of dependency {:?} is unknown, trying to mark it green", dep_dep_node()); + let node_index = self.try_mark_previous_green(qcx, parent_dep_node_index, frame); - if node_index.is_some() { - debug!("managed to MARK dependency {dep_dep_node:?} as green",); - return Some(()); - } + if node_index.is_some() { + debug!("managed to MARK dependency {:?} as green", dep_dep_node()); + return Some(()); } // We failed to mark it green, so we try to force the query. + let dep_dep_node = dep_dep_node(); debug!("trying to force dependency {dep_dep_node:?}"); - if !qcx.dep_context().try_force_from_dep_node(*dep_dep_node, frame) { + if !qcx.dep_context().try_force_from_dep_node(dep_dep_node, frame) { // The DepNode could not be forced. 
debug!("dependency {dep_dep_node:?} could not be forced"); return None; @@ -846,15 +839,10 @@ impl DepGraphData { &self, qcx: Qcx, prev_dep_node_index: SerializedDepNodeIndex, - dep_node: &DepNode, frame: Option<&MarkFrame<'_>>, ) -> Option { let frame = MarkFrame { index: prev_dep_node_index, parent: frame }; - - // We never try to mark eval_always nodes as green - debug_assert!(!qcx.dep_context().is_eval_always(dep_node.kind)); - - debug_assert_eq!(self.previous.index_to_node(prev_dep_node_index), *dep_node); + let dep_node = || self.previous.index_to_node(prev_dep_node_index); let prev_deps = self.previous.edge_targets_from(prev_dep_node_index); @@ -889,7 +877,7 @@ impl DepGraphData { // Multiple threads can all write the same color here self.colors.insert(prev_dep_node_index, DepNodeColor::Green(dep_node_index)); - debug!("successfully marked {dep_node:?} as green"); + debug!("successfully marked {:?} as green", dep_node()); Some(dep_node_index) } @@ -936,7 +924,7 @@ impl DepGraph { pub fn assert_nonexistent_node( &self, - dep_node: &DepNode, + dep_node: DepNode, msg: impl FnOnce() -> S, ) { if cfg!(debug_assertions) From 27568ed46973aef31a42b7ddacabb63d95cbffc5 Mon Sep 17 00:00:00 2001 From: Camille GILLOT Date: Sun, 18 Aug 2024 11:35:00 +0000 Subject: [PATCH 8/8] Silence lint. 
--- compiler/rustc_query_system/src/dep_graph/graph.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/compiler/rustc_query_system/src/dep_graph/graph.rs b/compiler/rustc_query_system/src/dep_graph/graph.rs index 4919ba5e59352..570de4a3912e3 100644 --- a/compiler/rustc_query_system/src/dep_graph/graph.rs +++ b/compiler/rustc_query_system/src/dep_graph/graph.rs @@ -1245,7 +1245,7 @@ impl CurrentDepGraph { fn promote_node_and_deps_to_current( &self, - prev_graph: &SerializedDepGraph, + _prev_graph: &SerializedDepGraph, prev_index: SerializedDepNodeIndex, ) -> DepNodeIndex { let mut prev_index_to_index = self.prev_index_to_index.lock(); @@ -1258,8 +1258,8 @@ impl CurrentDepGraph { #[cfg(debug_assertions)] self.record_edge( dep_node_index, - prev_graph.index_to_node(prev_index), - prev_graph.fingerprint_by_index(prev_index), + _prev_graph.index_to_node(prev_index), + _prev_graph.fingerprint_by_index(prev_index), ); dep_node_index }