
Move diagnostics out from QueryJob and optimize for the case with no diagnostics #57113


Merged 1 commit on Jan 14, 2019
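In summary, this PR stops keeping a `Lock<Vec<Diagnostic>>` inside every `QueryJob` and instead collects diagnostics in a stack-local `Lock<ThinVec<Diagnostic>>` that the executing query reaches through the TLS `ImplicitCtxt`, storing them to the on-disk cache only when any were actually emitted. A minimal sketch of the core pattern, with `RefCell` and `Vec` as simplified stand-ins for rustc's `Lock` and `ThinVec` (names and types here are illustrative, not the real API):

```rust
use std::cell::RefCell;

// Simplified stand-in for rustc's Diagnostic type.
type Diagnostic = String;

/// Run `f` with an optional sink for diagnostics and hand back the result
/// together with whatever was collected (mirrors `with_diagnostics` in
/// ty/query/plumbing.rs below).
fn with_diagnostics<F, R>(f: F) -> (R, Vec<Diagnostic>)
where
    F: FnOnce(Option<&RefCell<Vec<Diagnostic>>>) -> R,
{
    let diagnostics = RefCell::new(Vec::new());
    let result = f(Some(&diagnostics));
    (result, diagnostics.into_inner())
}

fn main() {
    let (result, diagnostics) = with_diagnostics(|sink| {
        if let Some(sink) = sink {
            sink.borrow_mut().push("some lint output".to_string());
        }
        42
    });
    // Only take the (cold) storage path when something was emitted.
    if !diagnostics.is_empty() {
        println!("storing {} diagnostic(s) for result {}", diagnostics.len(), result);
    }
}
```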
2 changes: 1 addition & 1 deletion src/librustc/dep_graph/graph.rs
@@ -696,7 +696,7 @@ impl DepGraph {

// Promote the previous diagnostics to the current session.
tcx.queries.on_disk_cache
.store_diagnostics(dep_node_index, diagnostics.clone());
.store_diagnostics(dep_node_index, diagnostics.clone().into());

for diagnostic in diagnostics {
DiagnosticBuilder::new_diagnostic(handle, diagnostic).emit();
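The one-line change above adapts the call site to `store_diagnostics`, which now takes `ThinVec<Diagnostic>`: the cloned `Vec` is converted with `.into()`. A sketch of the `ThinVec` idea behind `rustc_data_structures::thin_vec` — a single nullable pointer, so the common empty case costs one machine word and no heap allocation (this definition is an illustration; the exact rustc implementation is not part of this diff):

```rust
// Illustration of the ThinVec idea: an empty vector is a null pointer.
struct ThinVec<T>(Option<Box<Vec<T>>>);

impl<T> ThinVec<T> {
    fn new() -> Self {
        ThinVec(None)
    }
    fn is_empty(&self) -> bool {
        self.0.as_ref().map_or(true, |v| v.is_empty())
    }
}

impl<T> From<Vec<T>> for ThinVec<T> {
    fn from(v: Vec<T>) -> Self {
        // An empty Vec converts without allocating a box.
        if v.is_empty() { ThinVec(None) } else { ThinVec(Some(Box::new(v))) }
    }
}

impl<T> Extend<T> for ThinVec<T> {
    fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
        match self.0 {
            Some(ref mut v) => v.extend(iter),
            None => {
                let v: Vec<T> = iter.into_iter().collect();
                if !v.is_empty() {
                    self.0 = Some(Box::new(v));
                }
            }
        }
    }
}

fn main() {
    let empty: ThinVec<u32> = Vec::new().into();
    assert!(empty.is_empty());
    // One word when empty: Option<Box<_>> is a nullable pointer.
    assert_eq!(std::mem::size_of::<ThinVec<u32>>(),
               std::mem::size_of::<usize>());

    let mut tv: ThinVec<u32> = ThinVec::new();
    tv.extend(Some(1));
    assert!(!tv.is_empty());
}
```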
15 changes: 12 additions & 3 deletions src/librustc/ty/context.rs
@@ -1673,6 +1673,7 @@ impl<'gcx> GlobalCtxt<'gcx> {
let new_icx = ty::tls::ImplicitCtxt {
tcx,
query: icx.query.clone(),
diagnostics: icx.diagnostics,
layout_depth: icx.layout_depth,
task_deps: icx.task_deps,
};
@@ -1782,6 +1783,7 @@ pub mod tls {
use errors::{Diagnostic, TRACK_DIAGNOSTICS};
use rustc_data_structures::OnDrop;
use rustc_data_structures::sync::{self, Lrc, Lock};
use rustc_data_structures::thin_vec::ThinVec;
use dep_graph::TaskDeps;

#[cfg(not(parallel_queries))]
@@ -1801,10 +1803,14 @@ pub mod tls {
/// by `enter_local` with a new local interner
pub tcx: TyCtxt<'tcx, 'gcx, 'tcx>,

/// The current query job, if any. This is updated by start_job in
/// The current query job, if any. This is updated by JobOwner::start in
/// ty::query::plumbing when executing a query
pub query: Option<Lrc<query::QueryJob<'gcx>>>,

/// Where to store diagnostics for the current query job, if any.
/// This is updated by JobOwner::start in ty::query::plumbing when executing a query
pub diagnostics: Option<&'a Lock<ThinVec<Diagnostic>>>,

/// Used to prevent layout from recursing too deeply.
pub layout_depth: usize,

@@ -1870,8 +1876,9 @@ pub mod tls {
fn track_diagnostic(diagnostic: &Diagnostic) {
with_context_opt(|icx| {
if let Some(icx) = icx {
if let Some(ref query) = icx.query {
query.diagnostics.lock().push(diagnostic.clone());
if let Some(ref diagnostics) = icx.diagnostics {
let mut diagnostics = diagnostics.lock();
diagnostics.extend(Some(diagnostic.clone()));
}
}
})
@@ -1938,6 +1945,7 @@ pub mod tls {
let icx = ImplicitCtxt {
tcx,
query: None,
diagnostics: None,
layout_depth: 0,
task_deps: None,
};
@@ -1967,6 +1975,7 @@ pub mod tls {
};
let icx = ImplicitCtxt {
query: None,
diagnostics: None,
tcx,
layout_depth: 0,
task_deps: None,
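With this change, `ImplicitCtxt` carries an optional borrowed diagnostics sink, and `track_diagnostic` only clones a diagnostic when some enclosing query is actually collecting. A simplified, self-contained sketch of that shape (the real struct also holds the `TyCtxt`, the current query job, and task dependencies, and uses `Lock`/`ThinVec` instead of `RefCell`/`Vec`):

```rust
use std::cell::RefCell;

type Diagnostic = String;

/// Simplified stand-in for ImplicitCtxt: the context threaded through
/// query execution, now carrying an optional place to record diagnostics.
struct ImplicitCtxt<'a> {
    diagnostics: Option<&'a RefCell<Vec<Diagnostic>>>,
    layout_depth: usize,
}

/// Simplified stand-in for track_diagnostic: only clones the diagnostic
/// when a query is collecting (the sink is Some).
fn track_diagnostic(icx: &ImplicitCtxt<'_>, diagnostic: &Diagnostic) {
    if let Some(sink) = icx.diagnostics {
        sink.borrow_mut().push(diagnostic.clone());
    }
}

fn main() {
    let sink = RefCell::new(Vec::new());
    let icx = ImplicitCtxt { diagnostics: Some(&sink), layout_depth: 0 };
    track_diagnostic(&icx, &"unused variable `x`".to_string());
    assert_eq!(sink.borrow().len(), 1);

    // Outside any query, nothing is recorded and nothing is cloned.
    let outer = ImplicitCtxt { diagnostics: None, layout_depth: 0 };
    track_diagnostic(&outer, &"dropped on the floor".to_string());
    let _ = (icx.layout_depth, outer.layout_depth);
}
```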
5 changes: 0 additions & 5 deletions src/librustc/ty/query/job.rs
@@ -14,7 +14,6 @@ use ty::query::{
config::QueryDescription,
};
use ty::context::TyCtxt;
use errors::Diagnostic;
use std::process;
use std::{fmt, ptr};

@@ -54,9 +53,6 @@ pub struct QueryJob<'tcx> {
/// The parent query job which created this job and is implicitly waiting on it.
pub parent: Option<Lrc<QueryJob<'tcx>>>,

/// Diagnostic messages which are emitted while the query executes
pub diagnostics: Lock<Vec<Diagnostic>>,

/// The latch which is used to wait on this job
#[cfg(parallel_queries)]
latch: QueryLatch<'tcx>,
@@ -66,7 +62,6 @@ impl<'tcx> QueryJob<'tcx> {
/// Creates a new query job
pub fn new(info: QueryInfo<'tcx>, parent: Option<Lrc<QueryJob<'tcx>>>) -> Self {
QueryJob {
diagnostics: Lock::new(Vec::new()),
info,
parent,
#[cfg(parallel_queries)]
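After this diff, `QueryJob` no longer owns any diagnostics state, so creating a job no longer allocates a `Lock<Vec<Diagnostic>>` that usually stays empty. A toy before/after comparison (hypothetical types, just to illustrate what the removal buys):

```rust
use std::sync::Mutex;

type Diagnostic = String;

// Before this PR (sketch): every query job carried a lock-protected Vec,
// even though most queries never emit a diagnostic.
struct QueryJobBefore {
    diagnostics: Mutex<Vec<Diagnostic>>,
}

// After (sketch): the job carries no diagnostics state at all; collection
// lives in a stack-local sink owned by the caller for the job's duration.
struct QueryJobAfter;

fn main() {
    let job = QueryJobBefore { diagnostics: Mutex::new(Vec::new()) };
    assert!(job.diagnostics.lock().unwrap().is_empty());
    // The diagnostics-free job is strictly smaller.
    assert!(std::mem::size_of::<QueryJobBefore>()
        > std::mem::size_of::<QueryJobAfter>());
}
```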
17 changes: 10 additions & 7 deletions src/librustc/ty/query/on_disk_cache.rs
@@ -7,6 +7,7 @@ use ich::{CachingSourceMapView, Fingerprint};
use mir::{self, interpret};
use mir::interpret::{AllocDecodingSession, AllocDecodingState};
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::thin_vec::ThinVec;
use rustc_data_structures::sync::{Lrc, Lock, HashMapExt, Once};
use rustc_data_structures::indexed_vec::{IndexVec, Idx};
use rustc_serialize::{Decodable, Decoder, Encodable, Encoder, opaque,
@@ -341,11 +342,13 @@ impl<'sess> OnDiskCache<'sess> {
/// Store a diagnostic emitted during the current compilation session.
/// Anything stored like this will be available via `load_diagnostics` in
/// the next compilation session.
#[inline(never)]
#[cold]
pub fn store_diagnostics(&self,
dep_node_index: DepNodeIndex,
diagnostics: Vec<Diagnostic>) {
diagnostics: ThinVec<Diagnostic>) {
let mut current_diagnostics = self.current_diagnostics.borrow_mut();
let prev = current_diagnostics.insert(dep_node_index, diagnostics);
let prev = current_diagnostics.insert(dep_node_index, diagnostics.into());
debug_assert!(prev.is_none());
}

@@ -367,16 +370,16 @@ impl<'sess> OnDiskCache<'sess> {
/// Since many anonymous queries can share the same `DepNode`, we aggregate
/// them -- as opposed to regular queries where we assume that there is a
/// 1:1 relationship between query-key and `DepNode`.
#[inline(never)]
#[cold]
pub fn store_diagnostics_for_anon_node(&self,
dep_node_index: DepNodeIndex,
mut diagnostics: Vec<Diagnostic>) {
diagnostics: ThinVec<Diagnostic>) {
let mut current_diagnostics = self.current_diagnostics.borrow_mut();

let x = current_diagnostics.entry(dep_node_index).or_insert_with(|| {
mem::replace(&mut diagnostics, Vec::new())
});
let x = current_diagnostics.entry(dep_node_index).or_insert(Vec::new());

x.extend(diagnostics.into_iter());
x.extend(Into::<Vec<_>>::into(diagnostics));
}

fn load_indexed<'tcx, T>(&self,
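Both storage functions are now `#[inline(never)]` and `#[cold]`, and both accept `ThinVec<Diagnostic>`; `store_diagnostics_for_anon_node` also drops the `mem::replace` trick in favor of a plain `entry(..).or_insert(..)` plus `extend`. The contrast between the two storage modes, sketched with simplified types (a plain `HashMap` stands in for the `Lock<FxHashMap<..>>` that rustc's `OnDiskCache` actually uses):

```rust
use std::collections::HashMap;

type DepNodeIndex = u32;
type Diagnostic = String;

struct DiagnosticsCache {
    current: HashMap<DepNodeIndex, Vec<Diagnostic>>,
}

impl DiagnosticsCache {
    // Regular queries have a 1:1 key-to-DepNode relationship, so a second
    // insert for the same index would be a bug.
    fn store(&mut self, index: DepNodeIndex, diagnostics: Vec<Diagnostic>) {
        let prev = self.current.insert(index, diagnostics);
        debug_assert!(prev.is_none());
    }

    // Many anonymous queries can share one DepNode, so their diagnostics
    // are aggregated instead of replacing each other.
    fn store_for_anon_node(&mut self, index: DepNodeIndex,
                           diagnostics: Vec<Diagnostic>) {
        self.current
            .entry(index)
            .or_insert_with(Vec::new)
            .extend(diagnostics);
    }
}

fn main() {
    let mut cache = DiagnosticsCache { current: HashMap::new() };
    cache.store(1, vec!["one".into()]);
    cache.store_for_anon_node(2, vec!["a".into()]);
    cache.store_for_anon_node(2, vec!["b".into()]);
    assert_eq!(cache.current[&2].len(), 2);
}
```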
74 changes: 44 additions & 30 deletions src/librustc/ty/query/plumbing.rs
@@ -18,6 +18,7 @@ use util::common::{profq_msg, ProfileQueriesMsg, QueryMsg};

use rustc_data_structures::fx::{FxHashMap};
use rustc_data_structures::sync::{Lrc, Lock};
use rustc_data_structures::thin_vec::ThinVec;
use std::mem;
use std::ptr;
use std::collections::hash_map::Entry;
@@ -195,19 +196,21 @@ impl<'a, 'tcx, Q: QueryDescription<'tcx>> JobOwner<'a, 'tcx, Q> {
pub(super) fn start<'lcx, F, R>(
&self,
tcx: TyCtxt<'_, 'tcx, 'lcx>,
diagnostics: Option<&Lock<ThinVec<Diagnostic>>>,
compute: F)
-> (R, Vec<Diagnostic>)
-> R
where
F: for<'b> FnOnce(TyCtxt<'b, 'tcx, 'lcx>) -> R
{
// The TyCtxt stored in TLS has the same global interner lifetime
// as `tcx`, so we use `with_related_context` to relate the 'gcx lifetimes
// when accessing the ImplicitCtxt
let r = tls::with_related_context(tcx, move |current_icx| {
tls::with_related_context(tcx, move |current_icx| {
// Update the ImplicitCtxt to point to our new query job
let new_icx = tls::ImplicitCtxt {
tcx: tcx.global_tcx(),
query: Some(self.job.clone()),
diagnostics,
layout_depth: current_icx.layout_depth,
task_deps: current_icx.task_deps,
};
@@ -216,13 +219,19 @@ impl<'a, 'tcx, Q: QueryDescription<'tcx>> JobOwner<'a, 'tcx, Q> {
tls::enter_context(&new_icx, |_| {
compute(tcx)
})
});
})
}

// Extract the diagnostic from the job
let diagnostics = mem::replace(&mut *self.job.diagnostics.lock(), Vec::new());
Author comment on the removed line above: Note: Another lock gone here too.

}

(r, diagnostics)
}
#[inline(always)]
fn with_diagnostics<F, R>(f: F) -> (R, ThinVec<Diagnostic>)
where
F: FnOnce(Option<&Lock<ThinVec<Diagnostic>>>) -> R
{
let diagnostics = Lock::new(ThinVec::new());
let result = f(Some(&diagnostics));
(result, diagnostics.into_inner())
}

impl<'a, 'tcx, Q: QueryDescription<'tcx>> Drop for JobOwner<'a, 'tcx, Q> {
@@ -402,20 +411,23 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
profq_msg!(self, ProfileQueriesMsg::ProviderBegin);
self.sess.profiler(|p| p.start_activity(Q::CATEGORY));

let res = job.start(self, |tcx| {
tcx.dep_graph.with_anon_task(dep_node.kind, || {
Q::compute(tcx.global_tcx(), key)
let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| {
job.start(self, diagnostics, |tcx| {
tcx.dep_graph.with_anon_task(dep_node.kind, || {
Q::compute(tcx.global_tcx(), key)
})
})
});

self.sess.profiler(|p| p.end_activity(Q::CATEGORY));
profq_msg!(self, ProfileQueriesMsg::ProviderEnd);
let ((result, dep_node_index), diagnostics) = res;

self.dep_graph.read_index(dep_node_index);

self.queries.on_disk_cache
.store_diagnostics_for_anon_node(dep_node_index, diagnostics);
if unlikely!(!diagnostics.is_empty()) {
self.queries.on_disk_cache
.store_diagnostics_for_anon_node(dep_node_index, diagnostics);
}

job.complete(&result, dep_node_index);

@@ -487,7 +499,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
// The diagnostics for this query have already been
// promoted to the current session during
// try_mark_green(), so we can ignore them here.
let (result, _) = job.start(self, |tcx| {
let result = job.start(self, None, |tcx| {
// The dep-graph for this computation is already in
// place
tcx.dep_graph.with_ignore(|| {
@@ -566,32 +578,34 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
profq_msg!(self, ProfileQueriesMsg::ProviderBegin);
self.sess.profiler(|p| p.start_activity(Q::CATEGORY));

let res = job.start(self, |tcx| {
if dep_node.kind.is_eval_always() {
tcx.dep_graph.with_eval_always_task(dep_node,
tcx,
key,
Q::compute)
} else {
tcx.dep_graph.with_task(dep_node,
tcx,
key,
Q::compute)
}
let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| {
job.start(self, diagnostics, |tcx| {
if dep_node.kind.is_eval_always() {
tcx.dep_graph.with_eval_always_task(dep_node,
tcx,
key,
Q::compute)
} else {
tcx.dep_graph.with_task(dep_node,
tcx,
key,
Q::compute)
}
})
});

self.sess.profiler(|p| p.end_activity(Q::CATEGORY));
profq_msg!(self, ProfileQueriesMsg::ProviderEnd);

let ((result, dep_node_index), diagnostics) = res;

if unlikely!(self.sess.opts.debugging_opts.query_dep_graph) {
self.dep_graph.mark_loaded_from_cache(dep_node_index, false);
}

if dep_node.kind != ::dep_graph::DepKind::Null {
self.queries.on_disk_cache
.store_diagnostics(dep_node_index, diagnostics);
if unlikely!(!diagnostics.is_empty()) {
self.queries.on_disk_cache
.store_diagnostics(dep_node_index, diagnostics);
}
}

job.complete(&result, dep_node_index);
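Both execution paths now follow the same shape: run the query inside `with_diagnostics`, then reach the cold storage function only behind an `unlikely!(!diagnostics.is_empty())` guard, so the no-diagnostics fast path does no extra work. A runnable sketch of that hot/cold split (`unlikely!` is a rustc-internal macro around the branch-prediction intrinsics; a plain branch stands in for it here):

```rust
type Diagnostic = String;

// The storage path is deliberately kept out of line and marked cold so it
// does not pollute the hot query path.
#[inline(never)]
#[cold]
fn store_diagnostics(diagnostics: Vec<Diagnostic>) {
    // In rustc this writes into the on-disk cache; printing stands in for
    // the expensive work here.
    println!("storing {} diagnostic(s)", diagnostics.len());
}

fn finish_query(diagnostics: Vec<Diagnostic>) {
    // rustc wraps this test in `unlikely!` to hint that the branch is
    // rarely taken; the logic is the same.
    if !diagnostics.is_empty() {
        store_diagnostics(diagnostics);
    }
}

fn main() {
    finish_query(Vec::new());                           // common case: no store
    finish_query(vec!["a deprecation warning".into()]); // rare, cold case
}
```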