feat: improve CPU usage #10088

Merged
merged 4 commits on Aug 30, 2021
25 changes: 6 additions & 19 deletions crates/ide/src/prime_caches.rs
@@ -8,39 +8,26 @@ use ide_db::base_db::SourceDatabase;

use crate::RootDatabase;

/// We started indexing a crate.
#[derive(Debug)]
pub enum PrimeCachesProgress {
Started,
/// We started indexing a crate.
StartedOnCrate {
on_crate: String,
n_done: usize,
n_total: usize,
},
/// We finished indexing all crates.
Finished,
pub struct PrimeCachesProgress {
pub on_crate: String,
pub n_done: usize,
pub n_total: usize,
}

pub(crate) fn prime_caches(db: &RootDatabase, cb: &(dyn Fn(PrimeCachesProgress) + Sync)) {
let _p = profile::span("prime_caches");
let graph = db.crate_graph();
let topo = &graph.crates_in_topological_order();

cb(PrimeCachesProgress::Started);
// Take care to emit the finish signal even when the computation is canceled.
let _d = stdx::defer(|| cb(PrimeCachesProgress::Finished));

// FIXME: This would be easy to parallelize, since it's in the ideal ordering for that.
// Unfortunately rayon prevents panics from propagation out of a `scope`, which breaks
// cancellation, so we cannot use rayon.
for (i, &crate_id) in topo.iter().enumerate() {
let crate_name = graph[crate_id].display_name.as_deref().unwrap_or_default().to_string();

cb(PrimeCachesProgress::StartedOnCrate {
on_crate: crate_name,
n_done: i,
n_total: topo.len(),
});
cb(PrimeCachesProgress { on_crate: crate_name, n_done: i, n_total: topo.len() });
db.crate_def_map(crate_id);
db.import_map(crate_id);
}
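With this change, `prime_caches` itself only reports per-crate progress; announcing the start and end of the whole run (including cancellation) becomes the caller's responsibility. Below is a minimal sketch of a caller, assuming `Analysis::prime_caches` takes the callback by value and returns `Err(_)` on cancellation, as the `main_loop` change further down uses it; the `report` closure and the exact callback bounds are assumptions of this sketch, not part of the PR.

```rust
// Sketch of an embedder driving cache priming with the simplified API.
// `report` stands in for whatever progress channel the embedder uses.
fn prime_caches_with_progress(analysis: &ide::Analysis, report: impl Fn(String) + Sync) {
    report("cache priming started".to_string());
    let result = analysis.prime_caches(|progress: ide::PrimeCachesProgress| {
        // Each report carries the crate being indexed plus done/total counts.
        report(format!("{}/{} ({})", progress.n_done, progress.n_total, progress.on_crate));
    });
    // An Err(_) result means the computation was cancelled part-way through.
    report(format!("cache priming finished (cancelled: {})", result.is_err()));
}
```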
105 changes: 67 additions & 38 deletions crates/rust-analyzer/src/main_loop.rs
@@ -8,11 +8,10 @@ use std::{

use always_assert::always;
use crossbeam_channel::{select, Receiver};
use ide::{FileId, PrimeCachesProgress};
use ide_db::base_db::{SourceDatabaseExt, VfsPath};
use lsp_server::{Connection, Notification, Request};
use lsp_types::notification::Notification as _;
use vfs::ChangeKind;
use vfs::{ChangeKind, FileId};

use crate::{
config::Config,
@@ -67,6 +66,13 @@ pub(crate) enum Task {
FetchBuildData(BuildDataProgress),
}

#[derive(Debug)]
pub(crate) enum PrimeCachesProgress {
Begin,
Report(ide::PrimeCachesProgress),
End { cancelled: bool },
}

impl fmt::Debug for Event {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let debug_verbose_not = |not: &Notification, f: &mut fmt::Formatter| {
@@ -146,8 +152,10 @@ impl GlobalState {
);
}

self.fetch_workspaces_request();
self.fetch_workspaces_if_needed();
self.fetch_workspaces_queue.request_op();
if self.fetch_workspaces_queue.should_start_op() {
self.fetch_workspaces();
}

while let Some(event) = self.next_event(&inbox) {
if let Event::Lsp(lsp_server::Message::Notification(not)) = &event {
@@ -209,17 +217,17 @@ impl GlobalState {
}
}
Task::PrimeCaches(progress) => match progress {
PrimeCachesProgress::Started => prime_caches_progress.push(progress),
PrimeCachesProgress::StartedOnCrate { .. } => {
PrimeCachesProgress::Begin => prime_caches_progress.push(progress),
PrimeCachesProgress::Report(_) => {
match prime_caches_progress.last_mut() {
Some(last @ PrimeCachesProgress::StartedOnCrate { .. }) => {
Some(last @ PrimeCachesProgress::Report(_)) => {
// Coalesce subsequent update events.
*last = progress;
}
_ => prime_caches_progress.push(progress),
}
}
PrimeCachesProgress::Finished => prime_caches_progress.push(progress),
PrimeCachesProgress::End { .. } => prime_caches_progress.push(progress),
},
Task::FetchWorkspace(progress) => {
let (state, msg) = match progress {
@@ -228,14 +236,14 @@
(Progress::Report, Some(msg))
}
ProjectWorkspaceProgress::End(workspaces) => {
self.fetch_workspaces_completed(workspaces);
self.fetch_workspaces_queue.op_completed(workspaces);

let old = Arc::clone(&self.workspaces);
self.switch_workspaces();
let workspaces_updated = !Arc::ptr_eq(&old, &self.workspaces);

if self.config.run_build_scripts() && workspaces_updated {
self.fetch_build_data_request()
self.fetch_build_data_queue.request_op()
}

(Progress::End, None)
@@ -251,7 +259,7 @@
(Some(Progress::Report), Some(msg))
}
BuildDataProgress::End(build_data_result) => {
self.fetch_build_data_completed(build_data_result);
self.fetch_build_data_queue.op_completed(build_data_result);

self.switch_workspaces();

@@ -275,22 +283,28 @@
for progress in prime_caches_progress {
let (state, message, fraction);
match progress {
PrimeCachesProgress::Started => {
PrimeCachesProgress::Begin => {
state = Progress::Begin;
message = None;
fraction = 0.0;
}
PrimeCachesProgress::StartedOnCrate { on_crate, n_done, n_total } => {
PrimeCachesProgress::Report(report) => {
state = Progress::Report;
message = Some(format!("{}/{} ({})", n_done, n_total, on_crate));
fraction = Progress::fraction(n_done, n_total);
message = Some(format!(
"{}/{} ({})",
report.n_done, report.n_total, report.on_crate
));
fraction = Progress::fraction(report.n_done, report.n_total);
}
PrimeCachesProgress::Finished => {
PrimeCachesProgress::End { cancelled } => {
state = Progress::End;
message = None;
fraction = 1.0;

self.prime_caches_queue.op_completed(());
if cancelled {
self.prime_caches_queue.request_op();
}
}
};

@@ -413,26 +427,10 @@ impl GlobalState {
for flycheck in &self.flycheck {
flycheck.update();
}
self.prime_caches_queue.request_op();
}

if !was_quiescent || state_changed {
// Ensure that only one cache priming task can run at a time
self.prime_caches_queue.request_op();
if self.prime_caches_queue.should_start_op() {
self.task_pool.handle.spawn_with_sender({
let analysis = self.snapshot().analysis;
move |sender| {
let cb = |progress| {
sender.send(Task::PrimeCaches(progress)).unwrap();
};
match analysis.prime_caches(cb) {
Ok(()) => (),
Err(_canceled) => (),
}
}
});
}

// Refresh semantic tokens if the client supports it.
if self.config.semantic_tokens_refresh() {
self.semantic_tokens_cache.lock().clear();
@@ -478,11 +476,43 @@
}

if self.config.cargo_autoreload() {
self.fetch_workspaces_if_needed();
if self.fetch_workspaces_queue.should_start_op() {
self.fetch_workspaces();
}
}
if self.fetch_build_data_queue.should_start_op() {
self.fetch_build_data();
}
if self.prime_caches_queue.should_start_op() {
self.task_pool.handle.spawn_with_sender({
let analysis = self.snapshot().analysis;
move |sender| {
sender.send(Task::PrimeCaches(PrimeCachesProgress::Begin)).unwrap();
let res = analysis.prime_caches(|progress| {
let report = PrimeCachesProgress::Report(progress);
sender.send(Task::PrimeCaches(report)).unwrap();
});
sender
.send(Task::PrimeCaches(PrimeCachesProgress::End {
cancelled: res.is_err(),
}))
.unwrap();
}
});
}
self.fetch_build_data_if_needed();

self.report_new_status_if_needed();
let status = self.current_status();
if self.last_reported_status.as_ref() != Some(&status) {
self.last_reported_status = Some(status.clone());

if let (lsp_ext::Health::Error, Some(message)) = (status.health, &status.message) {
self.show_message(lsp_types::MessageType::Error, message.clone());
}

if self.config.server_status_notification() {
self.send_notification::<lsp_ext::ServerStatusNotification>(status);
}
}

let loop_duration = loop_start.elapsed();
if loop_duration > Duration::from_millis(100) {
@@ -521,8 +551,7 @@

RequestDispatcher { req: Some(req), global_state: self }
.on_sync_mut::<lsp_ext::ReloadWorkspace>(|s, ()| {
s.fetch_workspaces_request();
s.fetch_workspaces_if_needed();
s.fetch_workspaces_queue.request_op();
Ok(())
})?
.on_sync_mut::<lsp_types::request::Shutdown>(|s, ()| {
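All three long-running operations (workspace fetching, build-script collection, cache priming) now follow one discipline: any event handler may call `request_op()`, but only the tail of the main loop calls `should_start_op()` and actually spawns the task, and the task's completion event calls `op_completed(..)`. The sketch below shows one way such a single-flight queue could be implemented; its internals are an assumption for illustration, not a copy of rust-analyzer's actual `OpQueue`.

```rust
/// Sketch of a single-flight operation queue: repeated requests coalesce,
/// and at most one operation is in flight at a time.
pub struct OpQueue<Output> {
    requested: bool,
    in_progress: bool,
    last_result: Option<Output>,
}

impl<Output> OpQueue<Output> {
    pub fn new() -> Self {
        OpQueue { requested: false, in_progress: false, last_result: None }
    }

    /// Mark the operation as needed; may be called from any event handler.
    pub fn request_op(&mut self) {
        self.requested = true;
    }

    /// Called once per main-loop turn: returns true if the caller should
    /// spawn the operation now, and marks it as in progress.
    pub fn should_start_op(&mut self) -> bool {
        if self.in_progress || !self.requested {
            return false;
        }
        self.requested = false;
        self.in_progress = true;
        true
    }

    /// Called from the operation's completion event with its result.
    pub fn op_completed(&mut self, result: Output) {
        assert!(self.in_progress);
        self.in_progress = false;
        self.last_result = Some(result);
    }
}
```

Note how the `PrimeCachesProgress::End { cancelled: true }` arm in the diff above simply calls `request_op()` again, so a cancelled priming run is re-queued and retried on a later loop turn.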
49 changes: 7 additions & 42 deletions crates/rust-analyzer/src/reload.rs
@@ -47,7 +47,7 @@ impl GlobalState {
self.analysis_host.update_lru_capacity(self.config.lru_capacity());
}
if self.config.linked_projects() != old_config.linked_projects() {
self.fetch_workspaces_request()
self.fetch_workspaces_queue.request_op()
} else if self.config.flycheck() != old_config.flycheck() {
self.reload_flycheck();
}
@@ -71,7 +71,7 @@
", "
)
);
self.fetch_workspaces_request();
self.fetch_workspaces_queue.request_op();

fn is_interesting(path: &AbsPath, change_kind: ChangeKind) -> bool {
const IMPLICIT_TARGET_FILES: &[&str] = &["build.rs", "src/main.rs", "src/lib.rs"];
@@ -109,7 +109,8 @@ impl GlobalState {
false
}
}
pub(crate) fn report_new_status_if_needed(&mut self) {

pub(crate) fn current_status(&self) -> lsp_ext::ServerStatusParams {
let mut status = lsp_ext::ServerStatusParams {
health: lsp_ext::Health::Ok,
quiescent: self.is_quiescent(),
@@ -132,27 +133,10 @@
status.health = lsp_ext::Health::Error;
status.message = Some(error)
}

if self.last_reported_status.as_ref() != Some(&status) {
self.last_reported_status = Some(status.clone());

if let (lsp_ext::Health::Error, Some(message)) = (status.health, &status.message) {
self.show_message(lsp_types::MessageType::Error, message.clone());
}

if self.config.server_status_notification() {
self.send_notification::<lsp_ext::ServerStatusNotification>(status);
}
}
status
}

pub(crate) fn fetch_workspaces_request(&mut self) {
self.fetch_workspaces_queue.request_op()
}
pub(crate) fn fetch_workspaces_if_needed(&mut self) {
if !self.fetch_workspaces_queue.should_start_op() {
return;
}
pub(crate) fn fetch_workspaces(&mut self) {
tracing::info!("will fetch workspaces");

self.task_pool.handle.spawn_with_sender({
@@ -203,21 +187,8 @@ impl GlobalState {
}
});
}
pub(crate) fn fetch_workspaces_completed(
&mut self,
workspaces: Vec<anyhow::Result<ProjectWorkspace>>,
) {
self.fetch_workspaces_queue.op_completed(workspaces)
}

pub(crate) fn fetch_build_data_request(&mut self) {
self.fetch_build_data_queue.request_op();
}
pub(crate) fn fetch_build_data_if_needed(&mut self) {
if !self.fetch_build_data_queue.should_start_op() {
return;
}

pub(crate) fn fetch_build_data(&mut self) {
let workspaces = Arc::clone(&self.workspaces);
let config = self.config.cargo();
self.task_pool.handle.spawn_with_sender(move |sender| {
@@ -236,12 +207,6 @@
sender.send(Task::FetchBuildData(BuildDataProgress::End((workspaces, res)))).unwrap();
});
}
pub(crate) fn fetch_build_data_completed(
&mut self,
build_data: (Arc<Vec<ProjectWorkspace>>, Vec<anyhow::Result<WorkspaceBuildScripts>>),
) {
self.fetch_build_data_queue.op_completed(build_data)
}

pub(crate) fn switch_workspaces(&mut self) {
let _p = profile::span("GlobalState::switch_workspaces");
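The `reload.rs` side of the refactor removes the `*_request` / `*_if_needed` / `*_completed` wrappers: the queues are touched directly by the main loop, and `fetch_workspaces` / `fetch_build_data` only spawn the work. Likewise, `report_new_status_if_needed` becomes a pure `current_status()`, with the compare-and-notify logic moved to the caller. This is the rule codified by the `docs/dev/style.md` addition below; the following self-contained sketch uses simplified stand-in types rather than the real `GlobalState`.

```rust
// Simplified stand-ins for illustration only; not the real rust-analyzer types.
struct Queue {
    requested: bool,
}

impl Queue {
    fn should_start_op(&mut self) -> bool {
        std::mem::take(&mut self.requested)
    }
}

struct State {
    fetch_workspaces_queue: Queue,
}

impl State {
    // Before this PR: the control flow is hidden inside the helper.
    fn fetch_workspaces_if_needed(&mut self) {
        if !self.fetch_workspaces_queue.should_start_op() {
            return;
        }
        self.fetch_workspaces();
    }

    // After this PR: the caller decides whether the operation starts.
    fn main_loop_turn(&mut self) {
        if self.fetch_workspaces_queue.should_start_op() {
            self.fetch_workspaces();
        }
    }

    fn fetch_workspaces(&mut self) {
        // In the real code this spawns the fetch task on the task pool.
    }
}
```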
19 changes: 19 additions & 0 deletions docs/dev/style.md
@@ -257,6 +257,25 @@ if idx >= len {

**Rationale:** it's useful to see the invariant relied upon by the rest of the function clearly spelled out.

## Control Flow

As a special case of the previous rule, do not hide control flow inside functions, push it to the caller:

```rust
// GOOD
if cond {
f()
}

// BAD
fn f() {
if !cond {
return;
}
...
}
```

## Assertions

Assert liberally.