
Commit 7d9b839

refactor: Clean up cache priming cancellation handling
1 parent fe7b4f2 · commit 7d9b839

7 files changed, +54 -39 lines

crates/hir/src/symbols.rs (+7 -10)

@@ -13,13 +13,13 @@ use hir_def::{
 use hir_expand::{HirFileId, name::Name};
 use hir_ty::{
     db::HirDatabase,
-    display::{DisplayTarget, HirDisplay, hir_display_with_store},
+    display::{HirDisplay, hir_display_with_store},
 };
 use intern::Symbol;
 use rustc_hash::FxHashMap;
 use syntax::{AstNode, AstPtr, SmolStr, SyntaxNode, SyntaxNodePtr, ToSmolStr, ast::HasName};

-use crate::{Module, ModuleDef, Semantics};
+use crate::{HasCrate, Module, ModuleDef, Semantics};

 pub type FxIndexSet<T> = indexmap::IndexSet<T, std::hash::BuildHasherDefault<rustc_hash::FxHasher>>;

@@ -66,7 +66,6 @@ pub struct SymbolCollector<'a> {
     symbols: FxIndexSet<FileSymbol>,
     work: Vec<SymbolCollectorWork>,
     current_container_name: Option<SmolStr>,
-    display_target: DisplayTarget,
 }

 /// Given a [`ModuleId`] and a [`HirDatabase`], use the DefMap for the module's crate to collect
@@ -78,10 +77,6 @@ impl<'a> SymbolCollector<'a> {
             symbols: Default::default(),
             work: Default::default(),
             current_container_name: None,
-            display_target: DisplayTarget::from_crate(
-                db,
-                *db.all_crates().last().expect("no crate graph present"),
-            ),
         }
     }

@@ -93,8 +88,7 @@ impl<'a> SymbolCollector<'a> {

     pub fn collect(&mut self, module: Module) {
         let _p = tracing::info_span!("SymbolCollector::collect", ?module).entered();
-        tracing::info!(?module, "SymbolCollector::collect",);
-        self.display_target = module.krate().to_display_target(self.db);
+        tracing::info!(?module, "SymbolCollector::collect");

         // The initial work is the root module we're collecting, additional work will
         // be populated as we traverse the module's definitions.
@@ -321,7 +315,10 @@ impl<'a> SymbolCollector<'a> {
         let impl_data = self.db.impl_signature(impl_id);
         let impl_name = Some(
             hir_display_with_store(impl_data.self_ty, &impl_data.store)
-                .display(self.db, self.display_target)
+                .display(
+                    self.db,
+                    crate::Impl::from(impl_id).krate(self.db).to_display_target(self.db),
+                )
                 .to_smolstr(),
         );
         self.with_container_name(impl_name, |s| {
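
In short, the collector-wide display_target field (initialised from an arbitrary "last" crate and reassigned on every collect call) is gone; the display target is now derived at the use site from the crate that owns each impl, via the newly imported HasCrate trait. A toy sketch of that shape of refactor, using stand-in types rather than the real hir APIs:

// Stand-ins for illustration only; these are not the hir/hir-ty types.
#[derive(Clone, Copy, Debug)]
struct CrateId(u32);

#[derive(Clone, Copy, Debug)]
struct DisplayTarget {
    krate: CrateId,
}

struct Impl {
    owner: CrateId,
    self_ty: &'static str,
}

impl Impl {
    // Analogous to `HasCrate::krate(..).to_display_target(db)`: the target is
    // computed from the item's own crate at the point of rendering...
    fn display_target(&self) -> DisplayTarget {
        DisplayTarget { krate: self.owner }
    }
}

// ...instead of the collector caching a single `DisplayTarget` up front and
// applying it to impls from every crate it visits.
fn container_name(imp: &Impl) -> String {
    format!("{} (displayed for {:?})", imp.self_ty, imp.display_target().krate)
}

fn main() {
    let impls = [
        Impl { owner: CrateId(0), self_ty: "Foo" },
        Impl { owner: CrateId(1), self_ty: "Bar" },
    ];
    for imp in &impls {
        println!("{}", container_name(imp));
    }
}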

crates/ide-db/src/lib.rs (+2 -8)

@@ -78,6 +78,8 @@ pub type FileRange = FileRangeWrapper<FileId>;

 #[salsa::db]
 pub struct RootDatabase {
+    // FIXME: Revisit this commit now that we migrated to the new salsa, given we store arcs in this
+    // db directly now
     // We use `ManuallyDrop` here because every codegen unit that contains a
     // `&RootDatabase -> &dyn OtherDatabase` cast will instantiate its drop glue in the vtable,
     // which duplicates `Weak::drop` and `Arc::drop` tens of thousands of times, which makes
@@ -234,14 +236,6 @@ impl RootDatabase {
         // );
         // hir::db::BodyWithSourceMapQuery.in_db_mut(self).set_lru_capacity(2048);
     }
-
-    pub fn snapshot(&self) -> Self {
-        Self {
-            storage: self.storage.clone(),
-            files: self.files.clone(),
-            crates_map: self.crates_map.clone(),
-        }
-    }
 }

 #[query_group::query_group]

crates/ide-db/src/prime_caches.rs (+38 -13)

@@ -51,6 +51,7 @@ pub fn parallel_prime_caches(
 enum ParallelPrimeCacheWorkerProgress {
     BeginCrate { crate_id: Crate, crate_name: Symbol },
     EndCrate { crate_id: Crate },
+    Cancelled(Cancelled),
 }

 // We split off def map computation from other work,
@@ -71,26 +72,32 @@ pub fn parallel_prime_caches(
             progress_sender
                 .send(ParallelPrimeCacheWorkerProgress::BeginCrate { crate_id, crate_name })?;

-            match kind {
+            let cancelled = Cancelled::catch(|| match kind {
                 PrimingPhase::DefMap => _ = db.crate_def_map(crate_id),
                 PrimingPhase::ImportMap => _ = db.import_map(crate_id),
                 PrimingPhase::CrateSymbols => _ = db.crate_symbols(crate_id.into()),
-            }
+            });

-            progress_sender.send(ParallelPrimeCacheWorkerProgress::EndCrate { crate_id })?;
+            match cancelled {
+                Ok(()) => progress_sender
+                    .send(ParallelPrimeCacheWorkerProgress::EndCrate { crate_id })?,
+                Err(cancelled) => progress_sender
+                    .send(ParallelPrimeCacheWorkerProgress::Cancelled(cancelled))?,
+            }
         }

         Ok::<_, crossbeam_channel::SendError<_>>(())
     };

     for id in 0..num_worker_threads {
-        let worker = prime_caches_worker.clone();
-        let db = db.snapshot();
-
         stdx::thread::Builder::new(stdx::thread::ThreadIntent::Worker)
             .allow_leak(true)
             .name(format!("PrimeCaches#{id}"))
-            .spawn(move || Cancelled::catch(|| worker(db.snapshot())))
+            .spawn({
+                let worker = prime_caches_worker.clone();
+                let db = db.clone();
+                move || worker(db)
+            })
             .expect("failed to spawn thread");
     }

@@ -142,9 +149,14 @@ pub fn parallel_prime_caches(
                 continue;
             }
             Err(crossbeam_channel::RecvTimeoutError::Disconnected) => {
-                // our workers may have died from a cancelled task, so we'll check and re-raise here.
-                db.unwind_if_revision_cancelled();
-                break;
+                // all our workers have exited, mark us as finished and exit
+                cb(ParallelPrimeCachesProgress {
+                    crates_currently_indexing: vec![],
+                    crates_done,
+                    crates_total: crates_done,
+                    work_type: "Indexing",
+                });
+                return;
             }
         };
         match worker_progress {
@@ -156,6 +168,10 @@ pub fn parallel_prime_caches(
                 crates_to_prime.mark_done(crate_id);
                 crates_done += 1;
             }
+            ParallelPrimeCacheWorkerProgress::Cancelled(cancelled) => {
+                // Cancelled::throw should probably be public
+                std::panic::resume_unwind(Box::new(cancelled));
+            }
         };

         let progress = ParallelPrimeCachesProgress {
@@ -186,9 +202,14 @@ pub fn parallel_prime_caches(
                 continue;
             }
             Err(crossbeam_channel::RecvTimeoutError::Disconnected) => {
-                // our workers may have died from a cancelled task, so we'll check and re-raise here.
-                db.unwind_if_revision_cancelled();
-                break;
+                // all our workers have exited, mark us as finished and exit
+                cb(ParallelPrimeCachesProgress {
+                    crates_currently_indexing: vec![],
+                    crates_done,
+                    crates_total: crates_done,
+                    work_type: "Populating symbols",
+                });
+                return;
             }
         };
         match worker_progress {
@@ -199,6 +220,10 @@ pub fn parallel_prime_caches(
                 crates_currently_indexing.swap_remove(&crate_id);
                 crates_done += 1;
             }
+            ParallelPrimeCacheWorkerProgress::Cancelled(cancelled) => {
+                // Cancelled::throw should probably be public
+                std::panic::resume_unwind(Box::new(cancelled));
+            }
         };

         let progress = ParallelPrimeCachesProgress {
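
This file is the heart of the commit: workers no longer take database snapshots, and a cancelled query no longer unwinds the whole worker thread. Each worker catches the Cancelled unwind itself, forwards it through the progress channel as a Cancelled(..) message, and the controlling loop re-raises it with std::panic::resume_unwind so the caller's own Cancelled::catch still observes it. Below is a minimal, self-contained sketch of that catch/forward/re-raise flow; it uses std::sync::mpsc and a stand-in Cancelled type in place of crossbeam-channel and salsa's real cancellation token.

use std::panic;
use std::sync::mpsc;
use std::thread;

#[derive(Debug)]
struct Cancelled; // stand-in for salsa's `Cancelled` payload

// Stand-in for `Cancelled::catch`: run a closure and turn a cancellation
// unwind back into a value instead of letting it tear down the worker thread.
fn catch_cancelled(f: impl FnOnce()) -> Result<(), Cancelled> {
    panic::catch_unwind(panic::AssertUnwindSafe(f))
        .map_err(|payload| *payload.downcast::<Cancelled>().expect("unexpected panic payload"))
}

fn main() {
    let (tx, rx) = mpsc::channel::<Result<(), Cancelled>>();

    let worker = thread::spawn(move || {
        // Worker: run the priming "query" (here it simulates being cancelled
        // immediately), catch the unwind, and forward the outcome over the
        // channel instead of dying silently.
        let outcome = catch_cancelled(|| panic::resume_unwind(Box::new(Cancelled)));
        tx.send(outcome).unwrap();
    });

    // Controlling loop: when a worker reports a cancellation, re-raise it on
    // this thread so the caller's own `Cancelled::catch` around the whole
    // prime-caches call observes it (the real code calls
    // `std::panic::resume_unwind(Box::new(cancelled))` here).
    if let Err(cancelled) = rx.recv().unwrap() {
        worker.join().unwrap();
        println!("worker reported cancellation: {cancelled:?}");
    }
}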

crates/ide/src/lib.rs (+2 -2)

@@ -182,7 +182,7 @@ impl AnalysisHost {
     /// Returns a snapshot of the current state, which you can query for
     /// semantic information.
     pub fn analysis(&self) -> Analysis {
-        Analysis { db: self.db.snapshot() }
+        Analysis { db: self.db.clone() }
     }

     /// Applies changes to the current state of the world. If there are
@@ -864,7 +864,7 @@ impl Analysis {
     where
         F: FnOnce(&RootDatabase) -> T + std::panic::UnwindSafe,
     {
-        let snap = self.db.snapshot();
+        let snap = self.db.clone();
         Cancelled::catch(|| f(&snap))
     }
 }

crates/rust-analyzer/src/cli/analysis_stats.rs (+1 -2)

@@ -701,10 +701,9 @@ impl flags::AnalysisStats {

         if self.parallel {
             let mut inference_sw = self.stop_watch();
-            let snap = db.snapshot();
             bodies
                 .par_iter()
-                .map_with(snap, |snap, &body| {
+                .map_with(db.clone(), |snap, &body| {
                     snap.body(body.into());
                     snap.infer(body.into());
                 })

crates/rust-analyzer/src/discover.rs (+2 -4)

@@ -126,10 +126,8 @@ impl CargoParser<DiscoverProjectMessage> for DiscoverProjectParser {
                 Some(msg)
             }
             Err(err) => {
-                let err = DiscoverProjectData::Error {
-                    error: format!("{:#?}\n{}", err, line),
-                    source: None,
-                };
+                let err =
+                    DiscoverProjectData::Error { error: format!("{err:#?}\n{line}"), source: None };
                 Some(DiscoverProjectMessage::new(err))
             }
         }

crates/stdx/src/thread.rs (+2)

@@ -56,6 +56,8 @@ impl Builder {
         Self { inner: self.inner.stack_size(size), ..self }
     }

+    /// Whether dropping should detach the thread
+    /// instead of joining it.
     #[must_use]
     pub fn allow_leak(self, allow_leak: bool) -> Self {
         Self { allow_leak, ..self }
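
The new doc comment pins down what allow_leak controls: whether dropping the handle detaches the thread rather than joining it, which is what the prime-caches workers opt into with .allow_leak(true). A minimal sketch of those drop semantics with a hypothetical wrapper type (not the actual stdx::thread::JoinHandle implementation):

use std::thread;
use std::time::Duration;

// Illustration only: a wrapper whose Drop either joins or detaches,
// matching the behaviour the doc comment describes.
struct JoinHandle<T> {
    inner: Option<thread::JoinHandle<T>>,
    allow_leak: bool,
}

impl<T> Drop for JoinHandle<T> {
    fn drop(&mut self) {
        let Some(handle) = self.inner.take() else { return };
        if self.allow_leak {
            // detach: drop the std handle and let the thread keep running
            drop(handle);
        } else {
            // join: block until the thread finishes
            let _ = handle.join();
        }
    }
}

fn main() {
    let worker = JoinHandle {
        inner: Some(thread::spawn(|| thread::sleep(Duration::from_millis(10)))),
        allow_leak: true, // what the prime-caches workers opt into
    };
    drop(worker); // returns immediately; the thread is detached
}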
