@@ -39,10 +39,10 @@ fileprivate enum TaskMetadata: DependencyTracker, Equatable {
       // Adding the dependency also elevates the index task's priorities.
       return true
     case (.index(let lhsUris), .index(let rhsUris)):
-      // Technically, we should be able to allow simultaneous indexing of the same file. When a file gets re-scheduled
-      // for indexing, all previous index invocations should get cancelled. But conceptually the code becomes simpler
-      // if we don't need to think about racing indexing tasks for the same file and it shouldn't have a performance
-      // impact in practice because of the cancellation described before.
+      // Technically, we should be able to allow simultaneous indexing of the same file. But conceptually the code
+      // becomes simpler if we don't need to think about racing indexing tasks for the same file and it shouldn't have
+      // a performance impact in practice because if a first task indexes a file, a subsequent index task for the same
+      // file will realize that the index is already up-to-date based on the file's mtime and early exit.
       return !lhsUris.intersection(rhsUris).isEmpty
     }
   }
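To illustrate the conflict rule in this hunk: two `.index` tasks are treated as dependent exactly when their URI sets overlap, so indexing of the same file is serialized rather than cancelled. Below is a minimal, self-contained sketch of that rule; the `DependencyTracker` protocol shown here is a simplified stand-in with a single hypothetical `isDependency(of:)` requirement, and URIs are plain strings rather than `DocumentURI`.

```swift
/// Simplified stand-in for the real protocol: a task can declare that it must
/// wait for another pending task to finish before it may run.
protocol DependencyTracker {
  func isDependency(of other: Self) -> Bool
}

fileprivate enum TaskMetadata: DependencyTracker, Equatable {
  case index(Set<String>)  // file URIs, simplified to strings here

  func isDependency(of other: TaskMetadata) -> Bool {
    switch (self, other) {
    case (.index(let lhsUris), .index(let rhsUris)):
      // Two index tasks conflict exactly when they touch a common file. The
      // later task still runs, but only after the earlier one finishes; its
      // mtime check then lets it exit early instead of re-indexing.
      return !lhsUris.intersection(rhsUris).isEmpty
    }
  }
}
```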
@@ -89,7 +89,7 @@ actor SyntacticTestIndex {
   /// Files that have been removed using `removeFileForIndex`.
   ///
   /// We need to keep track of these files because when the files get removed, there might be an in-progress indexing
-  /// operation running for that file. We need to ensure that this indexing operation doesn't write add the removed file
+  /// operation running for that file. We need to ensure that this indexing operation doesn't add the removed file
   /// back to `indexTests`.
   private var removedFiles: Set<DocumentURI> = []
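The race that `removedFiles` guards against is easiest to see in isolation. The following is a hypothetical, heavily simplified actor, not the actual `SyntacticTestIndex` API: the scan runs off the actor, so it can finish after the file has been removed, and the guard keeps the stale result from resurrecting the entry.

```swift
actor TestIndexSketch {
  private var indexTests: [String: [String]] = [:]  // URI -> discovered tests
  private var removedFiles: Set<String> = []

  func removeFileForIndex(_ uri: String) {
    indexTests[uri] = nil
    removedFiles.insert(uri)
  }

  /// Called by an indexing operation once it has finished scanning `uri`.
  /// The scan itself runs outside the actor, so the file may have been
  /// removed in the meantime.
  func finishIndexing(of uri: String, tests: [String]) {
    // Without this check, a stale in-flight scan would add the removed file
    // back to `indexTests`.
    guard !removedFiles.contains(uri) else { return }
    indexTests[uri] = tests
  }
}
```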
@@ -138,6 +138,10 @@ actor SyntacticTestIndex {

   /// Called when a list of files was updated. Re-scans those files.
   private func rescanFiles(_ uris: [DocumentURI]) {
+    // If we scan a file again, it might have been re-added after previously being removed. Remove it from the set of
+    // removed files.
+    removedFiles.subtract(uris)
+
     // Divide the files into multiple batches. This is more efficient than spawning a new task for every file, mostly
     // because it keeps the number of pending items in `indexingQueue` low and adding a new task to `indexingQueue` is
     // in O(number of pending tasks), since we need to scan for dependency edges to add, which would make scanning files
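The batching rationale can be sketched as follows. `enqueueIndexTask` here is a hypothetical stand-in for putting one task on `indexingQueue`: since each enqueue scans every pending task for dependency edges, enqueuing one task per batch instead of one per file keeps the pending count, and therefore that scan, small.

```swift
/// Sketch only: splits `uris` into batches and enqueues one index task per
/// batch. `enqueueIndexTask` stands in for the real queue, whose enqueue cost
/// is O(number of pending tasks) because of the dependency-edge scan.
func rescanInBatches(
  _ uris: [String],
  batchSize: Int = 20,  // illustrative value, not the real batch size
  enqueueIndexTask: (Set<String>) -> Void
) {
  var start = 0
  while start < uris.count {
    let end = min(start + batchSize, uris.count)
    // One pending task per batch rather than per file keeps the number of
    // pending items (and thus each enqueue's dependency scan) small.
    enqueueIndexTask(Set(uris[start..<end]))
    start = end
  }
}
```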