@@ -4,7 +4,7 @@ use std::{
     ops::Deref,
     path::{Path, PathBuf},
     sync::{
-        atomic::{AtomicU16, AtomicUsize, Ordering},
+        atomic::{AtomicU16, Ordering},
         Arc,
     },
     time::SystemTime,
@@ -86,7 +86,7 @@ impl super::Store {
             Ok(Some(self.collect_snapshot()))
         } else {
             // always compare to the latest state
-            // Nothing changed in the mean time, try to load another index…
+            // Nothing changed in the meantime, try to load another index…
            if self.load_next_index(index) {
                Ok(Some(self.collect_snapshot()))
            } else {
@@ -119,7 +119,7 @@ impl super::Store {
                 let slot = &self.files[index.slot_indices[slot_map_index]];
                 let _lock = slot.write.lock();
                 if slot.generation.load(Ordering::SeqCst) > index.generation {
-                    // There is a disk consolidation in progress which just overwrote a slot that cold be disposed with some other
+                    // There is a disk consolidation in progress which just overwrote a slot that could be disposed with some other
                     // index, one we didn't intend to load.
                     // Continue with the next slot index in the hope there is something else we can do…
                     continue 'retry_with_next_slot_index;
@@ -128,14 +128,18 @@ impl super::Store {
                 let bundle_mut = Arc::make_mut(&mut bundle);
                 if let Some(files) = bundle_mut.as_mut() {
                     // these are always expected to be set, unless somebody raced us. We handle this later by retrying.
-                    let _loaded_count = IncOnDrop(&index.loaded_indices);
-                    match files.load_index(self.object_hash) {
+                    let res = {
+                        let res = files.load_index(self.object_hash);
+                        slot.files.store(bundle);
+                        index.loaded_indices.fetch_add(1, Ordering::SeqCst);
+                        res
+                    };
+                    match res {
                         Ok(_) => {
-                            slot.files.store(bundle);
                             break 'retry_with_next_slot_index;
                         }
-                        Err(_) => {
-                            slot.files.store(bundle);
+                        Err(_err) => {
+                            gix_features::trace::error!(err=?_err, "Failed to load index file - some objects may seem to not exist");
                             continue 'retry_with_next_slot_index;
                         }
                     }
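
Note on the reordering above: the load result is captured, the slot is stored back, and only then is `loaded_indices` incremented, so whoever observes the counter also sees the stored slot. A minimal, std-only sketch of that publish-then-announce ordering, with invented `Slot` and counter names rather than the crate's real types:

```rust
use std::sync::{
    atomic::{AtomicUsize, Ordering},
    Arc, Mutex,
};
use std::thread;

// Hypothetical stand-ins for a store slot and the shared "loaded" counter.
struct Slot {
    files: Mutex<Option<String>>, // stands in for the loaded index data
}

fn main() {
    let slot = Arc::new(Slot {
        files: Mutex::new(None),
    });
    let loaded_indices = Arc::new(AtomicUsize::new(0));

    let loader = {
        let slot = Arc::clone(&slot);
        let loaded_indices = Arc::clone(&loaded_indices);
        thread::spawn(move || {
            // Publish the slot contents first…
            *slot.files.lock().unwrap() = Some("index data".into());
            // …then announce it, so anyone who observes the incremented
            // counter is guaranteed to find the slot populated.
            loaded_indices.fetch_add(1, Ordering::SeqCst);
        })
    };

    // A reader waits for the announcement before looking at the slot.
    while loaded_indices.load(Ordering::SeqCst) == 0 {
        thread::yield_now();
    }
    assert!(slot.files.lock().unwrap().is_some());
    loader.join().unwrap();
}
```
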
@@ -145,9 +149,14 @@ impl super::Store {
                 // There can be contention as many threads start working at the same time and take all the
                 // slots to load indices for. Some threads might just be left-over and have to wait for something
                 // to change.
-                let num_load_operations = index.num_indices_currently_being_loaded.deref();
                 // TODO: potentially hot loop - could this be a condition variable?
-                while num_load_operations.load(Ordering::Relaxed) != 0 {
+                // This is a timing-based fix for the case that the `num_indices_being_loaded` isn't yet incremented,
+                // and we might break out here without actually waiting for the loading operation. Then we'd fail to
+                // observe a change and the underlying handler would not have all the indices it needs at its disposal.
+                // Yielding means we will definitely lose enough time to observe the ongoing operation,
+                // or its effects.
+                std::thread::yield_now();
+                while index.num_indices_currently_being_loaded.load(Ordering::SeqCst) != 0 {
                     std::thread::yield_now()
                 }
                 break 'retry_with_next_slot_index;
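
The retained TODO asks whether this hot yield loop could become a condition variable. One possible shape for such a tracker, purely as an illustrative sketch with invented names and not something the crate currently provides:

```rust
use std::sync::{Arc, Condvar, Mutex};
use std::thread;

// Loaders register themselves under a mutex and notify waiters when the last
// one finishes, instead of waiters spinning on an atomic with `yield_now`.
struct LoadTracker {
    in_flight: Mutex<usize>,
    done: Condvar,
}

impl LoadTracker {
    fn start_load(&self) {
        *self.in_flight.lock().unwrap() += 1;
    }

    fn finish_load(&self) {
        let mut count = self.in_flight.lock().unwrap();
        *count -= 1;
        if *count == 0 {
            self.done.notify_all();
        }
    }

    /// Block until no load operation is in flight any more.
    fn wait_until_idle(&self) {
        let mut count = self.in_flight.lock().unwrap();
        while *count != 0 {
            count = self.done.wait(count).unwrap();
        }
    }
}

fn main() {
    let tracker = Arc::new(LoadTracker {
        in_flight: Mutex::new(0),
        done: Condvar::new(),
    });
    tracker.start_load();
    let loader = {
        let tracker = Arc::clone(&tracker);
        thread::spawn(move || tracker.finish_load())
    };
    tracker.wait_until_idle();
    loader.join().unwrap();
}
```
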
@@ -197,7 +206,7 @@ impl super::Store {
 
         // We might not be able to detect by pointer if the state changed, as this itself is racy. So we keep track of double-initialization
         // using a flag, which means that if `needs_init` was true we saw the index uninitialized once, but now that we are here it's
-        // initialized meaning that somebody was faster and we couldn't detect it by comparisons to the index.
+        // initialized meaning that somebody was faster, and we couldn't detect it by comparisons to the index.
         // If so, make sure we collect the snapshot instead of returning None in case nothing actually changed, which is likely with a
         // race like this.
         if !was_uninitialized && needs_init {
@@ -397,18 +406,19 @@ impl super::Store {
                         // generation stays the same, as it's the same value still but scheduled for eventual removal.
                     }
                 } else {
+                    // set the generation before we actually change the value, otherwise readers of old generations could observe the new one.
+                    // We'd rather have them turn around here and update their index, which, by that time, might actually already be available.
+                    // If not, they would fail, unable to load a pack or index they need, but that's preferred over returning wrong objects.
+                    // Safety: can't race as we hold the lock; we have to set the generation beforehand to help prevent others from observing the value.
+                    slot.generation.store(generation, Ordering::SeqCst);
                     *files_mut = None;
                 };
                 slot.files.store(files);
-                if !needs_stable_indices {
-                    // Not racy due to lock, generation must be set after unsetting the slot value AND storing it.
-                    slot.generation.store(generation, Ordering::SeqCst);
-                }
             }
 
             let new_index = self.index.load();
             Ok(if index.state_id() == new_index.state_id() {
-                // there was no change, and nothing was loaded in the meantime, reflect that in the return value to not get into loops
+                // there was no change, and nothing was loaded in the meantime, reflect that in the return value to not get into loops.
                 None
             } else {
                 if load_new_index {
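
The moved `slot.generation.store(...)` establishes a "bump the generation first, then tear down the contents" discipline: a reader built against an older generation sees the bump and backs off rather than observing a half-cleared slot. A simplified model of that discipline, using std types only and names invented for the example:

```rust
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Mutex;

// Illustrative slot with a generation counter guarding its contents.
struct Slot {
    generation: AtomicUsize,
    files: Mutex<Option<String>>,
}

/// Writer side: bump the generation *before* clearing the contents, so a
/// reader that still works against an older generation turns around instead
/// of using a slot that is being dismantled.
fn dispose(slot: &Slot, new_generation: usize) {
    slot.generation.store(new_generation, Ordering::SeqCst);
    *slot.files.lock().unwrap() = None;
}

/// Reader side: refuse to use a slot whose generation is newer than the index
/// snapshot the reader was built from.
fn try_read(slot: &Slot, reader_generation: usize) -> Option<String> {
    if slot.generation.load(Ordering::SeqCst) > reader_generation {
        return None; // stale view - the caller should refresh its index instead
    }
    slot.files.lock().unwrap().clone()
}

fn main() {
    let slot = Slot {
        generation: AtomicUsize::new(1),
        files: Mutex::new(Some("pack index".into())),
    };
    assert_eq!(try_read(&slot, 1).as_deref(), Some("pack index"));
    dispose(&slot, 2);
    assert_eq!(try_read(&slot, 1), None);
}
```
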
@@ -619,34 +629,44 @@ impl super::Store {
     }
 
     pub(crate) fn collect_snapshot(&self) -> Snapshot {
+        // We don't observe changes-on-disk in our 'wait-for-load' loop.
+        // That loop is meant to help assure the marker (which includes the amount of loaded indices) matches
+        // the actual amount of indices we collect.
         let index = self.index.load();
-        let indices = if index.is_initialized() {
-            index
-                .slot_indices
-                .iter()
-                .map(|idx| (*idx, &self.files[*idx]))
-                .filter_map(|(id, file)| {
-                    let lookup = match (**file.files.load()).as_ref()? {
-                        types::IndexAndPacks::Index(bundle) => handle::SingleOrMultiIndex::Single {
-                            index: bundle.index.loaded()?.clone(),
-                            data: bundle.data.loaded().cloned(),
-                        },
-                        types::IndexAndPacks::MultiIndex(multi) => handle::SingleOrMultiIndex::Multi {
-                            index: multi.multi_index.loaded()?.clone(),
-                            data: multi.data.iter().map(|f| f.loaded().cloned()).collect(),
-                        },
-                    };
-                    handle::IndexLookup { file: lookup, id }.into()
-                })
-                .collect()
-        } else {
-            Vec::new()
-        };
+        loop {
+            if index.num_indices_currently_being_loaded.deref().load(Ordering::SeqCst) != 0 {
+                std::thread::yield_now();
+                continue;
+            }
+            let marker = index.marker();
+            let indices = if index.is_initialized() {
+                index
+                    .slot_indices
+                    .iter()
+                    .map(|idx| (*idx, &self.files[*idx]))
+                    .filter_map(|(id, file)| {
+                        let lookup = match (**file.files.load()).as_ref()? {
+                            types::IndexAndPacks::Index(bundle) => handle::SingleOrMultiIndex::Single {
+                                index: bundle.index.loaded()?.clone(),
+                                data: bundle.data.loaded().cloned(),
+                            },
+                            types::IndexAndPacks::MultiIndex(multi) => handle::SingleOrMultiIndex::Multi {
+                                index: multi.multi_index.loaded()?.clone(),
+                                data: multi.data.iter().map(|f| f.loaded().cloned()).collect(),
+                            },
+                        };
+                        handle::IndexLookup { file: lookup, id }.into()
+                    })
+                    .collect()
+            } else {
+                Vec::new()
+            };
 
-        Snapshot {
-            indices,
-            loose_dbs: Arc::clone(&index.loose_dbs),
-            marker: index.marker(),
+            return Snapshot {
+                indices,
+                loose_dbs: Arc::clone(&index.loose_dbs),
+                marker,
+            };
         }
     }
 }
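
The new loop in `collect_snapshot` waits until no index load is in flight before taking the marker, so the marker agrees with the indices that end up in the snapshot. A stripped-down sketch of that idea, using invented stand-in types rather than the store's actual `SlotMapIndex` and `Snapshot`:

```rust
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Mutex;

// Simplified model of the shared index state: a count of loads in flight and
// the set of indices loaded so far. Names are illustrative only.
struct SharedIndex {
    loads_in_flight: AtomicUsize,
    loaded: Mutex<Vec<String>>,
}

struct SnapshotModel {
    indices: Vec<String>,
    marker_count: usize,
}

fn collect_snapshot(index: &SharedIndex) -> SnapshotModel {
    loop {
        // Keep yielding while any load operation is still in progress.
        if index.loads_in_flight.load(Ordering::SeqCst) != 0 {
            std::thread::yield_now();
            continue;
        }
        // No load was observed in flight: copy the indices and derive the
        // marker from exactly what was copied, so the two cannot disagree.
        let indices = index.loaded.lock().unwrap().clone();
        let marker_count = indices.len();
        return SnapshotModel { indices, marker_count };
    }
}

fn main() {
    let index = SharedIndex {
        loads_in_flight: AtomicUsize::new(0),
        loaded: Mutex::new(vec!["idx-1".into(), "idx-2".into()]),
    };
    let snapshot = collect_snapshot(&index);
    assert_eq!(snapshot.marker_count, snapshot.indices.len());
}
```
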
@@ -669,13 +689,6 @@ impl<'a> Drop for IncOnNewAndDecOnDrop<'a> {
     }
 }
 
-struct IncOnDrop<'a>(&'a AtomicUsize);
-impl<'a> Drop for IncOnDrop<'a> {
-    fn drop(&mut self) {
-        self.0.fetch_add(1, Ordering::SeqCst);
-    }
-}
-
 pub(crate) enum Either {
     IndexPath(PathBuf),
     MultiIndexFile(Arc<gix_pack::multi_index::File>),