@@ -41,6 +41,7 @@ import (
 	"golang.org/x/tools/gopls/internal/util/astutil"
 	"golang.org/x/tools/gopls/internal/util/bug"
 	"golang.org/x/tools/gopls/internal/util/frob"
+	"golang.org/x/tools/gopls/internal/util/maps"
 	"golang.org/x/tools/internal/event"
 	"golang.org/x/tools/internal/event/tag"
 	"golang.org/x/tools/internal/facts"
@@ -177,8 +178,6 @@ func (s *Snapshot) Analyze(ctx context.Context, pkgs map[PackageID]*metadata.Pac
 
 	var tagStr string // sorted comma-separated list of PackageIDs
 	{
-		// TODO(adonovan): replace with a generic map[S]any -> string
-		// function in the tag package, and use maps.Keys + slices.Sort.
 		keys := make([]string, 0, len(pkgs))
 		for id := range pkgs {
			keys = append(keys, string(id))
@@ -303,10 +302,10 @@ func (s *Snapshot) Analyze(ctx context.Context, pkgs map[PackageID]*metadata.Pac
 		}
 		// Add edge from predecessor.
 		if from != nil {
-			atomic.AddInt32(&from.unfinishedSuccs, 1) // TODO(adonovan): use generics
+			from.unfinishedSuccs.Add(+1) // incref
 			an.preds = append(an.preds, from)
 		}
-		atomic.AddInt32(&an.unfinishedPreds, 1)
+		an.unfinishedPreds.Add(+1)
 		return an, nil
 	}
 
@@ -387,7 +386,7 @@ func (s *Snapshot) Analyze(ctx context.Context, pkgs map[PackageID]*metadata.Pac
 	// prevents workers from enqeuing, and thus finishing, and thus allowing the
 	// group to make progress: deadlock.
 	limiter := make(chan unit, runtime.GOMAXPROCS(0))
-	var completed int64
+	var completed atomic.Int64
 
 	var enqueue func(*analysisNode)
 	enqueue = func(an *analysisNode) {
@@ -399,13 +398,13 @@ func (s *Snapshot) Analyze(ctx context.Context, pkgs map[PackageID]*metadata.Pac
 			if err != nil {
 				return err // cancelled, or failed to produce a package
 			}
-			maybeReport(atomic.AddInt64(&completed, 1))
+			maybeReport(completed.Add(1))
 			an.summary = summary
 
 			// Notify each waiting predecessor,
 			// and enqueue it when it becomes a leaf.
 			for _, pred := range an.preds {
-				if atomic.AddInt32(&pred.unfinishedSuccs, -1) == 0 {
+				if pred.unfinishedSuccs.Add(-1) == 0 { // decref
 					enqueue(pred)
 				}
 			}
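
The two hunks above replace bare counters updated via atomic.AddInt32/AddInt64 with the typed atomic.Int32/atomic.Int64 wrappers, but the scheduling idea is unchanged: each node counts its unfinished successors, and when a worker finishes a node it decrements that counter in every predecessor, enqueuing any predecessor that reaches zero. The following is a minimal, self-contained sketch of that pattern with a hypothetical node type and work queue, not gopls's actual code:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// node is a hypothetical stand-in for analysisNode: it tracks how many
// of its dependencies (successors) have not finished yet.
type node struct {
	name            string
	preds           []*node      // nodes that depend on this one
	unfinishedSuccs atomic.Int32 // dependencies still running
}

func main() {
	// c depends on a and b; a and b are leaves.
	a := &node{name: "a"}
	b := &node{name: "b"}
	c := &node{name: "c"}
	c.unfinishedSuccs.Store(2)
	a.preds = []*node{c}
	b.preds = []*node{c}

	var wg sync.WaitGroup
	var enqueue func(*node)
	enqueue = func(n *node) {
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println("run", n.name) // "analyze" the package
			// Notify predecessors; enqueue any that became a leaf.
			for _, p := range n.preds {
				if p.unfinishedSuccs.Add(-1) == 0 {
					enqueue(p)
				}
			}
		}()
	}
	enqueue(a) // start at the leaves
	enqueue(b)
	wg.Wait() // prints "run a", "run b" (in either order), then "run c"
}
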
@@ -427,6 +426,18 @@ func (s *Snapshot) Analyze(ctx context.Context, pkgs map[PackageID]*metadata.Pac
 		return nil, err // cancelled, or failed to produce a package
 	}
 
+	// Inv: all root nodes now have a summary (#66732).
+	//
+	// We know this is falsified empirically. This means either
+	// the summary was "successfully" set to nil (above), or there
+	// is a problem with the graph such that enqueuing leaves does
+	// not lead to completion of roots (or an error).
+	for _, root := range roots {
+		if root.summary == nil {
+			bug.Report("root analysisNode has nil summary")
+		}
+	}
+
 	// Report diagnostics only from enabled actions that succeeded.
 	// Errors from creating or analyzing packages are ignored.
 	// Diagnostics are reported in the order of the analyzers argument.
@@ -458,6 +469,7 @@ func (s *Snapshot) Analyze(ctx context.Context, pkgs map[PackageID]*metadata.Pac
 		}
 
 		// Inv: root.summary is the successful result of run (via runCached).
+		// TODO(adonovan): fix: root.summary is sometimes nil! (#66732).
 		summary, ok := root.summary.Actions[stableNames[a]]
 		if summary == nil {
 			panic(fmt.Sprintf("analyzeSummary.Actions[%q] = (nil, %t); got %v (#60551)",
@@ -475,7 +487,7 @@ func (s *Snapshot) Analyze(ctx context.Context, pkgs map[PackageID]*metadata.Pac
 }
 
 func (an *analysisNode) decrefPreds() {
-	if atomic.AddInt32(&an.unfinishedPreds, -1) == 0 {
+	if an.unfinishedPreds.Add(-1) == 0 {
 		an.summary.Actions = nil
 	}
 }
@@ -510,8 +522,8 @@ type analysisNode struct {
 	analyzers       []*analysis.Analyzer        // set of analyzers to run
 	preds           []*analysisNode             // graph edges:
 	succs           map[PackageID]*analysisNode // (preds -> self -> succs)
-	unfinishedSuccs int32
-	unfinishedPreds int32 // effectively a summary.Actions refcount
+	unfinishedSuccs atomic.Int32
+	unfinishedPreds atomic.Int32 // effectively a summary.Actions refcount
 	allDeps         map[PackagePath]*analysisNode // all dependencies including self
 	exportDeps      map[PackagePath]*analysisNode // subset of allDeps ref'd by export data (+self)
 	summary         *analyzeSummary               // serializable result of analyzing this package
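
For reference, the field change above swaps the "bare int32 plus atomic.AddInt32(&x, n)" idiom for the typed wrappers that sync/atomic gained in Go 1.19; the wrapper carries its own methods, so callers cannot accidentally read or write the counter non-atomically. A small illustrative comparison (the node types here are invented for the example):

package main

import (
	"fmt"
	"sync/atomic"
)

// Old style (before this change): a bare int32 field that every caller
// must remember to touch only through the atomic.* functions.
type oldNode struct {
	unfinishedPreds int32
}

// New style: the typed wrapper from sync/atomic; all access goes
// through its Add/Load/Store methods.
type newNode struct {
	unfinishedPreds atomic.Int32
}

func main() {
	o := &oldNode{}
	atomic.AddInt32(&o.unfinishedPreds, +1)           // incref, old style
	fmt.Println(atomic.LoadInt32(&o.unfinishedPreds)) // 1

	n := &newNode{}
	n.unfinishedPreds.Add(+1)             // incref, new style
	fmt.Println(n.unfinishedPreds.Load()) // 1
}
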
@@ -664,6 +676,9 @@ func (an *analysisNode) runCached(ctx context.Context) (*analyzeSummary, error)
 	if data, err := filecache.Get(cacheKind, key); err == nil {
 		// cache hit
 		analyzeSummaryCodec.Decode(data, &summary)
+		if summary == nil { // debugging #66732
+			bug.Reportf("analyzeSummaryCodec.Decode yielded nil *analyzeSummary")
+		}
 	} else if err != filecache.ErrNotFound {
 		return nil, bug.Errorf("internal error reading shared cache: %v", err)
 	} else {
@@ -673,8 +688,11 @@ func (an *analysisNode) runCached(ctx context.Context) (*analyzeSummary, error)
 		if err != nil {
 			return nil, err
 		}
+		if summary == nil { // debugging #66732 (can't happen)
+			bug.Reportf("analyzeNode.run returned nil *analyzeSummary")
+		}
 
-		atomic.AddInt32(&an.unfinishedPreds, +1) // incref
+		an.unfinishedPreds.Add(+1) // incref
 		go func() {
			defer an.decrefPreds() //decref
 
@@ -742,13 +760,11 @@ func (an *analysisNode) cacheKey() [sha256.Size]byte {
 	}
 
 	// vdeps, in PackageID order
-	depIDs := make([]string, 0, len(an.succs))
-	for depID := range an.succs {
-		depIDs = append(depIDs, string(depID))
-	}
-	sort.Strings(depIDs) // TODO(adonovan): avoid conversions by using slices.Sort[PackageID]
+	depIDs := maps.Keys(an.succs)
+	// TODO(adonovan): use go1.2x slices.Sort(depIDs).
+	sort.Slice(depIDs, func(i, j int) bool { return depIDs[i] < depIDs[j] })
 	for _, depID := range depIDs {
-		vdep := an.succs[PackageID(depID)]
+		vdep := an.succs[depID]
 		fmt.Fprintf(hasher, "dep: %s\n", vdep.mp.PkgPath)
 		fmt.Fprintf(hasher, "export: %s\n", vdep.summary.DeepExportHash)
 
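
This last hunk leans on the Keys helper from gopls's internal util/maps package (added to the imports in the first hunk), so the sort runs directly on []PackageID instead of round-tripping through []string. The underlying technique is "collect the map keys, sort them, then hash in that stable order", since Go map iteration order is randomized. A rough standard-library-only sketch, with invented dependency IDs and hashes purely for illustration:

package main

import (
	"crypto/sha256"
	"fmt"
	"sort"
)

func main() {
	// Hypothetical stand-in for an.succs: dependency ID -> export hash.
	succs := map[string]string{
		"b": "hash-of-b",
		"a": "hash-of-a",
		"c": "hash-of-c",
	}

	// Map iteration order is random, so collect and sort the keys
	// before hashing to get a deterministic cache key.
	depIDs := make([]string, 0, len(succs))
	for id := range succs {
		depIDs = append(depIDs, id)
	}
	sort.Strings(depIDs)

	hasher := sha256.New()
	for _, id := range depIDs {
		fmt.Fprintf(hasher, "dep: %s\n", id)
		fmt.Fprintf(hasher, "export: %s\n", succs[id])
	}
	fmt.Printf("%x\n", hasher.Sum(nil)) // same output on every run
}
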