Commit 97f9dd6

Merge pull request #11328 from sivchari/replace-atomic-package
🌱 use latest atomic variable instead of old one
2 parents 214ab6d + b9a4ffe commit 97f9dd6

2 files changed: +15 −13 lines
test/infrastructure/inmemory/pkg/runtime/cache/cache_test.go (+11 −9)
@@ -53,10 +53,12 @@ func Test_cache_scale(t *testing.T) {
 	operationFrequencyForResourceGroup := 10 * time.Millisecond
 	testDuration := 2 * time.Minute
 
-	var createCount uint64
-	var getCount uint64
-	var listCount uint64
-	var deleteCount uint64
+	var (
+		createCount atomic.Uint64
+		getCount    atomic.Uint64
+		listCount   atomic.Uint64
+		deleteCount atomic.Uint64
+	)
 
 	ctx, cancel := context.WithCancel(context.TODO())
 	defer cancel()
@@ -95,17 +97,17 @@ func Test_cache_scale(t *testing.T) {
 				err := c.Create(resourceGroup, machine)
 				if apierrors.IsAlreadyExists(err) {
 					if err = c.Get(resourceGroup, types.NamespacedName{Name: machineName(item)}, machine); err == nil {
-						atomic.AddUint64(&getCount, 1)
+						getCount.Add(1)
 						continue
 					}
 				}
 				g.Expect(err).ToNot(HaveOccurred())
-				atomic.AddUint64(&createCount, 1)
+				createCount.Add(1)
 			case 1: // list
 				obj := &cloudv1.CloudMachineList{}
 				err := c.List(resourceGroup, obj)
 				g.Expect(err).ToNot(HaveOccurred())
-				atomic.AddUint64(&listCount, 1)
+				listCount.Add(1)
 			case 2: // delete
 				g.Expect(err).ToNot(HaveOccurred())
 				machine := &cloudv1.CloudMachine{
@@ -118,7 +120,7 @@ func Test_cache_scale(t *testing.T) {
 					continue
 				}
 				g.Expect(err).ToNot(HaveOccurred())
-				atomic.AddUint64(&deleteCount, 1)
+				deleteCount.Add(1)
 			}
 
 		case <-ctx.Done():
@@ -130,7 +132,7 @@ func Test_cache_scale(t *testing.T) {
 
 	time.Sleep(testDuration)
 
-	t.Log("createCount", createCount, "getCount", getCount, "listCount", listCount, "deleteCount", deleteCount)
+	t.Log("createCount", createCount.Load(), "getCount", getCount.Load(), "listCount", listCount.Load(), "deleteCount", deleteCount.Load())
 
 	cancel()
 }
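
Note (not stated in the PR itself): unlike the plain uint64 counters, the typed atomics must not be copied after first use, and go vet's copylocks check would flag passing them by value; that is why the t.Log call above now reads each counter through .Load(). The zero value of atomic.Uint64 is ready to use, so the var ( ... ) block needs no extra initialization.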

test/infrastructure/inmemory/pkg/runtime/cache/gc.go (+4 −4)
@@ -41,20 +41,20 @@ func (c *cache) startGarbageCollector(ctx context.Context) error {
 	ctx = ctrl.LoggerInto(ctx, log)
 
 	log.Info("Starting garbage collector queue")
-	c.garbageCollectorQueue = workqueue.NewTypedRateLimitingQueue[any](workqueue.DefaultTypedControllerRateLimiter[any]())
+	c.garbageCollectorQueue = workqueue.NewTypedRateLimitingQueue(workqueue.DefaultTypedControllerRateLimiter[any]())
 	go func() {
 		<-ctx.Done()
 		c.garbageCollectorQueue.ShutDown()
 	}()
 
-	var workers int64
+	var workers atomic.Int64
 	go func() {
 		log.Info("Starting garbage collector workers", "count", c.garbageCollectorConcurrency)
 		wg := &sync.WaitGroup{}
 		wg.Add(c.garbageCollectorConcurrency)
 		for range c.garbageCollectorConcurrency {
 			go func() {
-				atomic.AddInt64(&workers, 1)
+				workers.Add(1)
 				defer wg.Done()
 				for c.processGarbageCollectorWorkItem(ctx) {
 				}
@@ -65,7 +65,7 @@ func (c *cache) startGarbageCollector(ctx context.Context) error {
 	}()
 
 	if err := wait.PollUntilContextTimeout(ctx, 50*time.Millisecond, 5*time.Second, false, func(context.Context) (done bool, err error) {
-		if atomic.LoadInt64(&workers) < int64(c.garbageCollectorConcurrency) {
+		if workers.Load() < int64(c.garbageCollectorConcurrency) {
 			return false, nil
 		}
 		return true, nil
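
As a side note, the first gc.go hunk also drops the explicit [any] type argument on NewTypedRateLimitingQueue, since the compiler can infer it from the DefaultTypedControllerRateLimiter[any]() argument. Below is a minimal, self-contained sketch (illustrative names, not the gc.go code) of the worker-readiness pattern these hunks migrate: each worker bumps an atomic.Int64 when it starts, and the caller polls Load() until every worker has checked in, standing in for the wait.PollUntilContextTimeout call in the real code.

	package main

	import (
		"fmt"
		"sync/atomic"
		"time"
	)

	func main() {
		const concurrency = 4
		var workers atomic.Int64

		for range concurrency {
			go func() {
				workers.Add(1) // signal that this worker has started
				// ... the worker loop would run here ...
			}()
		}

		// Simplified stand-in for wait.PollUntilContextTimeout: keep
		// polling until every worker has reported in.
		for workers.Load() < int64(concurrency) {
			time.Sleep(10 * time.Millisecond)
		}
		fmt.Println("workers ready:", workers.Load())
	}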
