Commit 7c3b952

fix: golangci-lint
Signed-off-by: sivchari <[email protected]>
1 parent 7c1b967 commit 7c3b952

File tree

9 files changed: +81 -77 lines changed

agent/main.go

+4-3
@@ -5,7 +5,6 @@ import (
     "crypto/sha256"
     "encoding/base64"
     "fmt"
-    "github.com/argoproj/gitops-engine/pkg/utils/text"
     "net/http"
     "os"
     "os/exec"
@@ -15,11 +14,13 @@ import (
     "text/tabwriter"
     "time"

+    "github.com/argoproj/gitops-engine/pkg/utils/text"
+
     "github.com/go-logr/logr"
     "github.com/spf13/cobra"
     "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
     "k8s.io/client-go/tools/clientcmd"
-    "k8s.io/klog/v2/klogr"
+    "k8s.io/klog/v2/textlogger"

     "github.com/argoproj/gitops-engine/pkg/cache"
     "github.com/argoproj/gitops-engine/pkg/engine"
@@ -37,7 +38,7 @@ const (
 )

 func main() {
-    log := klogr.New() // Delegates to klog
+    log := textlogger.NewLogger(textlogger.NewConfig())
     err := newCmd(log).Execute()
     checkError(err, log)
 }
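
The recurring change in this commit is the move off k8s.io/klog/v2/klogr, which is deprecated and flagged by golangci-lint, onto k8s.io/klog/v2/textlogger. Both produce a logr.Logger, so call sites only change at construction. A minimal sketch of the new form, assuming current klog v2 APIs; the Verbosity and Output options and their values are illustrative, not what this commit configures:

package main

import (
	"os"

	"k8s.io/klog/v2/textlogger"
)

func main() {
	// Deprecated form this commit removes: log := klogr.New()
	// Replacement: build a logr.Logger from a textlogger Config.
	cfg := textlogger.NewConfig(
		textlogger.Verbosity(2),      // illustrative: emit V(n) logs up to n=2
		textlogger.Output(os.Stderr), // illustrative: klog-style text to stderr
	)
	log := textlogger.NewLogger(cfg)

	log.Info("engine starting", "namespace", "default")
	log.V(2).Info("verbose detail", "step", "init")
}

Unlike klogr.New(), the textlogger config is self-contained, so verbosity can be set per logger instead of through global klog flags.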

pkg/cache/cluster.go

+2-2
@@ -31,7 +31,7 @@ import (
     "k8s.io/client-go/tools/pager"
     watchutil "k8s.io/client-go/tools/watch"
     "k8s.io/client-go/util/retry"
-    "k8s.io/klog/v2/klogr"
+    "k8s.io/klog/v2/textlogger"
     "k8s.io/kubectl/pkg/util/openapi"

     "github.com/argoproj/gitops-engine/pkg/utils/kube"
@@ -144,7 +144,7 @@ type ListRetryFunc func(err error) bool

 // NewClusterCache creates new instance of cluster cache
 func NewClusterCache(config *rest.Config, opts ...UpdateSettingsFunc) *clusterCache {
-    log := klogr.New()
+    log := textlogger.NewLogger(textlogger.NewConfig())
     cache := &clusterCache{
         settings: Settings{ResourceHealthOverride: &noopSettings{}, ResourcesFilter: &noopSettings{}},
         apisMeta: make(map[schema.GroupKind]*apiMeta),

pkg/diff/diff_options.go

+2-2
@@ -6,7 +6,7 @@ import (
     "github.com/go-logr/logr"
     "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
     "k8s.io/apimachinery/pkg/util/managedfields"
-    "k8s.io/klog/v2/klogr"
+    "k8s.io/klog/v2/textlogger"
     cmdutil "k8s.io/kubectl/pkg/cmd/util"
 )

@@ -31,7 +31,7 @@ func applyOptions(opts []Option) options {
         ignoreAggregatedRoles: false,
         ignoreMutationWebhook: true,
         normalizer:            GetNoopNormalizer(),
-        log:                   klogr.New(),
+        log:                   textlogger.NewLogger(textlogger.NewConfig()),
     }
     for _, opt := range opts {
         opt(&o)
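
The default logger sits inside applyOptions because the diff package follows the functional-options pattern: options start from defaults and each Option overrides one field, as WithLogr does in the test file below. A self-contained sketch of that pattern under illustrative names (the real options struct has more fields than shown here):

package main

import (
	"fmt"

	"github.com/go-logr/logr"
	"k8s.io/klog/v2/textlogger"
)

// options holds the configurable state; fields here are illustrative.
type options struct {
	log                   logr.Logger
	ignoreAggregatedRoles bool
}

// Option mutates a single field, mirroring diff's WithLogr-style setters.
type Option func(*options)

func WithLogr(log logr.Logger) Option {
	return func(o *options) { o.log = log }
}

func IgnoreAggregatedRoles(ignore bool) Option {
	return func(o *options) { o.ignoreAggregatedRoles = ignore }
}

func applyOptions(opts []Option) options {
	// Seed defaults first; the textlogger-backed logger replaces klogr.New().
	o := options{
		log: textlogger.NewLogger(textlogger.NewConfig()),
	}
	for _, opt := range opts {
		opt(&o)
	}
	return o
}

func main() {
	o := applyOptions([]Option{IgnoreAggregatedRoles(false)})
	o.log.Info("diff configured")
	fmt.Println(o.ignoreAggregatedRoles)
}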

pkg/diff/diff_test.go

+2-2
@@ -26,7 +26,7 @@ import (
     "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/apimachinery/pkg/util/intstr"
     "k8s.io/apimachinery/pkg/util/managedfields"
-    "k8s.io/klog/v2/klogr"
+    "k8s.io/klog/v2/textlogger"
     openapiproto "k8s.io/kube-openapi/pkg/util/proto"
     "sigs.k8s.io/yaml"
 )
@@ -1203,7 +1203,7 @@ spec:

 func diffOptionsForTest() []Option {
     return []Option{
-        WithLogr(klogr.New()),
+        WithLogr(textlogger.NewLogger(textlogger.NewConfig())),
         IgnoreAggregatedRoles(false),
     }
 }

pkg/engine/engine_options.go

+2-2
@@ -2,7 +2,7 @@ package engine

 import (
     "github.com/go-logr/logr"
-    "k8s.io/klog/v2/klogr"
+    "k8s.io/klog/v2/textlogger"

     "github.com/argoproj/gitops-engine/pkg/utils/kube"
     "github.com/argoproj/gitops-engine/pkg/utils/tracing"
@@ -16,7 +16,7 @@ type options struct {
 }

 func applyOptions(opts []Option) options {
-    log := klogr.New()
+    log := textlogger.NewLogger(textlogger.NewConfig())
     o := options{
         log: log,
         kubectl: &kube.KubectlCmd{

pkg/sync/sync_context.go

+20-20
@@ -22,7 +22,7 @@ import (
     "k8s.io/client-go/dynamic"
     "k8s.io/client-go/rest"
     "k8s.io/client-go/util/retry"
-    "k8s.io/klog/v2/klogr"
+    "k8s.io/klog/v2/textlogger"
     cmdutil "k8s.io/kubectl/pkg/cmd/util"
     "k8s.io/kubectl/pkg/util/openapi"

@@ -233,7 +233,7 @@ func NewSyncContext(
         kubectl:     kubectl,
         resourceOps: resourceOps,
         namespace:   namespace,
-        log:         klogr.New(),
+        log:         textlogger.NewLogger(textlogger.NewConfig()),
         validate:    true,
         startedAt:   time.Now(),
         syncRes:     map[string]common.ResourceSyncResult{},
@@ -459,8 +459,8 @@ func (sc *syncContext) Sync() {
     // if pruned tasks pending deletion, then wait...
     prunedTasksPendingDelete := tasks.Filter(func(t *syncTask) bool {
         if t.pruned() && t.liveObj != nil {
-          return t.liveObj.GetDeletionTimestamp() != nil
-        }
+            return t.liveObj.GetDeletionTimestamp() != nil
+        }
         return false
     })
     if prunedTasksPendingDelete.Len() > 0 {
@@ -761,31 +761,31 @@ func (sc *syncContext) getSyncTasks() (_ syncTasks, successful bool) {
     // for prune tasks, modify the waves for proper cleanup i.e reverse of sync wave (creation order)
     pruneTasks := make(map[int][]*syncTask)
     for _, task := range tasks {
-      if task.isPrune() {
-        pruneTasks[task.wave()] = append(pruneTasks[task.wave()], task)
-      }
+        if task.isPrune() {
+            pruneTasks[task.wave()] = append(pruneTasks[task.wave()], task)
+        }
     }
-
+
     var uniquePruneWaves []int
     for k := range pruneTasks {
-      uniquePruneWaves = append(uniquePruneWaves, k)
+        uniquePruneWaves = append(uniquePruneWaves, k)
     }
     sort.Ints(uniquePruneWaves)
-
+
     // reorder waves for pruning tasks using symmetric swap on prune waves
     n := len(uniquePruneWaves)
     for i := 0; i < n/2; i++ {
-      // waves to swap
-      startWave := uniquePruneWaves[i]
-      endWave := uniquePruneWaves[n-1-i]
-
-      for _, task := range pruneTasks[startWave] {
+        // waves to swap
+        startWave := uniquePruneWaves[i]
+        endWave := uniquePruneWaves[n-1-i]
+
+        for _, task := range pruneTasks[startWave] {
             task.waveOverride = &endWave
-      }
-
-      for _, task := range pruneTasks[endWave] {
+        }
+
+        for _, task := range pruneTasks[endWave] {
             task.waveOverride = &startWave
-      }
+        }
     }

     // for pruneLast tasks, modify the wave to sync phase last wave of tasks + 1
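
The hunk above is an indentation-only fix, but the logic it touches is worth spelling out: prune tasks must be cleaned up in reverse creation order, so the distinct prune waves are sorted and then swapped symmetrically (first with last, second with second-to-last, and so on; an odd middle wave stays put). A standalone sketch of that swap over plain ints, using illustrative names rather than the engine's syncTask types:

package main

import (
	"fmt"
	"sort"
)

func main() {
	// Distinct waves that contain prune tasks, as in the test cases below.
	pruneWaves := []int{0, 1, 3, 4}
	sort.Ints(pruneWaves)

	// Symmetric swap: wave i trades places with wave n-1-i, so the
	// latest-created wave is pruned first and the earliest last.
	override := map[int]int{}
	n := len(pruneWaves)
	for i := 0; i < n/2; i++ {
		start, end := pruneWaves[i], pruneWaves[n-1-i]
		override[start] = end
		override[end] = start
	}

	fmt.Println(override) // map[0:4 1:3 3:1 4:0]
}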
@@ -926,7 +926,7 @@ func (sc *syncContext) setOperationPhase(phase common.OperationPhase, message st

 // ensureCRDReady waits until specified CRD is ready (established condition is true).
 func (sc *syncContext) ensureCRDReady(name string) error {
-    return wait.PollImmediate(time.Duration(100)*time.Millisecond, crdReadinessTimeout, func() (bool, error) {
+    return wait.PollUntilContextTimeout(context.Background(), time.Duration(100)*time.Millisecond, crdReadinessTimeout, true, func(ctx context.Context) (bool, error) {
         crd, err := sc.extensionsclientset.ApiextensionsV1().CustomResourceDefinitions().Get(context.TODO(), name, metav1.GetOptions{})
         if err != nil {
             return false, err
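
The last hunk in this file is a second deprecation fix: wait.PollImmediate is deprecated in recent k8s.io/apimachinery releases in favor of the context-aware wait.PollUntilContextTimeout. Passing true for the immediate argument keeps the old behavior of running the condition once before the first interval elapses. A minimal sketch of the replacement call shape; the counter-based condition is a stand-in for the CRD established check, not the engine's code:

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	attempts := 0
	// PollUntilContextTimeout(ctx, interval, timeout, immediate, condition)
	err := wait.PollUntilContextTimeout(context.Background(), 100*time.Millisecond, time.Second, true,
		func(ctx context.Context) (bool, error) {
			attempts++
			return attempts >= 3, nil // pretend readiness on the third check
		})
	fmt.Println(attempts, err) // 3 <nil>
}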

pkg/sync/sync_context_test.go

+45-42
@@ -26,7 +26,7 @@ import (
     "k8s.io/client-go/dynamic/fake"
     "k8s.io/client-go/rest"
     testcore "k8s.io/client-go/testing"
-    "k8s.io/klog/v2/klogr"
+    "k8s.io/klog/v2/textlogger"

     "github.com/argoproj/gitops-engine/pkg/diff"
     "github.com/argoproj/gitops-engine/pkg/health"
@@ -63,7 +63,7 @@ func newTestSyncCtx(getResourceFunc *func(ctx context.Context, config *rest.Conf
         namespace: FakeArgoCDNamespace,
         revision:  "FooBarBaz",
         disco:     fakeDisco,
-        log:       klogr.New().WithValues("application", "fake-app"),
+        log:       textlogger.NewLogger(textlogger.NewConfig()).WithValues("application", "fake-app"),
         resources: map[kube.ResourceKey]reconciledResource{},
         syncRes:   map[string]synccommon.ResourceSyncResult{},
         validate:  true,
@@ -1431,23 +1431,26 @@ func Test_syncContext_hasCRDOfGroupKind(t *testing.T) {
 }

 func Test_setRunningPhase_healthyState(t *testing.T) {
-    sc := syncContext{log: klogr.New().WithValues("application", "fake-app")}
+    var sc syncContext
+    sc.log = textlogger.NewLogger(textlogger.NewConfig()).WithValues("application", "fake-app")

     sc.setRunningPhase([]*syncTask{{targetObj: NewPod()}, {targetObj: NewPod()}, {targetObj: NewPod()}}, false)

     assert.Equal(t, "waiting for healthy state of /Pod/my-pod and 2 more resources", sc.message)
 }

 func Test_setRunningPhase_runningHooks(t *testing.T) {
-    sc := syncContext{log: klogr.New().WithValues("application", "fake-app")}
+    var sc syncContext
+    sc.log = textlogger.NewLogger(textlogger.NewConfig()).WithValues("application", "fake-app")

     sc.setRunningPhase([]*syncTask{{targetObj: newHook(synccommon.HookTypeSyncFail)}}, false)

     assert.Equal(t, "waiting for completion of hook /Pod/my-pod", sc.message)
 }

 func Test_setRunningPhase_pendingDeletion(t *testing.T) {
-    sc := syncContext{log: klogr.New().WithValues("application", "fake-app")}
+    var sc syncContext
+    sc.log = textlogger.NewLogger(textlogger.NewConfig()).WithValues("application", "fake-app")

     sc.setRunningPhase([]*syncTask{{targetObj: NewPod()}, {targetObj: NewPod()}, {targetObj: NewPod()}}, true)

@@ -1653,7 +1656,7 @@ func TestSyncContext_GetDeleteOptions_WithPrunePropagationPolicy(t *testing.T) {

 func TestSetOperationFailed(t *testing.T) {
     sc := syncContext{}
-    sc.log = klogr.New().WithValues("application", "fake-app")
+    sc.log = textlogger.NewLogger(textlogger.NewConfig()).WithValues("application", "fake-app")

     tasks := make([]*syncTask, 0)
     tasks = append(tasks, &syncTask{message: "namespace not found"})
@@ -1666,7 +1669,7 @@ func TestSetOperationFailed(t *testing.T) {

 func TestSetOperationFailedDuplicatedMessages(t *testing.T) {
     sc := syncContext{}
-    sc.log = klogr.New().WithValues("application", "fake-app")
+    sc.log = textlogger.NewLogger(textlogger.NewConfig()).WithValues("application", "fake-app")

     tasks := make([]*syncTask, 0)
     tasks = append(tasks, &syncTask{message: "namespace not found"})
@@ -1680,7 +1683,7 @@ func TestSetOperationFailedDuplicatedMessages(t *testing.T) {

 func TestSetOperationFailedNoTasks(t *testing.T) {
     sc := syncContext{}
-    sc.log = klogr.New().WithValues("application", "fake-app")
+    sc.log = textlogger.NewLogger(textlogger.NewConfig()).WithValues("application", "fake-app")

     sc.setOperationFailed(nil, nil, "one or more objects failed to apply")

@@ -1771,11 +1774,11 @@ func TestWaveReorderingOfPruneTasks(t *testing.T) {
             // no change in wave order
             expectedWaveOrder: map[string]int{
                 // new wave       // original wave
-                ns.GetName(): 0,   // 0
-                pod1.GetName(): 1, // 1
-                pod2.GetName(): 2, // 2
-                pod3.GetName(): 3, // 3
-                pod4.GetName(): 4, // 4
+                ns.GetName():   0, // 0
+                pod1.GetName(): 1, // 1
+                pod2.GetName(): 2, // 2
+                pod3.GetName(): 3, // 3
+                pod4.GetName(): 4, // 4
             },
         },
         {
@@ -1785,11 +1788,11 @@ func TestWaveReorderingOfPruneTasks(t *testing.T) {
             // change in prune wave order
             expectedWaveOrder: map[string]int{
                 // new wave       // original wave
-                ns.GetName(): 4,   // 0
-                pod1.GetName(): 3, // 1
-                pod2.GetName(): 2, // 2
-                pod3.GetName(): 1, // 3
-                pod4.GetName(): 0, // 4
+                ns.GetName():   4, // 0
+                pod1.GetName(): 3, // 1
+                pod2.GetName(): 2, // 2
+                pod3.GetName(): 1, // 3
+                pod4.GetName(): 0, // 4
             },
         },
         {
@@ -1799,13 +1802,13 @@ func TestWaveReorderingOfPruneTasks(t *testing.T) {
             // change in prune wave order
             expectedWaveOrder: map[string]int{
                 // new wave       // original wave
-                pod1.GetName(): 4, // 1
-                pod3.GetName(): 3, // 3
-                pod4.GetName(): 1, // 4
+                pod1.GetName(): 4, // 1
+                pod3.GetName(): 3, // 3
+                pod4.GetName(): 1, // 4

                 // no change since non prune tasks
-                ns.GetName(): 0,   // 0
-                pod2.GetName(): 2, // 2
+                ns.GetName():   0, // 0
+                pod2.GetName(): 2, // 2
             },
         },
     }
@@ -1830,13 +1833,13 @@ func TestWaveReorderingOfPruneTasks(t *testing.T) {
             // change in prune wave order
             expectedWaveOrder: map[string]int{
                 // new wave       // original wave
-                pod1.GetName(): 5, // 1
-                pod2.GetName(): 5, // 2
-                pod3.GetName(): 5, // 3
-                pod4.GetName(): 5, // 4
+                pod1.GetName(): 5, // 1
+                pod2.GetName(): 5, // 2
+                pod3.GetName(): 5, // 3
+                pod4.GetName(): 5, // 4

                 // no change since non prune tasks
-                ns.GetName(): 0,   // 0
+                ns.GetName():   0, // 0
             },
         },
         {
@@ -1847,13 +1850,13 @@ func TestWaveReorderingOfPruneTasks(t *testing.T) {
             // change in wave order
             expectedWaveOrder: map[string]int{
                 // new wave       // original wave
-                pod1.GetName(): 4, // 1
-                pod2.GetName(): 5, // 2
-                pod3.GetName(): 2, // 3
-                pod4.GetName(): 1, // 4
+                pod1.GetName(): 4, // 1
+                pod2.GetName(): 5, // 2
+                pod3.GetName(): 2, // 3
+                pod4.GetName(): 1, // 4

                 // no change since non prune tasks
-                ns.GetName(): 0,   // 0
+                ns.GetName():   0, // 0
             },
         },
     }
@@ -1877,11 +1880,11 @@ func TestWaveReorderingOfPruneTasks(t *testing.T) {
             // change in prune wave order
             expectedWaveOrder: map[string]int{
                 // new wave       // original wave
-                pod1.GetName(): 5, // 1
-                pod3.GetName(): 4, // 3
-                pod4.GetName(): 4, // 3
-                pod5.GetName(): 3, // 4
-                pod7.GetName(): 1, // 5
+                pod1.GetName(): 5, // 1
+                pod3.GetName(): 4, // 3
+                pod4.GetName(): 4, // 3
+                pod5.GetName(): 3, // 4
+                pod7.GetName(): 1, // 5

                 // no change since non prune tasks
                 ns.GetName(): -1, // -1
@@ -1941,8 +1944,8 @@ func TestWaitForCleanUpBeforeNextWave(t *testing.T) {

     // simulate successful delete of pod3
     syncCtx.resources = groupResources(ReconciliationResult{
-        Target: []*unstructured.Unstructured{nil, nil, },
-        Live:   []*unstructured.Unstructured{pod1, pod2, },
+        Target: []*unstructured.Unstructured{nil, nil},
+        Live:   []*unstructured.Unstructured{pod1, pod2},
     })

     // next sync should prune only pod2
@@ -1966,8 +1969,8 @@ func TestWaitForCleanUpBeforeNextWave(t *testing.T) {

     // simulate successful delete of pod2
     syncCtx.resources = groupResources(ReconciliationResult{
-        Target: []*unstructured.Unstructured{nil, },
-        Live:   []*unstructured.Unstructured{pod1, },
+        Target: []*unstructured.Unstructured{nil},
+        Live:   []*unstructured.Unstructured{pod1},
     })

     // next sync should proceed with next wave
