@@ -39,7 +39,6 @@ import (
 	"github.com/golang/protobuf/proto"
 	dto "github.com/prometheus/client_model/go"
 
-	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/informers"
 	clientv1 "k8s.io/client-go/informers/core/v1"
@@ -48,9 +47,6 @@ import (
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/klog/v2"
 
-	client "github.com/project-codeflare/multi-cluster-app-dispatcher/pkg/client/clientset/controller-versioned/clients"
-	informerfactory "github.com/project-codeflare/multi-cluster-app-dispatcher/pkg/client/informers/controller-externalversion"
-	arbclient "github.com/project-codeflare/multi-cluster-app-dispatcher/pkg/client/informers/controller-externalversion/v1"
 	"github.com/project-codeflare/multi-cluster-app-dispatcher/pkg/controller/clusterstate/api"
 )
 
@@ -64,18 +60,13 @@ type ClusterStateCache struct {
 
 	kubeclient *kubernetes.Clientset
 
-	podInformer            clientv1.PodInformer
-	nodeInformer           clientv1.NodeInformer
-	schedulingSpecInformer arbclient.SchedulingSpecInformer
+	nodeInformer clientv1.NodeInformer
 
 	Nodes map[string]*api.NodeInfo
 
 	availableResources *api.Resource
 	availableHistogram *api.ResourceHistogram
 	resourceCapacities *api.Resource
-	deletedJobs        *cache.FIFO
-
-	errTasks *cache.FIFO
 }
 
 func newClusterStateCache(config *rest.Config) *ClusterStateCache {
@@ -98,38 +89,6 @@ func newClusterStateCache(config *rest.Config) *ClusterStateCache {
 		0,
 	)
 
-	// create informer for pod information
-	sc.podInformer = informerFactory.Core().V1().Pods()
-	sc.podInformer.Informer().AddEventHandler(
-		cache.FilteringResourceEventHandler{
-			FilterFunc: func(obj interface{}) bool {
-				switch obj.(type) {
-				case *v1.Pod:
-					pod := obj.(*v1.Pod)
-					return pod.Status.Phase == v1.PodRunning
-					//if pod.Status.Phase != v1.PodSucceeded && pod.Status.Phase != v1.PodFailed {
-					//	return true
-					//} else {
-					//	return false
-					//}
-				default:
-					return false
-				}
-			},
-			Handler: cache.ResourceEventHandlerFuncs{},
-		})
-
-	// create queue informer
-	queueClient, _, err := client.NewClient(config)
-	if err != nil {
-		panic(err)
-	}
-
-	schedulingSpecInformerFactory := informerfactory.NewSharedInformerFactory(queueClient, 0)
-	// create informer for Queue information
-	sc.schedulingSpecInformer = schedulingSpecInformerFactory.SchedulingSpec().SchedulingSpecs()
-	sc.schedulingSpecInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{})
-
 	sc.availableResources = api.EmptyResource()
 	sc.availableHistogram = api.NewResourceHistogram(api.EmptyResource(), api.EmptyResource())
 	sc.resourceCapacities = api.EmptyResource()
@@ -139,10 +98,7 @@ func newClusterStateCache(config *rest.Config) *ClusterStateCache {
 
 func (sc *ClusterStateCache) Run(stopCh <-chan struct{}) {
 	klog.V(8).Infof("Cluster State Cache started.")
-
-	go sc.podInformer.Informer().Run(stopCh)
 	go sc.nodeInformer.Informer().Run(stopCh)
-	go sc.schedulingSpecInformer.Informer().Run(stopCh)
 
 	// Update cache
 	go sc.updateCache()
@@ -151,8 +107,6 @@ func (sc *ClusterStateCache) Run(stopCh <-chan struct{}) {
 
 func (sc *ClusterStateCache) WaitForCacheSync(stopCh <-chan struct{}) bool {
 	return cache.WaitForCacheSync(stopCh,
-		sc.podInformer.Informer().HasSynced,
-		sc.schedulingSpecInformer.Informer().HasSynced,
 		sc.nodeInformer.Informer().HasSynced)
 }
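For reference, a minimal sketch of the node-only wiring this change leaves behind, built on client-go's shared informer factory. The nodeOnlyCache type and newNodeOnlyCache constructor below are illustrative stand-ins, not code from this repository; only the general pattern (construct factory, start the node informer, wait for its sync) mirrors the diff.

// Illustrative sketch only: a cache that, like the trimmed ClusterStateCache,
// watches nothing but nodes via client-go's shared informer factory.
package cachesketch

import (
	"k8s.io/client-go/informers"
	clientv1 "k8s.io/client-go/informers/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
)

type nodeOnlyCache struct {
	kubeclient   *kubernetes.Clientset
	nodeInformer clientv1.NodeInformer
}

func newNodeOnlyCache(config *rest.Config) (*nodeOnlyCache, error) {
	kubeclient, err := kubernetes.NewForConfig(config)
	if err != nil {
		return nil, err
	}
	// A resync period of 0 mirrors the factory construction the diff keeps.
	factory := informers.NewSharedInformerFactory(kubeclient, 0)
	return &nodeOnlyCache{
		kubeclient:   kubeclient,
		nodeInformer: factory.Core().V1().Nodes(),
	}, nil
}

// Run starts only the node informer; there is no pod or SchedulingSpec
// informer left to launch.
func (c *nodeOnlyCache) Run(stopCh <-chan struct{}) {
	go c.nodeInformer.Informer().Run(stopCh)
}

// WaitForCacheSync blocks until the single remaining informer has synced.
func (c *nodeOnlyCache) WaitForCacheSync(stopCh <-chan struct{}) bool {
	return cache.WaitForCacheSync(stopCh, c.nodeInformer.Informer().HasSynced)
}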