Commit e610f2a

Merge pull request #16692 from dcbw/sdn-sync-hostports-less

Automatic merge from submit-queue (batch tested with PRs 16741, 16692).

sdn: only sync HostPorts when we need to

That is: the first time a pod is started, when the starting pod will have active hostports, or when there are currently active hostports. Otherwise the syncer runs iptables-restore for no good reason.

@openshift/networking @knobunc @danwinship

2 parents 7f854d9 + b3fc39c commit e610f2a
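
For illustration, here is a minimal, standalone Go sketch of the sync-gating logic the commit message describes. The simplified portMapping and syncState types are hypothetical stand-ins, not part of the real code; the actual implementation is the shouldSyncHostports function in the diff below.

package main

import "fmt"

// portMapping is a hypothetical stand-in for kubehostport.PodPortMapping;
// only the presence of hostport mappings matters here.
type portMapping struct {
	name  string
	ports []int // declared hostports, if any
}

// syncState mirrors the two fields this commit adds to podManager.
type syncState struct {
	hostportsSynced bool // true once hostports have been synced at least once
	activeHostports bool // true if at least one running pod has a hostport mapping
}

// shouldSync reproduces the gating logic of shouldSyncHostports: it returns
// the mappings to pass to the syncer, or nil when the sync can be skipped.
func (s *syncState) shouldSync(running []*portMapping, newPod *portMapping) []*portMapping {
	newActive := false
	mappings := make([]*portMapping, 0, len(running)) // non-nil even when empty
	for _, p := range running {
		mappings = append(mappings, p)
		if len(p.ports) > 0 {
			newActive = true
		}
	}
	if newPod != nil && len(newPod.ports) > 0 {
		newActive = true
	}
	// Sync on the very first pod start (to clear stale mappings left over
	// from a kubelet crash), or when hostports are or will be in use.
	if !s.hostportsSynced || s.activeHostports || newActive {
		s.hostportsSynced = true
		s.activeHostports = newActive
		return mappings
	}
	return nil
}

func main() {
	s := &syncState{}
	plain := &portMapping{name: "web"}
	withHostport := &portMapping{name: "db", ports: []int{5432}}

	fmt.Println(s.shouldSync(nil, plain) != nil)                          // true: first sync always runs
	fmt.Println(s.shouldSync([]*portMapping{plain}, nil) != nil)          // false: no hostports anywhere, skip
	fmt.Println(s.shouldSync([]*portMapping{plain}, withHostport) != nil) // true: new pod declares a hostport
}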

File tree

1 file changed: +35 -10 lines changed

pkg/network/node/pod.go (+35 -10)
@@ -72,6 +72,10 @@ type podManager struct {
 	ovs *ovsController
 
 	enableHostports bool
+	// true if hostports have been synced at least once
+	hostportsSynced bool
+	// true if at least one running pod has a hostport mapping
+	activeHostports bool
 
 	// Things only accessed through the processCNIRequests() goroutine
 	// and thus can be set from Start()
@@ -188,12 +192,33 @@ func (m *podManager) getPod(request *cniserver.PodRequest) *kubehostport.PodPortMapping {
 }
 
 // Return a list of Kubernetes RunningPod objects for hostport operations
-func (m *podManager) getRunningPods() []*kubehostport.PodPortMapping {
-	pods := make([]*kubehostport.PodPortMapping, 0)
+func (m *podManager) shouldSyncHostports(newPod *kubehostport.PodPortMapping) []*kubehostport.PodPortMapping {
+	if m.hostportSyncer == nil {
+		return nil
+	}
+
+	newActiveHostports := false
+	mappings := make([]*kubehostport.PodPortMapping, 0)
 	for _, runningPod := range m.runningPods {
-		pods = append(pods, runningPod.podPortMapping)
+		mappings = append(mappings, runningPod.podPortMapping)
+		if !newActiveHostports && len(runningPod.podPortMapping.PortMappings) > 0 {
+			newActiveHostports = true
+		}
+	}
+	if newPod != nil && len(newPod.PortMappings) > 0 {
+		newActiveHostports = true
 	}
-	return pods
+
+	// Sync the first time a pod is started (to clear out stale mappings
+	// if kubelet crashed), or when there are any/will be active hostports.
+	// Otherwise don't bother.
+	if !m.hostportsSynced || m.activeHostports || newActiveHostports {
+		m.hostportsSynced = true
+		m.activeHostports = newActiveHostports
+		return mappings
+	}
+
+	return nil
 }
 
 // Add a request to the podManager CNI request queue
@@ -513,8 +538,8 @@ func (m *podManager) setup(req *cniserver.PodRequest) (cnitypes.Result, *runningPod, error) {
 	defer func() {
 		if !success {
 			m.ipamDel(req.SandboxID)
-			if m.hostportSyncer != nil {
-				if err := m.hostportSyncer.SyncHostports(Tun0, m.getRunningPods()); err != nil {
+			if mappings := m.shouldSyncHostports(nil); mappings != nil {
+				if err := m.hostportSyncer.SyncHostports(Tun0, mappings); err != nil {
 					glog.Warningf("failed syncing hostports: %v", err)
 				}
 			}
@@ -527,8 +552,8 @@ func (m *podManager) setup(req *cniserver.PodRequest) (cnitypes.Result, *runningPod, error) {
 		return nil, nil, err
 	}
 	podPortMapping := kubehostport.ConstructPodPortMapping(&v1Pod, podIP)
-	if m.hostportSyncer != nil {
-		if err := m.hostportSyncer.OpenPodHostportsAndSync(podPortMapping, Tun0, m.getRunningPods()); err != nil {
+	if mappings := m.shouldSyncHostports(podPortMapping); mappings != nil {
+		if err := m.hostportSyncer.OpenPodHostportsAndSync(podPortMapping, Tun0, mappings); err != nil {
 			return nil, nil, err
 		}
 	}
@@ -651,8 +676,8 @@ func (m *podManager) teardown(req *cniserver.PodRequest) error {
 		errList = append(errList, err)
 	}
 
-	if m.hostportSyncer != nil {
-		if err := m.hostportSyncer.SyncHostports(Tun0, m.getRunningPods()); err != nil {
+	if mappings := m.shouldSyncHostports(nil); mappings != nil {
+		if err := m.hostportSyncer.SyncHostports(Tun0, mappings); err != nil {
 			errList = append(errList, err)
 		}
 	}
