@@ -5,6 +5,7 @@
     "fmt"
     "net"
     "sort"
+    "sync"
 
     "github.com/openshift/origin/pkg/sdn/plugin/cniserver"
     "github.com/openshift/origin/pkg/util/netutils"
@@ -21,7 +22,7 @@ import (
 
 type podHandler interface {
     setup(req *cniserver.PodRequest) (*cnitypes.Result, *runningPod, error)
-    update(req *cniserver.PodRequest) (*runningPod, error)
+    update(req *cniserver.PodRequest) (uint32, error)
     teardown(req *cniserver.PodRequest) error
 }
 
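Note on the interface change: update now reports only the pod's (possibly changed) VNID instead of returning a replacement *runningPod, so the caller patches its cached entry in place. A minimal, self-contained sketch of the new calling pattern; PodRequest and runningPod are simplified stand-ins for the real cniserver/plugin types, and fakeHandler is hypothetical:

package main

import "fmt"

// Simplified stand-ins for cniserver.PodRequest and the plugin's runningPod.
type PodRequest struct{ PodName string }
type runningPod struct {
    vnid   uint32
    ofport int
}

// The new shape of update: the handler returns the pod's VNID rather than
// a rebuilt runningPod.
type podHandler interface {
    update(req *PodRequest) (uint32, error)
}

// fakeHandler is a hypothetical implementation, for illustration only.
type fakeHandler struct{ newVNID uint32 }

func (h *fakeHandler) update(req *PodRequest) (uint32, error) {
    return h.newVNID, nil
}

func main() {
    runningPods := map[string]*runningPod{"ns/pod": {vnid: 7, ofport: 3}}
    var h podHandler = &fakeHandler{newVNID: 42}

    // Mirrors the CNI_UPDATE branch in processRequest below: on success,
    // only the cached pod's vnid is rewritten.
    vnid, err := h.update(&PodRequest{PodName: "pod"})
    if err == nil {
        if pod, exists := runningPods["ns/pod"]; exists {
            pod.vnid = vnid
        }
    }
    fmt.Println(runningPods["ns/pod"].vnid) // prints 42
}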
@@ -38,7 +39,8 @@ type podManager struct {
     // Request queue for pod operations incoming from the CNIServer
     requests chan (*cniserver.PodRequest)
     // Tracks pod :: IP address for hostport handling
-    runningPods map[string]*runningPod
+    runningPods     map[string]*runningPod
+    runningPodsLock sync.Mutex
 
     // Live pod setup/teardown stuff not used in testing code
     kClient *kclientset.Clientset
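The new runningPodsLock guards runningPods now that multicast rules can be refreshed from outside the serialized request loop (see UpdateLocalMulticastRules below). A minimal sketch of the locking convention this enables, under the stated assumption that the exported method takes the mutex while the *WithLock variant presumes the caller already holds it, so code that holds the lock for its whole body (processRequest) can reuse the helper without deadlocking on the non-reentrant sync.Mutex:

package main

import (
    "fmt"
    "sync"
)

// Sketch only; the map payload is simplified to a bare vnid.
type podManager struct {
    runningPods     map[string]uint32
    runningPodsLock sync.Mutex
}

// Caller must already hold runningPodsLock.
func (m *podManager) updateLocalMulticastRulesWithLock(vnid uint32) {
    n := 0
    for _, v := range m.runningPods {
        if v == vnid {
            n++
        }
    }
    fmt.Printf("vnid %d: flows rebuilt for %d pods\n", vnid, n)
}

// Entry point for callers outside the request loop; it takes the lock
// itself and then delegates.
func (m *podManager) UpdateLocalMulticastRules(vnid uint32) {
    m.runningPodsLock.Lock()
    defer m.runningPodsLock.Unlock()
    m.updateLocalMulticastRulesWithLock(vnid)
}

func main() {
    m := &podManager{runningPods: map[string]uint32{"ns/a": 7, "ns/b": 7}}
    m.UpdateLocalMulticastRules(7) // vnid 7: flows rebuilt for 2 pods
}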
@@ -99,7 +101,7 @@ func getIPAMConfig(clusterNetwork *net.IPNet, localSubnet string) ([]byte, error
         IPAM *hostLocalIPAM `json:"ipam"`
     }
 
-    mcaddr := net.ParseIP("224.0.0.0")
+    _, mcnet, _ := net.ParseCIDR("224.0.0.0/3")
    return json.Marshal(&cniNetworkConfig{
        Name: "openshift-sdn",
        Type: "openshift-sdn",
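Why the ParseCIDR form is equivalent: the old code built the destination by reusing 224.0.0.0 as both address and mask, i.e. a /3, and one ParseCIDR call now yields the same net.IPNet. A quick stdlib-only check (note that /3 spans 224.0.0.0 through 255.255.255.255, the same range the old hand-built mask matched, which is wider than the strict IPv4 multicast block 224.0.0.0/4):

package main

import (
    "fmt"
    "net"
)

func main() {
    _, mcnet, err := net.ParseCIDR("224.0.0.0/3")
    if err != nil {
        panic(err)
    }
    fmt.Println(mcnet) // 224.0.0.0/3

    // The /3 mask is binary 11100000..., i.e. 224.0.0.0, the same mask
    // the old code built by hand from mcaddr.
    fmt.Println(net.IP(mcnet.Mask)) // 224.0.0.0

    fmt.Println(mcnet.Contains(net.ParseIP("239.255.255.250"))) // true (multicast)
    fmt.Println(mcnet.Contains(net.ParseIP("10.128.0.5")))      // false (unicast)
}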
@@ -111,19 +113,20 @@ func getIPAMConfig(clusterNetwork *net.IPNet, localSubnet string) ([]byte, error
         },
         Routes: []cnitypes.Route{
             {
+                // Default route
                 Dst: net.IPNet{
                     IP:   net.IPv4zero,
                     Mask: net.IPMask(net.IPv4zero),
                 },
                 GW: netutils.GenerateDefaultGateway(nodeNet),
             },
-            {Dst: *clusterNetwork},
+            {
+                // Cluster network
+                Dst: *clusterNetwork,
+            },
             {
                 // Multicast
-                Dst: net.IPNet{
-                    IP:   mcaddr,
-                    Mask: net.IPMask(mcaddr),
-                },
+                Dst: *mcnet,
             },
         },
     },
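With the comments added above, each pod's IPAM config now carries three explicitly labeled routes: a default route via the node's generated gateway, the cluster network kept on the SDN, and the multicast range. A standalone sketch of the resulting route set, using assumed placeholder values (10.128.0.0/14 and the 10.128.2.1 gateway are examples; the real values come from clusterNetwork and netutils.GenerateDefaultGateway(nodeNet)):

package main

import (
    "fmt"
    "net"
)

func main() {
    // Placeholder values standing in for the node's real configuration.
    _, defaultNet, _ := net.ParseCIDR("0.0.0.0/0")
    _, clusterNetwork, _ := net.ParseCIDR("10.128.0.0/14")
    _, mcnet, _ := net.ParseCIDR("224.0.0.0/3")
    gw := net.ParseIP("10.128.2.1")

    routes := []struct {
        name string
        dst  *net.IPNet
        gw   net.IP
    }{
        {"default", defaultNet, gw},      // everything else, via the node gateway
        {"cluster", clusterNetwork, nil}, // pod-to-pod traffic stays on the SDN
        {"multicast", mcnet, nil},        // handled by the OVS multicast tables
    }
    for _, r := range routes {
        fmt.Printf("%-9s dst=%-15s gw=%v\n", r.name, r.dst, r.gw)
    }
}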
@@ -179,112 +182,106 @@ func (m *podManager) handleCNIRequest(request *cniserver.PodRequest) ([]byte, er
     return result.Response, result.Err
 }
 
-type runningPodsSlice []*runningPod
-
-func (l runningPodsSlice) Len() int           { return len(l) }
-func (l runningPodsSlice) Less(i, j int) bool { return l[i].ofport < l[j].ofport }
-func (l runningPodsSlice) Swap(i, j int)      { l[i], l[j] = l[j], l[i] }
-
-// FIXME: instead of calculating all this ourselves, figure out a way to pass
-// the old VNID through the Update() call (or get it from somewhere else).
-func updateMulticastFlows(runningPods map[string]*runningPod, ovs *ovs.Interface, podKey string, changedPod *runningPod) error {
-    // FIXME: prevents TestPodUpdate() from crashing. (We separately test this function anyway.)
-    if ovs == nil {
-        return nil
-    }
-
-    // Build map of pods by their VNID, excluding the changed pod
-    podsByVNID := make(map[uint32]runningPodsSlice)
-    for key, runningPod := range runningPods {
-        if key != podKey {
-            podsByVNID[runningPod.vnid] = append(podsByVNID[runningPod.vnid], runningPod)
+func localMulticastOutputs(runningPods map[string]*runningPod, vnid uint32) string {
+    var ofports []int
+    for _, pod := range runningPods {
+        if pod.vnid == vnid {
+            ofports = append(ofports, pod.ofport)
         }
     }
+    if len(ofports) == 0 {
+        return ""
+    }
 
-    // Figure out what two VNIDs changed so we can update only those two flows
-    changedVNIDs := make([]uint32, 0)
-    oldPod, exists := runningPods[podKey]
-    if changedPod != nil {
-        podsByVNID[changedPod.vnid] = append(podsByVNID[changedPod.vnid], changedPod)
-        changedVNIDs = append(changedVNIDs, changedPod.vnid)
-        if exists {
-            // VNID changed
-            changedVNIDs = append(changedVNIDs, oldPod.vnid)
+    sort.Ints(ofports)
+    outputs := ""
+    for _, ofport := range ofports {
+        if len(outputs) > 0 {
+            outputs += ","
         }
-    } else if exists {
-        // Pod deleted
-        changedVNIDs = append(changedVNIDs, oldPod.vnid)
+        outputs += fmt.Sprintf("output:%d", ofport)
     }
+    return outputs
+}
 
-    if len(changedVNIDs) == 0 {
-        // Shouldn't happen, but whatever
-        return fmt.Errorf("Multicast update requested but not required!")
+func (m *podManager) updateLocalMulticastRulesWithLock(vnid uint32) {
+    var outputs string
+    otx := m.ovs.NewTransaction()
+    if m.policy.GetMulticastEnabled(vnid) {
+        outputs = localMulticastOutputs(m.runningPods, vnid)
+        otx.AddFlow("table=110, reg0=%d, actions=goto_table:111", vnid)
+    } else {
+        otx.DeleteFlows("table=110, reg0=%d", vnid)
     }
-
-    otx := ovs.NewTransaction()
-    for _, vnid := range changedVNIDs {
-        // Sort pod array to ensure consistent ordering for testcases and readability
-        pods := podsByVNID[vnid]
-        sort.Sort(pods)
-
-        // build up list of ports on this VNID
-        outputs := ""
-        for _, pod := range pods {
-            if len(outputs) > 0 {
-                outputs += ","
-            }
-            outputs += fmt.Sprintf("output:%d", pod.ofport)
-        }
-
-        // Update or delete the flows for the vnid
-        if len(outputs) > 0 {
-            otx.AddFlow("table=120, priority=100, reg0=%d, actions=%s", vnid, outputs)
-        } else {
-            otx.DeleteFlows("table=120, reg0=%d", vnid)
-        }
+    if len(outputs) > 0 {
+        otx.AddFlow("table=120, priority=100, reg0=%d, actions=%s", vnid, outputs)
+    } else {
+        otx.DeleteFlows("table=120, reg0=%d", vnid)
+    }
+    if err := otx.EndTransaction(); err != nil {
+        glog.Errorf("Error updating OVS multicast flows for VNID %d: %v", vnid, err)
     }
-    return otx.EndTransaction()
+}
+
+// Update multicast OVS rules for the given vnid
+func (m *podManager) UpdateLocalMulticastRules(vnid uint32) {
+    m.runningPodsLock.Lock()
+    defer m.runningPodsLock.Unlock()
+    m.updateLocalMulticastRulesWithLock(vnid)
 }
 
 // Process all CNI requests from the request queue serially. Our OVS interaction
 // and scripts currently cannot run in parallel, and doing so greatly complicates
 // setup/teardown logic
 func (m *podManager) processCNIRequests() {
     for request := range m.requests {
-        pk := getPodKey(request)
-
-        var pod *runningPod
-        var ipamResult *cnitypes.Result
-
         glog.V(5).Infof("Processing pod network request %v", request)
-        result := &cniserver.PodResult{}
-        switch request.Command {
-        case cniserver.CNI_ADD:
-            ipamResult, pod, result.Err = m.podHandler.setup(request)
-            if ipamResult != nil {
-                result.Response, result.Err = json.Marshal(ipamResult)
+        result := m.processRequest(request)
+        glog.V(5).Infof("Processed pod network request %v, result %s err %v", request, string(result.Response), result.Err)
+        request.Result <- result
+    }
+    panic("stopped processing CNI pod requests!")
+}
+
+func (m *podManager) processRequest(request *cniserver.PodRequest) *cniserver.PodResult {
+    m.runningPodsLock.Lock()
+    defer m.runningPodsLock.Unlock()
+
+    pk := getPodKey(request)
+    result := &cniserver.PodResult{}
+    switch request.Command {
+    case cniserver.CNI_ADD:
+        ipamResult, runningPod, err := m.podHandler.setup(request)
+        if ipamResult != nil {
+            result.Response, err = json.Marshal(ipamResult)
+            if result.Err == nil {
+                m.runningPods[pk] = runningPod
+                if m.ovs != nil {
+                    m.updateLocalMulticastRulesWithLock(runningPod.vnid)
+                }
             }
-        case cniserver.CNI_UPDATE:
-            pod, result.Err = m.podHandler.update(request)
-        case cniserver.CNI_DEL:
-            result.Err = m.podHandler.teardown(request)
-        default:
-            result.Err = fmt.Errorf("unhandled CNI request %v", request.Command)
         }
-
-        if result.Err == nil {
-            if err := updateMulticastFlows(m.runningPods, m.ovs, pk, pod); err != nil {
-                glog.Warningf("Failed to update multicast flows: %v", err)
+        if err != nil {
+            result.Err = err
+        }
+    case cniserver.CNI_UPDATE:
+        vnid, err := m.podHandler.update(request)
+        if err == nil {
+            if runningPod, exists := m.runningPods[pk]; exists {
+                runningPod.vnid = vnid
             }
-            if pod != nil {
-                m.runningPods[pk] = pod
-            } else {
-                delete(m.runningPods, pk)
+        }
+        result.Err = err
+    case cniserver.CNI_DEL:
+        if runningPod, exists := m.runningPods[pk]; exists {
+            delete(m.runningPods, pk)
+            if m.ovs != nil {
+                m.updateLocalMulticastRulesWithLock(runningPod.vnid)
             }
         }
-
-        glog.V(5).Infof("Processed pod network request %v, result %s err %v", request, string(result.Response), result.Err)
-        request.Result <- result
+        result.Err = m.podHandler.teardown(request)
+    default:
+        result.Err = fmt.Errorf("unhandled CNI request %v", request.Command)
     }
-    panic("stopped processing CNI pod requests!")
+    return result
 }
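For reference, the new localMulticastOutputs helper is pure (pod map in, OVS action string out), which is what lets it be unit-tested without an OVS connection, and is why the sort matters: Go map iteration order is randomized, so sorting the ofports keeps the generated flow stable across calls and test runs. A self-contained sketch reproducing its behavior, with runningPod trimmed to the two fields the helper actually reads:

package main

import (
    "fmt"
    "sort"
)

type runningPod struct {
    vnid   uint32
    ofport int
}

func localMulticastOutputs(runningPods map[string]*runningPod, vnid uint32) string {
    var ofports []int
    for _, pod := range runningPods {
        if pod.vnid == vnid {
            ofports = append(ofports, pod.ofport)
        }
    }
    if len(ofports) == 0 {
        return ""
    }
    // Sorting makes the flow text deterministic despite random map order.
    sort.Ints(ofports)
    outputs := ""
    for _, ofport := range ofports {
        if len(outputs) > 0 {
            outputs += ","
        }
        outputs += fmt.Sprintf("output:%d", ofport)
    }
    return outputs
}

func main() {
    pods := map[string]*runningPod{
        "ns1/a": {vnid: 7, ofport: 12},
        "ns1/b": {vnid: 7, ofport: 3},
        "ns2/c": {vnid: 9, ofport: 5},
    }
    fmt.Println(localMulticastOutputs(pods, 7)) // output:3,output:12
    fmt.Println(localMulticastOutputs(pods, 8)) // "" (no pods left on the VNID)
}

An empty result for a VNID with no remaining pods is what drives the DeleteFlows branch in updateLocalMulticastRulesWithLock.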