
Commit d120195

Merge pull request kubernetes#25151 from sttts/sttts-nodeport-sync-3.11
Bug 1753649: UPSTREAM: 89937: portAllocator sync local data before allocate

Origin-commit: d548b192d8b9e38402772509c8f0abc60ac7069c
2 parents d9a5bec + c1007de commit d120195
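
In short: each kube-apiserver keeps an in-memory bitmap of allocated ports, with the authoritative copy stored in etcd. Before this change, Allocate, AllocateNext, and Release mutated the local bitmap first and only then attempted the etcd update, so an API server whose bitmap had drifted out of sync with etcd could refuse a port that was actually free, or hand out one already taken by another API server. The fix moves the mutation inside tryUpdate, which restores the local bitmap from the latest etcd snapshot before applying it, making etcd the single source of truth.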

File tree: 4 files changed, +161 −32 lines


pkg/registry/core/service/allocator/storage/storage.go

+22 −32

@@ -76,17 +76,12 @@ func NewEtcd(alloc allocator.Snapshottable, baseKey string, resource schema.Grou
 	}
 }
 
-// Allocate attempts to allocate the item locally and then in etcd.
+// Allocate attempts to allocate the item.
 func (e *Etcd) Allocate(offset int) (bool, error) {
 	e.lock.Lock()
 	defer e.lock.Unlock()
 
-	ok, err := e.alloc.Allocate(offset)
-	if !ok || err != nil {
-		return ok, err
-	}
-
-	err = e.tryUpdate(func() error {
+	err := e.tryUpdate(func() error {
 		ok, err := e.alloc.Allocate(offset)
 		if err != nil {
 			return err
@@ -105,49 +100,44 @@ func (e *Etcd) Allocate(offset int) (bool, error) {
 	return true, nil
 }
 
-// AllocateNext attempts to allocate the next item locally and then in etcd.
+// AllocateNext attempts to allocate the next item.
 func (e *Etcd) AllocateNext() (int, bool, error) {
 	e.lock.Lock()
 	defer e.lock.Unlock()
-
-	offset, ok, err := e.alloc.AllocateNext()
-	if !ok || err != nil {
-		return offset, ok, err
-	}
+	var offset int
+	var ok bool
+	var err error
 
 	err = e.tryUpdate(func() error {
-		ok, err := e.alloc.Allocate(offset)
+		// update the offset here
+		offset, ok, err = e.alloc.AllocateNext()
 		if err != nil {
 			return err
 		}
 		if !ok {
-			// update the offset here
-			offset, ok, err = e.alloc.AllocateNext()
-			if err != nil {
-				return err
-			}
-			if !ok {
-				return errorUnableToAllocate
-			}
-			return nil
+			return errorUnableToAllocate
 		}
 		return nil
 	})
-	return offset, ok, err
+
+	if err != nil {
+		if err == errorUnableToAllocate {
+			return offset, false, nil
+		}
+		return offset, false, err
+	}
+	return offset, true, nil
 }
 
-// Release attempts to release the provided item locally and then in etcd.
+// Release attempts to release the provided item.
 func (e *Etcd) Release(item int) error {
 	e.lock.Lock()
 	defer e.lock.Unlock()
 
-	if err := e.alloc.Release(item); err != nil {
-		return err
-	}
-
 	return e.tryUpdate(func() error {
 		return e.alloc.Release(item)
 	})
+
 }
 
 func (e *Etcd) ForEach(fn func(int)) {
@@ -168,9 +158,9 @@ func (e *Etcd) tryUpdate(fn func() error) error {
 			if err := e.alloc.Restore(existing.Range, existing.Data); err != nil {
 				return nil, err
 			}
-			if err := fn(); err != nil {
-				return nil, err
-			}
+		}
+		if err := fn(); err != nil {
+			return nil, err
 		}
 		e.last = existing.ResourceVersion
 		rangeSpec, data := e.alloc.Snapshot()
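
For readers who want the pattern without the diff noise, here is a minimal sketch of the restore-then-mutate flow that tryUpdate now enforces. The names below (bitmap, allocateSynced, load, save) are illustrative stand-ins invented for this sketch, not the real allocator.Snapshottable or storage interfaces.

package allocsketch

// bitmap stands in for the allocator's snapshottable local state.
type bitmap interface {
	Allocate(offset int) (bool, error)
	Restore(data []byte) error
	Snapshot() []byte
}

// allocateSynced mirrors what Etcd.Allocate now does inside tryUpdate:
// the local bitmap is brought up to date with storage before the
// allocation is attempted, so a stale local view can no longer decide.
func allocateSynced(local bitmap, load func() []byte, save func([]byte) error, offset int) (bool, error) {
	// sync local data first: restore the bitmap from the stored snapshot
	if err := local.Restore(load()); err != nil {
		return false, err
	}
	// only then mutate the (now fresh) local state
	ok, err := local.Allocate(offset)
	if err != nil || !ok {
		return ok, err
	}
	// persist the new state; in the real code this happens via a
	// guaranteed (compare-and-swap) update against etcd
	return true, save(local.Snapshot())
}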

pkg/registry/core/service/allocator/storage/storage_test.go

+56

@@ -96,3 +96,59 @@ func TestStore(t *testing.T) {
 		t.Fatal(err)
 	}
 }
+
+// Test that an item that is allocated in the storage but not locally
+// cannot be allocated again: the attempt should fail even though the
+// item is free in the local bitmap, because it is taken in the storage.
+func TestAllocatedStorageButReleasedLocally(t *testing.T) {
+	storage, server, backing, _ := newStorage(t)
+	defer server.Terminate(t)
+	if err := storage.storage.Create(context.TODO(), key(), validNewRangeAllocation(), nil, 0); err != nil {
+		t.Fatalf("unexpected error: %v", err)
+	}
+
+	// Allocate an item in the storage
+	if _, err := storage.Allocate(2); err != nil {
+		t.Fatal(err)
+	}
+
+	// Release the item in the local bitmap only,
+	// emulating a bitmap that is out of sync with the storage
+	err := backing.Release(2)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Allocating it should fail: despite being free in the local
+	// bitmap, it is still allocated in the storage
+	ok, err := storage.Allocate(2)
+	if ok || err != nil {
+		t.Fatal(err)
+	}
+}
+
+// Test that an item that is free in the storage but allocated locally
+// can be allocated: the attempt should succeed even though the item is
+// taken in the local bitmap, because it is free in the storage.
+func TestAllocatedLocallyButReleasedStorage(t *testing.T) {
+	storage, server, backing, _ := newStorage(t)
+	defer server.Terminate(t)
+	if err := storage.storage.Create(context.TODO(), key(), validNewRangeAllocation(), nil, 0); err != nil {
+		t.Fatalf("unexpected error: %v", err)
+	}
+
+	// Allocate an item in the local bitmap only, not in the storage,
+	// emulating a bitmap that is out of sync with the storage
+	if _, err := backing.Allocate(2); err != nil {
+		t.Fatal(err)
+	}
+
+	// Allocating it should succeed because it is free in the storage
+	ok, err := storage.Allocate(2)
+	if !ok || err != nil {
+		t.Fatal(err)
+	}
+}

pkg/registry/core/service/portallocator/BUILD

+1

@@ -44,6 +44,7 @@ filegroup(
     srcs = [
         ":package-srcs",
         "//pkg/registry/core/service/portallocator/controller:all-srcs",
+        "//pkg/registry/core/service/portallocator/storage:all-srcs",
    ],
     tags = ["automanaged"],
 )

test/integration/master/kube_apiserver_test.go

+82

@@ -28,6 +28,7 @@ import (
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/intstr"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/apiserver/pkg/registry/generic/registry"
 	"k8s.io/client-go/kubernetes"
@@ -250,3 +251,84 @@ func TestReconcilerMasterLeaseMultiMoreMasters(t *testing.T) {
 func TestReconcilerMasterLeaseMultiCombined(t *testing.T) {
 	testReconcilersMasterLease(t, 3, 3)
 }
+
+func TestMultiMasterNodePortAllocation(t *testing.T) {
+	var kubeAPIServers []*kubeapiservertesting.TestServer
+	var clientAPIServers []*kubernetes.Clientset
+	etcd := framework.SharedEtcd()
+
+	instanceOptions := &kubeapiservertesting.TestServerInstanceOptions{
+		DisableStorageCleanup: true,
+	}
+
+	// clean up the registry storage
+	defer registry.CleanupStorage()
+
+	// create 2 API servers and 2 clients
+	for i := 0; i < 2; i++ {
+		// start a master-count API server
+		t.Logf("starting api server: %d", i)
+		server := kubeapiservertesting.StartTestServerOrDie(t, instanceOptions, []string{
+			"--advertise-address", fmt.Sprintf("10.0.1.%v", i+1),
+		}, etcd)
+		kubeAPIServers = append(kubeAPIServers, server)
+
+		// verify that the kube API servers have registered, and create a client
+		if err := wait.PollImmediate(3*time.Second, 2*time.Minute, func() (bool, error) {
+			client, err := kubernetes.NewForConfig(kubeAPIServers[i].ClientConfig)
+			if err != nil {
+				t.Logf("create client error: %v", err)
+				return false, nil
+			}
+			clientAPIServers = append(clientAPIServers, client)
+			endpoints, err := client.CoreV1().Endpoints("default").Get("kubernetes", metav1.GetOptions{})
+			if err != nil {
+				t.Logf("error fetching endpoints: %v", err)
+				return false, nil
+			}
+			return verifyEndpointsWithIPs(kubeAPIServers, getEndpointIPs(endpoints)), nil
+		}); err != nil {
+			t.Fatalf("did not find only lease endpoints: %v", err)
+		}
+	}
+
+	serviceObject := &corev1.Service{
+		ObjectMeta: metav1.ObjectMeta{
+			Labels: map[string]string{"foo": "bar"},
+			Name:   "test-node-port",
+		},
+		Spec: corev1.ServiceSpec{
+			Ports: []corev1.ServicePort{
+				{
+					Name:       "nodeport-test",
+					Port:       443,
+					TargetPort: intstr.IntOrString{IntVal: 443},
+					NodePort:   32080,
+					Protocol:   "TCP",
+				},
+			},
+			Type:     "NodePort",
+			Selector: map[string]string{"foo": "bar"},
+		},
+	}
+
+	// create and delete the same NodePort service using different API servers
+	// to check that the API servers share the same port allocation bitmap
+	for i := 0; i < 2; i++ {
+		// create the service using the first API server
+		_, err := clientAPIServers[0].CoreV1().Services(metav1.NamespaceDefault).Create(serviceObject)
+		if err != nil {
+			t.Fatalf("unable to create service: %v", err)
+		}
+		// delete the service using the second API server
+		if err := clientAPIServers[1].CoreV1().Services(metav1.NamespaceDefault).Delete(serviceObject.ObjectMeta.Name, &metav1.DeleteOptions{}); err != nil {
+			t.Fatalf("got unexpected error: %v", err)
+		}
+	}
+
+	// shut down the API servers
+	for _, server := range kubeAPIServers {
+		server.TearDownFn()
+	}
+}
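
The create/delete loop is the regression check for the bug: the service is created through the first API server and deleted through the second, so neither server observes the other's change in its local bitmap. Without the sync in tryUpdate, the second iteration's create could fail, since the first API server's stale bitmap would still mark NodePort 32080 as allocated; with the fix, each allocation first restores the bitmap from etcd and succeeds.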
