
Commit 8ac36bf

Merge pull request #2141 from bertinatto/bump-v1.31.3
WRKLDS-1449: Update to Kubernetes v1.31.3
2 parents e3abfef + 247b4a9 commit 8ac36bf

File tree: 14 files changed (+1385, -968 lines)


CHANGELOG/CHANGELOG-1.31.md (+173, -49): large diff not rendered.

cmd/kube-controller-manager/app/controllermanager_test.go (+26)

@@ -220,3 +220,29 @@ func TestTaintEvictionControllerGating(t *testing.T) {
 		})
 	}
 }
+
+func TestNoCloudProviderControllerStarted(t *testing.T) {
+	_, ctx := ktesting.NewTestContext(t)
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	controllerCtx := ControllerContext{
+		Cloud:    nil,
+		LoopMode: IncludeCloudLoops,
+	}
+	controllerCtx.ComponentConfig.Generic.Controllers = []string{"*"}
+	for _, controller := range NewControllerDescriptors() {
+		if !controller.IsCloudProviderController() {
+			continue
+		}
+
+		controllerName := controller.Name()
+		checker, err := StartController(ctx, controllerCtx, controller, nil)
+		if err != nil {
+			t.Errorf("Error starting controller %q: %v", controllerName, err)
+		}
+		if checker != nil {
+			t.Errorf("Controller %q should not be started", controllerName)
+		}
+	}
+}

cmd/kube-controller-manager/app/core.go (+12, -1)

@@ -92,6 +92,12 @@ func newServiceLBControllerDescriptor() *ControllerDescriptor {
 }
 
 func startServiceLBController(ctx context.Context, controllerContext ControllerContext, controllerName string) (controller.Interface, bool, error) {
+	logger := klog.FromContext(ctx)
+	if controllerContext.Cloud == nil {
+		logger.Info("Warning: service-controller is set, but no cloud provider specified. Will not configure service controller.")
+		return nil, false, nil
+	}
+
 	serviceController, err := servicecontroller.New(
 		controllerContext.Cloud,
 		controllerContext.ClientBuilder.ClientOrDie("service-controller"),
@@ -102,7 +108,7 @@ func startServiceLBController(ctx context.Context, controllerContext ControllerC
 	)
 	if err != nil {
 		// This error shouldn't fail. It lives like this as a legacy.
-		klog.FromContext(ctx).Error(err, "Failed to start service controller")
+		logger.Error(err, "Failed to start service controller.")
 		return nil, false, nil
 	}
 	go serviceController.Run(ctx, int(controllerContext.ComponentConfig.ServiceController.ConcurrentServiceSyncs), controllerContext.ControllerManagerMetrics)
@@ -261,6 +267,11 @@ func newCloudNodeLifecycleControllerDescriptor() *ControllerDescriptor {
 
 func startCloudNodeLifecycleController(ctx context.Context, controllerContext ControllerContext, controllerName string) (controller.Interface, bool, error) {
 	logger := klog.FromContext(ctx)
+	if controllerContext.Cloud == nil {
+		logger.Info("Warning: node-controller is set, but no cloud provider specified. Will not configure node lifecyle controller.")
+		return nil, false, nil
+	}
+
 	cloudNodeLifecycleController, err := cloudnodelifecyclecontroller.NewCloudNodeLifecycleController(
 		controllerContext.InformerFactory.Core().V1().Nodes(),
 		// cloud node lifecycle controller uses existing cluster role from node-controller
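
Both start functions now share the same guard: with no cloud provider configured they return (nil, false, nil), so the controller is reported as not enabled rather than failing. Below is a minimal self-contained sketch of that pattern, with illustrative names rather than the real kube-controller-manager types:

package main

import (
	"context"
	"fmt"
)

// Cloud stands in for the cloud-provider interface; nil means no provider configured.
type Cloud interface{ ProviderName() string }

// startCloudBackedController mirrors the added guard: with a nil cloud it logs a
// warning and returns enabled=false with a nil error, so the caller simply skips
// the controller instead of treating the situation as a startup failure.
func startCloudBackedController(ctx context.Context, cloud Cloud) (enabled bool, err error) {
	if cloud == nil {
		fmt.Println("Warning: controller is set, but no cloud provider specified; will not configure it")
		return false, nil
	}
	// ... construct the real controller here and start it with go ctrl.Run(ctx, ...) ...
	return true, nil
}

func main() {
	enabled, err := startCloudBackedController(context.Background(), nil)
	fmt.Printf("enabled=%v err=%v\n", enabled, err) // enabled=false err=<nil>
}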

openshift-hack/images/hyperkube/Dockerfile.rhel (+1, -1)

@@ -14,4 +14,4 @@ COPY --from=builder /tmp/build/* /usr/bin/
 LABEL io.k8s.display-name="OpenShift Kubernetes Server Commands" \
       io.k8s.description="OpenShift is a platform for developing, building, and deploying containerized applications." \
       io.openshift.tags="openshift,hyperkube" \
-      io.openshift.build.versions="kubernetes=1.31.2"
+      io.openshift.build.versions="kubernetes=1.31.3"

pkg/controlplane/controller/leaderelection/run_with_leaderelection.go (+3, -1)

@@ -50,7 +50,9 @@ func RunWithLeaderElection(ctx context.Context, config *rest.Config, newRunnerFn
 			run(ctx, 1)
 		},
 		OnStoppedLeading: func() {
-			cancel()
+			if cancel != nil {
+				cancel()
+			}
 		},
 	}
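
The nil check matters because OnStoppedLeading can fire before the surrounding code has assigned cancel, and calling a nil context.CancelFunc panics. A small self-contained sketch of the failure mode and the guard (the callback wiring here is illustrative, not the client-go leaderelection API):

package main

import (
	"context"
	"fmt"
)

func main() {
	var cancel context.CancelFunc // still nil, as it can be when the callbacks are built

	onStoppedLeading := func() {
		// The added guard: without it, invoking a nil CancelFunc panics.
		if cancel != nil {
			cancel()
		}
	}

	onStoppedLeading() // safe even though cancel has not been assigned yet

	var ctx context.Context
	ctx, cancel = context.WithCancel(context.Background())
	onStoppedLeading()                     // now actually cancels the context
	fmt.Println("context err:", ctx.Err()) // context err: context canceled
}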

pkg/scheduler/framework/plugins/defaultpreemption/default_preemption.go (+7)

@@ -191,6 +191,8 @@ func (pl *DefaultPreemption) SelectVictimsOnNode(
 	}
 	var victims []*v1.Pod
 	numViolatingVictim := 0
+	// Sort potentialVictims by pod priority from high to low, which ensures to
+	// reprieve higher priority pods first.
 	sort.Slice(potentialVictims, func(i, j int) bool { return util.MoreImportantPod(potentialVictims[i].Pod, potentialVictims[j].Pod) })
 	// Try to reprieve as many pods as possible. We first try to reprieve the PDB
 	// violating victims and then other non-violating ones. In both cases, we start
@@ -225,6 +227,11 @@ func (pl *DefaultPreemption) SelectVictimsOnNode(
 			return nil, 0, framework.AsStatus(err)
 		}
 	}
+
+	// Sort victims after reprieving pods to keep the pods in the victims sorted in order of priority from high to low.
+	if len(violatingVictims) != 0 && len(nonViolatingVictims) != 0 {
+		sort.Slice(victims, func(i, j int) bool { return util.MoreImportantPod(victims[i], victims[j]) })
+	}
 	return victims, numViolatingVictim, framework.NewStatus(framework.Success)
 }
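
The extra sort restores the high-to-low priority order of the victims slice after it is rebuilt from two separately reprieved groups (PDB-violating pods first, then non-violating ones). A minimal sketch of that invariant, using a stand-in for util.MoreImportantPod:

package main

import (
	"fmt"
	"sort"
)

// victim is a stand-in for *v1.Pod; only the priority matters for this sketch.
type victim struct {
	name     string
	priority int32
}

// moreImportant mimics the spirit of util.MoreImportantPod: higher priority sorts first.
func moreImportant(a, b victim) bool { return a.priority > b.priority }

func main() {
	// After reprieving, victims holds the PDB-violating pods followed by the
	// non-violating ones, so the merged slice is not globally ordered by priority.
	victims := []victim{
		{"violating-low", 10},       // appended while processing violating victims
		{"non-violating-high", 100}, // appended while processing non-violating victims
	}

	// The added sort restores the high-to-low order expected by callers.
	sort.Slice(victims, func(i, j int) bool { return moreImportant(victims[i], victims[j]) })
	fmt.Println(victims) // [{non-violating-high 100} {violating-low 10}]
}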

pkg/scheduler/framework/plugins/defaultpreemption/default_preemption_test.go (+38)

@@ -21,6 +21,7 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
+	"math"
 	"math/rand"
 	"sort"
 	"strings"
@@ -142,13 +143,20 @@ func (pl *TestPlugin) Filter(ctx context.Context, state *framework.CycleState, p
 	return nil
 }
 
+const (
+	LabelKeyIsViolatingPDB    = "test.kubernetes.io/is-violating-pdb"
+	LabelValueViolatingPDB    = "violating"
+	LabelValueNonViolatingPDB = "non-violating"
+)
+
 func TestPostFilter(t *testing.T) {
 	onePodRes := map[v1.ResourceName]string{v1.ResourcePods: "1"}
 	nodeRes := map[v1.ResourceName]string{v1.ResourceCPU: "200m", v1.ResourceMemory: "400"}
 	tests := []struct {
 		name                  string
 		pod                   *v1.Pod
 		pods                  []*v1.Pod
+		pdbs                  []*policy.PodDisruptionBudget
 		nodes                 []*v1.Node
 		filteredNodesStatuses framework.NodeToStatusMap
 		extender              framework.Extender
@@ -218,6 +226,29 @@ func TestPostFilter(t *testing.T) {
 			wantResult: framework.NewPostFilterResultWithNominatedNode("node2"),
 			wantStatus: framework.NewStatus(framework.Success),
 		},
+		{
+			name: "pod can be made schedulable on minHighestPriority node",
+			pod:  st.MakePod().Name("p").UID("p").Namespace(v1.NamespaceDefault).Priority(veryHighPriority).Obj(),
+			pods: []*v1.Pod{
+				st.MakePod().Name("p1").UID("p1").Label(LabelKeyIsViolatingPDB, LabelValueNonViolatingPDB).Namespace(v1.NamespaceDefault).Priority(highPriority).Node("node1").Obj(),
+				st.MakePod().Name("p2").UID("p2").Label(LabelKeyIsViolatingPDB, LabelValueViolatingPDB).Namespace(v1.NamespaceDefault).Priority(lowPriority).Node("node1").Obj(),
+				st.MakePod().Name("p3").UID("p3").Label(LabelKeyIsViolatingPDB, LabelValueViolatingPDB).Namespace(v1.NamespaceDefault).Priority(midPriority).Node("node2").Obj(),
+			},
+			pdbs: []*policy.PodDisruptionBudget{
+				st.MakePDB().Name("violating-pdb").Namespace(v1.NamespaceDefault).MatchLabel(LabelKeyIsViolatingPDB, LabelValueViolatingPDB).MinAvailable("100%").Obj(),
+				st.MakePDB().Name("non-violating-pdb").Namespace(v1.NamespaceDefault).MatchLabel(LabelKeyIsViolatingPDB, LabelValueNonViolatingPDB).MinAvailable("0").DisruptionsAllowed(math.MaxInt32).Obj(),
+			},
+			nodes: []*v1.Node{
+				st.MakeNode().Name("node1").Capacity(onePodRes).Obj(),
+				st.MakeNode().Name("node2").Capacity(onePodRes).Obj(),
+			},
+			filteredNodesStatuses: framework.NodeToStatusMap{
+				"node1": framework.NewStatus(framework.Unschedulable),
+				"node2": framework.NewStatus(framework.Unschedulable),
+			},
+			wantResult: framework.NewPostFilterResultWithNominatedNode("node2"),
+			wantStatus: framework.NewStatus(framework.Success),
+		},
 		{
 			name: "preemption result filtered out by extenders",
 			pod:  st.MakePod().Name("p").UID("p").Namespace(v1.NamespaceDefault).Priority(highPriority).Obj(),
@@ -347,6 +378,13 @@ func TestPostFilter(t *testing.T) {
 			for i := range tt.pods {
 				podInformer.GetStore().Add(tt.pods[i])
 			}
+			pdbInformer := informerFactory.Policy().V1().PodDisruptionBudgets().Informer()
+			for i := range tt.pdbs {
+				if err := pdbInformer.GetStore().Add(tt.pdbs[i]); err != nil {
+					t.Fatal(err)
+				}
+			}
+
 			// Register NodeResourceFit as the Filter & PreFilter plugin.
 			registeredPlugins := []tf.RegisterPluginFunc{
 				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),

pkg/scheduler/testing/wrappers.go (+60)

@@ -21,11 +21,13 @@ import (
 	"time"
 
 	v1 "k8s.io/api/core/v1"
+	policy "k8s.io/api/policy/v1"
 	resourceapi "k8s.io/api/resource/v1alpha3"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/intstr"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 	"k8s.io/utils/ptr"
 )
@@ -212,6 +214,64 @@ func (c *ContainerWrapper) ResourceLimits(limMap map[v1.ResourceName]string) *Co
 	return c
 }
 
+// PodDisruptionBudgetWrapper wraps a PodDisruptionBudget inside.
+type PodDisruptionBudgetWrapper struct {
+	policy.PodDisruptionBudget
+}
+
+// MakePDB creates a PodDisruptionBudget wrapper.
+func MakePDB() *PodDisruptionBudgetWrapper {
+	return &PodDisruptionBudgetWrapper{policy.PodDisruptionBudget{}}
+}
+
+// Obj returns the inner PodDisruptionBudget.
+func (p *PodDisruptionBudgetWrapper) Obj() *policy.PodDisruptionBudget {
+	return &p.PodDisruptionBudget
+}
+
+// Name sets `name` as the name of the inner PodDisruptionBudget.
+func (p *PodDisruptionBudgetWrapper) Name(name string) *PodDisruptionBudgetWrapper {
+	p.SetName(name)
+	return p
+}
+
+// Namespace sets `namespace` as the namespace of the inner PodDisruptionBudget.
+func (p *PodDisruptionBudgetWrapper) Namespace(namespace string) *PodDisruptionBudgetWrapper {
+	p.SetNamespace(namespace)
+	return p
+}
+
+// MinAvailable sets `minAvailable` to the inner PodDisruptionBudget.Spec.MinAvailable.
+func (p *PodDisruptionBudgetWrapper) MinAvailable(minAvailable string) *PodDisruptionBudgetWrapper {
+	p.Spec.MinAvailable = &intstr.IntOrString{
+		Type:   intstr.String,
+		StrVal: minAvailable,
+	}
+	return p
+}
+
+// MatchLabel adds a {key,value} to the inner PodDisruptionBudget.Spec.Selector.MatchLabels.
+func (p *PodDisruptionBudgetWrapper) MatchLabel(key, value string) *PodDisruptionBudgetWrapper {
+	selector := p.Spec.Selector
+	if selector == nil {
+		selector = &metav1.LabelSelector{}
+	}
+	matchLabels := selector.MatchLabels
+	if matchLabels == nil {
+		matchLabels = map[string]string{}
+	}
+	matchLabels[key] = value
+	selector.MatchLabels = matchLabels
+	p.Spec.Selector = selector
+	return p
+}
+
+// DisruptionsAllowed sets `disruptionsAllowed` to the inner PodDisruptionBudget.Status.DisruptionsAllowed.
+func (p *PodDisruptionBudgetWrapper) DisruptionsAllowed(disruptionsAllowed int32) *PodDisruptionBudgetWrapper {
+	p.Status.DisruptionsAllowed = disruptionsAllowed
+	return p
+}
+
 // PodWrapper wraps a Pod inside.
 type PodWrapper struct{ v1.Pod }
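
The wrapper follows the builder style of the existing MakePod/MakeNode helpers. As a sketch of how the new TestPostFilter case above chains it (assuming the usual st alias for k8s.io/kubernetes/pkg/scheduler/testing):

package example

import (
	"math"

	v1 "k8s.io/api/core/v1"
	policy "k8s.io/api/policy/v1"
	st "k8s.io/kubernetes/pkg/scheduler/testing"
)

// examplePDB builds a PodDisruptionBudget the same way the new test case does:
// a label selector, a string-typed minAvailable, and an explicit
// status.disruptionsAllowed for the "non-violating" budget.
func examplePDB() *policy.PodDisruptionBudget {
	return st.MakePDB().
		Name("non-violating-pdb").
		Namespace(v1.NamespaceDefault).
		MatchLabel("test.kubernetes.io/is-violating-pdb", "non-violating").
		MinAvailable("0").
		DisruptionsAllowed(math.MaxInt32).
		Obj()
}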

staging/src/k8s.io/apiserver/pkg/server/config.go (+1, -1)

@@ -1234,7 +1234,7 @@ func AuthorizeClientBearerToken(loopback *restclient.Config, authn *Authenticati
 	tokens[privilegedLoopbackToken] = &user.DefaultInfo{
 		Name:   user.APIServerUser,
 		UID:    uid,
-		Groups: []string{user.SystemPrivilegedGroup},
+		Groups: []string{user.AllAuthenticated, user.SystemPrivilegedGroup},
 	}
 
 	tokenAuthenticator := authenticatorfactory.NewFromTokens(tokens, authn.APIAudiences)
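
With this change the privileged loopback token's user belongs to system:authenticated as well as system:masters. A small sketch of the resulting user info, assuming the standard values of those constants:

package example

import "k8s.io/apiserver/pkg/authentication/user"

// loopbackUser sketches the DefaultInfo now built for the loopback token:
// the user is a member of both the all-authenticated and the privileged group.
func loopbackUser(uid string) *user.DefaultInfo {
	return &user.DefaultInfo{
		Name: user.APIServerUser, // "system:apiserver"
		UID:  uid,
		// user.AllAuthenticated is "system:authenticated",
		// user.SystemPrivilegedGroup is "system:masters".
		Groups: []string{user.AllAuthenticated, user.SystemPrivilegedGroup},
	}
}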

staging/src/k8s.io/apiserver/pkg/server/config_test.go (+29)

@@ -38,6 +38,7 @@ import (
 	"k8s.io/apiserver/pkg/audit/policy"
 	"k8s.io/apiserver/pkg/authentication/authenticator"
 	"k8s.io/apiserver/pkg/authentication/user"
+	"k8s.io/apiserver/pkg/authorization/authorizer"
 	"k8s.io/apiserver/pkg/endpoints/request"
 	"k8s.io/apiserver/pkg/server/healthz"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
@@ -83,6 +84,34 @@ func TestAuthorizeClientBearerTokenNoops(t *testing.T) {
 	}
 }
 
+func TestAuthorizeClientBearerTokenRequiredGroups(t *testing.T) {
+	fakeAuthenticator := authenticator.RequestFunc(func(req *http.Request) (*authenticator.Response, bool, error) {
+		return &authenticator.Response{User: &user.DefaultInfo{}}, false, nil
+	})
+	fakeAuthorizer := authorizer.AuthorizerFunc(func(ctx context.Context, a authorizer.Attributes) (authorizer.Decision, string, error) {
+		return authorizer.DecisionAllow, "", nil
+	})
+	target := &rest.Config{BearerToken: "secretToken"}
+	authN := &AuthenticationInfo{Authenticator: fakeAuthenticator}
+	authC := &AuthorizationInfo{Authorizer: fakeAuthorizer}
+
+	AuthorizeClientBearerToken(target, authN, authC)
+
+	fakeRequest, err := http.NewRequest("", "", nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	fakeRequest.Header.Set("Authorization", "bearer secretToken")
+	rsp, _, err := authN.Authenticator.AuthenticateRequest(fakeRequest)
+	if err != nil {
+		t.Fatal(err)
+	}
+	expectedGroups := []string{user.AllAuthenticated, user.SystemPrivilegedGroup}
+	if !reflect.DeepEqual(expectedGroups, rsp.User.GetGroups()) {
+		t.Fatalf("unexpected groups = %v returned, expected = %v", rsp.User.GetGroups(), expectedGroups)
+	}
+}
+
 func TestNewWithDelegate(t *testing.T) {
 	_, ctx := ktesting.NewTestContext(t)
 	ctx, cancel := context.WithCancelCause(ctx)

staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher_whitebox_test.go (+7, -9)

@@ -904,6 +904,7 @@ func TestCacherDontMissEventsOnReinitialization(t *testing.T) {
 		case 1:
 			podList.ListMeta = metav1.ListMeta{ResourceVersion: "10"}
 		default:
+			t.Errorf("unexpected list call: %d", listCalls)
 			err = fmt.Errorf("unexpected list call")
 		}
 		listCalls++
@@ -926,8 +927,11 @@
 			for i := 12; i < 18; i++ {
 				w.Add(makePod(i))
 			}
-			w.Stop()
+			// Keep the watch open to avoid another reinitialization,
+			// but register it for cleanup.
+			t.Cleanup(func() { w.Stop() })
 		default:
+			t.Errorf("unexpected watch call: %d", watchCalls)
 			err = fmt.Errorf("unexpected watch call")
 		}
 		watchCalls++
@@ -949,7 +953,6 @@
 	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
 	defer cancel()
 
-	errCh := make(chan error, concurrency)
 	for i := 0; i < concurrency; i++ {
 		go func() {
 			defer wg.Done()
@@ -973,11 +976,11 @@
 			}
 			rv, err := strconv.Atoi(object.(*example.Pod).ResourceVersion)
 			if err != nil {
-				errCh <- fmt.Errorf("incorrect resource version: %v", err)
+				t.Errorf("incorrect resource version: %v", err)
 				return
 			}
 			if prevRV != -1 && prevRV+1 != rv {
-				errCh <- fmt.Errorf("unexpected event received, prevRV=%d, rv=%d", prevRV, rv)
+				t.Errorf("unexpected event received, prevRV=%d, rv=%d", prevRV, rv)
 				return
 			}
 			prevRV = rv
@@ -986,11 +989,6 @@
 		}()
 	}
 	wg.Wait()
-	close(errCh)
-
-	for err := range errCh {
-		t.Error(err)
-	}
 }
 
 func TestCacherNoLeakWithMultipleWatchers(t *testing.T) {
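
The test now reports failures from its goroutines directly with t.Errorf, which is safe for concurrent use, and keeps the watch open by deferring shutdown to t.Cleanup instead of draining an error channel after wg.Wait(). A generic sketch of that pattern (not the cacher test itself):

package example

import (
	"sync"
	"testing"
)

// TestConcurrentChecks sketches the pattern the cacher test switched to:
// goroutines call t.Errorf directly (the testing.T error methods may be called
// concurrently), and cleanup is registered with t.Cleanup so it runs even if
// the test body returns early.
func TestConcurrentChecks(t *testing.T) {
	t.Cleanup(func() {
		// e.g. stop watches or close fake clients here
	})

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			if i < 0 { // placeholder for a real assertion
				t.Errorf("unexpected value: %d", i)
			}
		}(i)
	}
	wg.Wait()
}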

staging/src/k8s.io/cloud-provider/controllers/service/controller.go (+5, -4)

@@ -110,6 +110,7 @@ func New(
 	featureGate featuregate.FeatureGate,
 ) (*Controller, error) {
 	registerMetrics()
+
 	s := &Controller{
 		cloud:      cloud,
 		kubeClient: kubeClient,
@@ -128,6 +129,10 @@
 		lastSyncedNodes: make(map[string][]*v1.Node),
 	}
 
+	if err := s.init(); err != nil {
+		return nil, err
+	}
+
 	serviceInformer.Informer().AddEventHandlerWithResyncPeriod(
 		cache.ResourceEventHandlerFuncs{
 			AddFunc: func(cur interface{}) {
@@ -182,10 +187,6 @@
 		nodeSyncPeriod,
 	)
 
-	if err := s.init(); err != nil {
-		return nil, err
-	}
-
 	return s, nil
 }
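
Running s.init() before any informer event handlers are registered lets a failed initialization return before anything is wired to the shared informers. A minimal sketch of the reordering, with illustrative names:

package main

import "fmt"

// controller stands in for the service controller; init() can fail in the real code.
type controller struct{ handlers int }

func (c *controller) init() error { return nil }

// newController mirrors the new ordering: fail fast on init, then register handlers.
func newController() (*controller, error) {
	c := &controller{}

	// Moved before handler registration: on error, nothing has been wired up yet.
	if err := c.init(); err != nil {
		return nil, err
	}

	c.handlers++ // stands in for the service informer event handler registration
	c.handlers++ // stands in for the node informer event handler registration
	return c, nil
}

func main() {
	c, err := newController()
	fmt.Println(c.handlers, err) // 2 <nil>
}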
