
Commit 51fbd6e

Author: Kubernetes Submit Queue

Merge pull request kubernetes#57168 from yastij/predicates-ordering
Automatic merge from submit-queue (batch tested with PRs 57252, 57168). If you want to cherry-pick this change to another branch, please follow the instructions here: https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md

Implementing predicates ordering

**What this PR does / why we need it**: implements predicate ordering for the scheduler.

**Which issue(s) this PR fixes**: Fixes kubernetes#53812

**Special notes for your reviewer**: @bsalamat @gmarek @resouer As discussed on Slack, to implement ordering we have two choices:

- use a layered approach, with a list that indexes the order of the predicates map;
- change the underlying data structure used to represent a collection of predicates (currently a map) into a list of predicate objects. This might be "cleaner", but it would require many more changes and would increase the cost of accessing predicates from O(1) to O(n), n being the number of predicates used by the scheduler.

We go with the first option for now; if the number of predicates starts growing, we might switch to the second.

**Release note**:

```release-note
adding predicates ordering for the kubernetes scheduler.
```
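To make the trade-off above concrete, here is a minimal, self-contained sketch of the layered approach this PR adopts: an ordered slice of names indexing a map of predicate functions, so the evaluation order is fixed while lookup stays O(1). The types and predicate names are simplified stand-ins (the real `FitPredicate` takes a pod, predicate metadata, and cached node info), not the scheduler's actual API:

```go
package main

import "fmt"

// FitPredicate is a simplified stand-in for the scheduler's predicate type;
// the real signature takes a pod, predicate metadata, and node info.
type FitPredicate func() bool

// The layered approach: the slice fixes evaluation order, the map keeps
// predicate lookup at O(1). Names here are illustrative.
var ordering = []string{
	"CheckNodeCondition", "PodFitsResources", "NoDiskConflict", "MatchInterPodAffinity",
}

var predicateFuncs = map[string]FitPredicate{
	"CheckNodeCondition":    func() bool { return true },
	"PodFitsResources":      func() bool { return false },
	"MatchInterPodAffinity": func() bool { return true },
	// "NoDiskConflict" is intentionally unregistered: ordered names with no
	// registered predicate are simply skipped.
}

func podFitsOnNode() bool {
	fits := true
	for _, key := range ordering { // walk the fixed order, not the map
		if pred, exists := predicateFuncs[key]; exists { // O(1) map access
			if !pred() {
				fmt.Println("predicate failed:", key)
				fits = false // keep evaluating, mirroring the real loop
			}
		}
	}
	return fits
}

func main() {
	fmt.Println("pod fits:", podFitsOnNode())
}
```

The flip side of this design is that a registered predicate whose name is missing from the ordering list would silently never run, which is why the PR adds the "IMPORTANT NOTE" comment in predicates.go below.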
2 parents dd9bca8 + e62952d commit 51fbd6e

8 files changed: +109 -56 lines changed

plugin/pkg/scheduler/algorithm/predicates/predicates.go (+42 -3)

@@ -49,9 +49,25 @@ import (
 )
 
 const (
-    MatchInterPodAffinity = "MatchInterPodAffinity"
-    CheckVolumeBinding    = "CheckVolumeBinding"
-
+    MatchInterPodAffinityPred           = "MatchInterPodAffinity"
+    CheckVolumeBindingPred              = "CheckVolumeBinding"
+    CheckNodeConditionPred              = "CheckNodeCondition"
+    GeneralPred                         = "GeneralPredicates"
+    HostNamePred                        = "HostName"
+    PodFitsHostPortsPred                = "PodFitsHostPorts"
+    MatchNodeSelectorPred               = "MatchNodeSelector"
+    PodFitsResourcesPred                = "PodFitsResources"
+    NoDiskConflictPred                  = "NoDiskConflict"
+    PodToleratesNodeTaintsPred          = "PodToleratesNodeTaints"
+    PodToleratesNodeNoExecuteTaintsPred = "PodToleratesNodeNoExecuteTaints"
+    CheckNodeLabelPresencePred          = "CheckNodeLabelPresence"
+    checkServiceAffinityPred            = "checkServiceAffinity"
+    MaxEBSVolumeCountPred               = "MaxEBSVolumeCount"
+    MaxGCEPDVolumeCountPred             = "MaxGCEPDVolumeCount"
+    MaxAzureDiskVolumeCountPred         = "MaxAzureDiskVolumeCount"
+    NoVolumeZoneConflictPred            = "NoVolumeZoneConflict"
+    CheckNodeMemoryPressurePred         = "CheckNodeMemoryPressure"
+    CheckNodeDiskPressurePred           = "CheckNodeDiskPressure"
     // DefaultMaxGCEPDVolumes defines the maximum number of PD Volumes for GCE
     // GCE instances can have up to 16 PD volumes attached.
     DefaultMaxGCEPDVolumes = 16
@@ -79,6 +95,21 @@ const (
     // For example:
     // https://github.com/kubernetes/kubernetes/blob/36a218e/plugin/pkg/scheduler/factory/factory.go#L422
 
+// IMPORTANT NOTE: this list contains the ordering of the predicates, if you develop a new predicate
+// it is mandatory to add its name to this list.
+// Otherwise it won't be processed, see generic_scheduler#podFitsOnNode().
+// The order is based on the restrictiveness & complexity of predicates.
+// Design doc: https://github.com/kubernetes/community/blob/master/contributors/design-proposals/scheduling/predicates-ordering.md
+var (
+    predicatesOrdering = []string{CheckNodeConditionPred,
+        GeneralPred, HostNamePred, PodFitsHostPortsPred,
+        MatchNodeSelectorPred, PodFitsResourcesPred, NoDiskConflictPred,
+        PodToleratesNodeTaintsPred, PodToleratesNodeNoExecuteTaintsPred, CheckNodeLabelPresencePred,
+        checkServiceAffinityPred, MaxEBSVolumeCountPred, MaxGCEPDVolumeCountPred,
+        MaxAzureDiskVolumeCountPred, CheckVolumeBindingPred, NoVolumeZoneConflictPred,
+        CheckNodeMemoryPressurePred, CheckNodeDiskPressurePred, MatchInterPodAffinityPred}
+)
+
 // NodeInfo: Other types for predicate functions...
 type NodeInfo interface {
     GetNodeInfo(nodeID string) (*v1.Node, error)
@@ -93,6 +124,14 @@ type CachedPersistentVolumeInfo struct {
     corelisters.PersistentVolumeLister
 }
 
+func PredicatesOrdering() []string {
+    return predicatesOrdering
+}
+
+func SetPredicatesOrdering(names []string) {
+    predicatesOrdering = names
+}
+
 func (c *CachedPersistentVolumeInfo) GetPersistentVolumeInfo(pvID string) (*v1.PersistentVolume, error) {
     return c.Get(pvID)
 }
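As a usage note, the two new accessors give tests (and, in principle, other in-tree callers) a way to read or override the global ordering. A minimal sketch, assuming the import path from this PR's tree; the trimmed-down two-predicate order is hypothetical, for illustration only:

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates"
)

func main() {
	// Read the default, restrictiveness-based order.
	fmt.Println(predicates.PredicatesOrdering())

	// Override it, e.g. to exercise only two predicates in a test.
	predicates.SetPredicatesOrdering([]string{
		predicates.CheckNodeConditionPred,
		predicates.GeneralPred,
	})
}
```

Since SetPredicatesOrdering swaps a package-level slice with no locking, it is intended to be called once during setup, as the tests in this PR do, not concurrently with scheduling.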

plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go (+20 -20)

@@ -65,17 +65,17 @@ func init() {
     // Fit is defined based on the absence of port conflicts.
     // This predicate is actually a default predicate, because it is invoked from
     // predicates.GeneralPredicates()
-    factory.RegisterFitPredicate("PodFitsHostPorts", predicates.PodFitsHostPorts)
+    factory.RegisterFitPredicate(predicates.PodFitsHostPortsPred, predicates.PodFitsHostPorts)
     // Fit is determined by resource availability.
     // This predicate is actually a default predicate, because it is invoked from
     // predicates.GeneralPredicates()
-    factory.RegisterFitPredicate("PodFitsResources", predicates.PodFitsResources)
+    factory.RegisterFitPredicate(predicates.PodFitsResourcesPred, predicates.PodFitsResources)
     // Fit is determined by the presence of the Host parameter and a string match
     // This predicate is actually a default predicate, because it is invoked from
     // predicates.GeneralPredicates()
-    factory.RegisterFitPredicate("HostName", predicates.PodFitsHost)
+    factory.RegisterFitPredicate(predicates.HostNamePred, predicates.PodFitsHost)
     // Fit is determined by node selector query.
-    factory.RegisterFitPredicate("MatchNodeSelector", predicates.PodMatchNodeSelector)
+    factory.RegisterFitPredicate(predicates.MatchNodeSelectorPred, predicates.PodMatchNodeSelector)
 
     // Use equivalence class to speed up heavy predicates phase.
     factory.RegisterGetEquivalencePodFunction(
@@ -117,62 +117,62 @@ func defaultPredicates() sets.String {
     return sets.NewString(
         // Fit is determined by volume zone requirements.
         factory.RegisterFitPredicateFactory(
-            "NoVolumeZoneConflict",
+            predicates.NoVolumeZoneConflictPred,
             func(args factory.PluginFactoryArgs) algorithm.FitPredicate {
                 return predicates.NewVolumeZonePredicate(args.PVInfo, args.PVCInfo, args.StorageClassInfo)
             },
         ),
        // Fit is determined by whether or not there would be too many AWS EBS volumes attached to the node
         factory.RegisterFitPredicateFactory(
-            "MaxEBSVolumeCount",
+            predicates.MaxEBSVolumeCountPred,
             func(args factory.PluginFactoryArgs) algorithm.FitPredicate {
                 return predicates.NewMaxPDVolumeCountPredicate(predicates.EBSVolumeFilterType, args.PVInfo, args.PVCInfo)
             },
         ),
         // Fit is determined by whether or not there would be too many GCE PD volumes attached to the node
         factory.RegisterFitPredicateFactory(
-            "MaxGCEPDVolumeCount",
+            predicates.MaxGCEPDVolumeCountPred,
             func(args factory.PluginFactoryArgs) algorithm.FitPredicate {
                 return predicates.NewMaxPDVolumeCountPredicate(predicates.GCEPDVolumeFilterType, args.PVInfo, args.PVCInfo)
             },
         ),
         // Fit is determined by whether or not there would be too many Azure Disk volumes attached to the node
         factory.RegisterFitPredicateFactory(
-            "MaxAzureDiskVolumeCount",
+            predicates.MaxAzureDiskVolumeCountPred,
             func(args factory.PluginFactoryArgs) algorithm.FitPredicate {
                 return predicates.NewMaxPDVolumeCountPredicate(predicates.AzureDiskVolumeFilterType, args.PVInfo, args.PVCInfo)
             },
         ),
         // Fit is determined by inter-pod affinity.
         factory.RegisterFitPredicateFactory(
-            predicates.MatchInterPodAffinity,
+            predicates.MatchInterPodAffinityPred,
             func(args factory.PluginFactoryArgs) algorithm.FitPredicate {
                 return predicates.NewPodAffinityPredicate(args.NodeInfo, args.PodLister)
             },
         ),
 
         // Fit is determined by non-conflicting disk volumes.
-        factory.RegisterFitPredicate("NoDiskConflict", predicates.NoDiskConflict),
+        factory.RegisterFitPredicate(predicates.NoDiskConflictPred, predicates.NoDiskConflict),
 
         // GeneralPredicates are the predicates that are enforced by all Kubernetes components
         // (e.g. kubelet and all schedulers)
-        factory.RegisterFitPredicate("GeneralPredicates", predicates.GeneralPredicates),
+        factory.RegisterFitPredicate(predicates.GeneralPred, predicates.GeneralPredicates),
 
         // Fit is determined by node memory pressure condition.
-        factory.RegisterFitPredicate("CheckNodeMemoryPressure", predicates.CheckNodeMemoryPressurePredicate),
+        factory.RegisterFitPredicate(predicates.CheckNodeMemoryPressurePred, predicates.CheckNodeMemoryPressurePredicate),
 
         // Fit is determined by node disk pressure condition.
-        factory.RegisterFitPredicate("CheckNodeDiskPressure", predicates.CheckNodeDiskPressurePredicate),
+        factory.RegisterFitPredicate(predicates.CheckNodeDiskPressurePred, predicates.CheckNodeDiskPressurePredicate),
 
         // Fit is determined by node conditions: not ready, network unavailable or out of disk.
-        factory.RegisterMandatoryFitPredicate("CheckNodeCondition", predicates.CheckNodeConditionPredicate),
+        factory.RegisterMandatoryFitPredicate(predicates.CheckNodeConditionPred, predicates.CheckNodeConditionPredicate),
 
         // Fit is determined based on whether a pod can tolerate all of the node's taints
-        factory.RegisterFitPredicate("PodToleratesNodeTaints", predicates.PodToleratesNodeTaints),
+        factory.RegisterFitPredicate(predicates.PodToleratesNodeTaintsPred, predicates.PodToleratesNodeTaints),
 
         // Fit is determined by volume topology requirements.
         factory.RegisterFitPredicateFactory(
-            predicates.CheckVolumeBinding,
+            predicates.CheckVolumeBindingPred,
             func(args factory.PluginFactoryArgs) algorithm.FitPredicate {
                 return predicates.NewVolumeBindingPredicate(args.VolumeBinder)
             },
@@ -185,18 +185,18 @@ func ApplyFeatureGates() {
 
     if utilfeature.DefaultFeatureGate.Enabled(features.TaintNodesByCondition) {
         // Remove "CheckNodeCondition" predicate
-        factory.RemoveFitPredicate("CheckNodeCondition")
+        factory.RemoveFitPredicate(predicates.CheckNodeConditionPred)
         // Remove Key "CheckNodeCondition" From All Algorithm Provider
         // The key will be removed from all providers which in algorithmProviderMap[]
         // if you just want remove specific provider, call func RemovePredicateKeyFromAlgoProvider()
-        factory.RemovePredicateKeyFromAlgorithmProviderMap("CheckNodeCondition")
+        factory.RemovePredicateKeyFromAlgorithmProviderMap(predicates.CheckNodeConditionPred)
 
         // Fit is determined based on whether a pod can tolerate all of the node's taints
-        factory.RegisterMandatoryFitPredicate("PodToleratesNodeTaints", predicates.PodToleratesNodeTaints)
+        factory.RegisterMandatoryFitPredicate(predicates.PodToleratesNodeTaintsPred, predicates.PodToleratesNodeTaints)
         // Insert Key "PodToleratesNodeTaints" To All Algorithm Provider
         // The key will insert to all providers which in algorithmProviderMap[]
         // if you just want insert to specific provider, call func InsertPredicateKeyToAlgoProvider()
-        factory.InsertPredicateKeyToAlgorithmProviderMap("PodToleratesNodeTaints")
+        factory.InsertPredicateKeyToAlgorithmProviderMap(predicates.PodToleratesNodeTaintsPred)
 
         glog.Warningf("TaintNodesByCondition is enabled, PodToleratesNodeTaints predicate is mandatory")
     }

plugin/pkg/scheduler/algorithmprovider/defaults/defaults_test.go (+1 -1)

@@ -78,7 +78,7 @@ func TestDefaultPredicates(t *testing.T) {
         "CheckNodeDiskPressure",
         "CheckNodeCondition",
         "PodToleratesNodeTaints",
-        predicates.CheckVolumeBinding,
+        predicates.CheckVolumeBindingPred,
     )
 
     if expected := defaultPredicates(); !result.Equal(expected) {

plugin/pkg/scheduler/core/generic_scheduler.go (+28 -25)

@@ -444,34 +444,37 @@ func podFitsOnNode(
         // TODO(bsalamat): consider using eCache and adding proper eCache invalidations
         // when pods are nominated or their nominations change.
         eCacheAvailable = eCacheAvailable && !podsAdded
-        for predicateKey, predicate := range predicateFuncs {
-            if eCacheAvailable {
-                // PredicateWithECache will return its cached predicate results.
-                fit, reasons, invalid = ecache.PredicateWithECache(pod.GetName(), info.Node().GetName(), predicateKey, equivalenceHash)
-            }
-
-            // TODO(bsalamat): When one predicate fails and fit is false, why do we continue
-            // checking other predicates?
-            if !eCacheAvailable || invalid {
-                // we need to execute predicate functions since equivalence cache does not work
-                fit, reasons, err = predicate(pod, metaToUse, nodeInfoToUse)
-                if err != nil {
-                    return false, []algorithm.PredicateFailureReason{}, err
-                }
+        for _, predicateKey := range predicates.PredicatesOrdering() {
+            //TODO (yastij) : compute average predicate restrictiveness to export it as promethus metric
+            if predicate, exist := predicateFuncs[predicateKey]; exist {
                 if eCacheAvailable {
-                    // Store data to update eCache after this loop.
-                    if res, exists := predicateResults[predicateKey]; exists {
-                        res.Fit = res.Fit && fit
-                        res.FailReasons = append(res.FailReasons, reasons...)
-                        predicateResults[predicateKey] = res
-                    } else {
-                        predicateResults[predicateKey] = HostPredicate{Fit: fit, FailReasons: reasons}
+                    // PredicateWithECache will return its cached predicate results.
+                    fit, reasons, invalid = ecache.PredicateWithECache(pod.GetName(), info.Node().GetName(), predicateKey, equivalenceHash)
+                }
+
+                // TODO(bsalamat): When one predicate fails and fit is false, why do we continue
+                // checking other predicates?
+                if !eCacheAvailable || invalid {
+                    // we need to execute predicate functions since equivalence cache does not work
+                    fit, reasons, err = predicate(pod, metaToUse, nodeInfoToUse)
+                    if err != nil {
+                        return false, []algorithm.PredicateFailureReason{}, err
+                    }
+                    if eCacheAvailable {
+                        // Store data to update eCache after this loop.
+                        if res, exists := predicateResults[predicateKey]; exists {
+                            res.Fit = res.Fit && fit
+                            res.FailReasons = append(res.FailReasons, reasons...)
+                            predicateResults[predicateKey] = res
+                        } else {
+                            predicateResults[predicateKey] = HostPredicate{Fit: fit, FailReasons: reasons}
+                        }
                     }
                 }
-            }
-            if !fit {
-                // eCache is available and valid, and predicates result is unfit, record the fail reasons
-                failedPredicates = append(failedPredicates, reasons...)
+                if !fit {
+                    // eCache is available and valid, and predicates result is unfit, record the fail reasons
+                    failedPredicates = append(failedPredicates, reasons...)
+                }
             }
         }
     }
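Two behavioral details of the reshuffled loop are worth noting: iteration is now driven by predicates.PredicatesOrdering() rather than by ranging over predicateFuncs, so any registered predicate whose key is absent from the ordering list is skipped without error; and, per the retained TODO(bsalamat), evaluation still continues past a failing predicate, accumulating all failure reasons rather than short-circuiting.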

plugin/pkg/scheduler/core/generic_scheduler_test.go (+11 -2)

@@ -42,6 +42,10 @@ import (
     schedulertesting "k8s.io/kubernetes/plugin/pkg/scheduler/testing"
 )
 
+var (
+    order = []string{"false", "true", "matches", "nopods", predicates.MatchInterPodAffinityPred}
+)
+
 func falsePredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
     return false, []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate}, nil
 }
@@ -181,6 +185,7 @@ func TestSelectHost(t *testing.T) {
 }
 
 func TestGenericScheduler(t *testing.T) {
+    predicates.SetPredicatesOrdering(order)
     tests := []struct {
         name string
         predicates map[string]algorithm.FitPredicate
@@ -401,6 +406,7 @@
 }
 
 func TestFindFitAllError(t *testing.T) {
+    predicates.SetPredicatesOrdering(order)
     nodes := []string{"3", "2", "1"}
     predicates := map[string]algorithm.FitPredicate{"true": truePredicate, "false": falsePredicate}
     nodeNameToInfo := map[string]*schedulercache.NodeInfo{
@@ -430,8 +436,9 @@
 }
 
 func TestFindFitSomeError(t *testing.T) {
+    predicates.SetPredicatesOrdering(order)
     nodes := []string{"3", "2", "1"}
-    predicates := map[string]algorithm.FitPredicate{"true": truePredicate, "match": matchesPredicate}
+    predicates := map[string]algorithm.FitPredicate{"true": truePredicate, "matches": matchesPredicate}
     pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "1"}}
     nodeNameToInfo := map[string]*schedulercache.NodeInfo{
         "3": schedulercache.NewNodeInfo(),
@@ -741,6 +748,7 @@ var negPriority, lowPriority, midPriority, highPriority, veryHighPriority = int3
 // TestSelectNodesForPreemption tests selectNodesForPreemption. This test assumes
 // that podsFitsOnNode works correctly and is tested separately.
 func TestSelectNodesForPreemption(t *testing.T) {
+    predicates.SetPredicatesOrdering(order)
     tests := []struct {
         name string
         predicates map[string]algorithm.FitPredicate
@@ -864,7 +872,7 @@
             nodes = append(nodes, node)
         }
         if test.addAffinityPredicate {
-            test.predicates[predicates.MatchInterPodAffinity] = algorithmpredicates.NewPodAffinityPredicate(FakeNodeInfo(*nodes[0]), schedulertesting.FakePodLister(test.pods))
+            test.predicates[predicates.MatchInterPodAffinityPred] = algorithmpredicates.NewPodAffinityPredicate(FakeNodeInfo(*nodes[0]), schedulertesting.FakePodLister(test.pods))
         }
         nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, nodes)
         nodeToPods, err := selectNodesForPreemption(test.pod, nodeNameToInfo, nodes, test.predicates, PredicateMetadata, nil, nil)
@@ -879,6 +887,7 @@
 
 // TestPickOneNodeForPreemption tests pickOneNodeForPreemption.
 func TestPickOneNodeForPreemption(t *testing.T) {
+    predicates.SetPredicatesOrdering(order)
     tests := []struct {
         name string
         predicates map[string]algorithm.FitPredicate

plugin/pkg/scheduler/factory/factory.go (+3 -3)

@@ -409,7 +409,7 @@ func (c *configFactory) invalidatePredicatesForPv(pv *v1.PersistentVolume) {
 
     if utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) {
         // Add/delete impacts the available PVs to choose from
-        invalidPredicates.Insert(predicates.CheckVolumeBinding)
+        invalidPredicates.Insert(predicates.CheckVolumeBindingPred)
     }
 
     c.equivalencePodCache.InvalidateCachedPredicateItemOfAllNodes(invalidPredicates)
@@ -480,7 +480,7 @@ func (c *configFactory) invalidatePredicatesForPvc(pvc *v1.PersistentVolumeClaim
 
     if utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) {
         // Add/delete impacts the available PVs to choose from
-        invalidPredicates.Insert(predicates.CheckVolumeBinding)
+        invalidPredicates.Insert(predicates.CheckVolumeBindingPred)
     }
     c.equivalencePodCache.InvalidateCachedPredicateItemOfAllNodes(invalidPredicates)
 }
@@ -491,7 +491,7 @@ func (c *configFactory) invalidatePredicatesForPvcUpdate(old, new *v1.Persistent
     if old.Spec.VolumeName != new.Spec.VolumeName {
         if utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) {
             // PVC volume binding has changed
-            invalidPredicates.Insert(predicates.CheckVolumeBinding)
+            invalidPredicates.Insert(predicates.CheckVolumeBindingPred)
         }
         // The bound volume type may change
         invalidPredicates.Insert(maxPDVolumeCountPredicateKeys...)

plugin/pkg/scheduler/scheduler.go (+1 -1)

@@ -278,7 +278,7 @@ func (sched *Scheduler) assumeAndBindVolumes(assumed *v1.Pod, host string) error
         err = fmt.Errorf("Volume binding started, waiting for completion")
         if bindingRequired {
             if sched.config.Ecache != nil {
-                invalidPredicates := sets.NewString(predicates.CheckVolumeBinding)
+                invalidPredicates := sets.NewString(predicates.CheckVolumeBindingPred)
                 sched.config.Ecache.InvalidateCachedPredicateItemOfAllNodes(invalidPredicates)
             }
 
plugin/pkg/scheduler/scheduler_test.go

+3-1
Original file line numberDiff line numberDiff line change
@@ -619,7 +619,7 @@ func setupTestSchedulerWithVolumeBinding(fakeVolumeBinder *volumebinder.VolumeBi
619619
scache.AddNode(&testNode)
620620

621621
predicateMap := map[string]algorithm.FitPredicate{
622-
"VolumeBindingChecker": predicates.NewVolumeBindingPredicate(fakeVolumeBinder),
622+
predicates.CheckVolumeBindingPred: predicates.NewVolumeBindingPredicate(fakeVolumeBinder),
623623
}
624624

625625
recorder := broadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: "scheduler"})
@@ -637,6 +637,8 @@ func makePredicateError(failReason string) error {
637637
}
638638

639639
func TestSchedulerWithVolumeBinding(t *testing.T) {
640+
order := []string{predicates.CheckVolumeBindingPred, predicates.GeneralPred}
641+
predicates.SetPredicatesOrdering(order)
640642
findErr := fmt.Errorf("find err")
641643
assumeErr := fmt.Errorf("assume err")
642644
bindErr := fmt.Errorf("bind err")
