Skip to content

Commit e45b230

Browse files
authored
Merge branch 'master' into no-overwrite-tar
2 parents 701cf5c + d63d3bc commit e45b230

File tree

33 files changed

+568
-337
lines changed

33 files changed

+568
-337
lines changed

cmd/minikube/cmd/delete.go

+54-34
Original file line number | Diff line number | Diff line change
@@ -88,6 +88,24 @@ func init() {
8888
RootCmd.AddCommand(deleteCmd)
8989
}
9090

91+
func deleteContainersAndVolumes() {
92+
delLabel := fmt.Sprintf("%s=%s", oci.CreatedByLabelKey, "true")
93+
errs := oci.DeleteContainersByLabel(oci.Docker, delLabel)
94+
if len(errs) > 0 { // it will error if there is no container to delete
95+
glog.Infof("error delete containers by label %q (might be okay): %+v", delLabel, errs)
96+
}
97+
98+
errs = oci.DeleteAllVolumesByLabel(oci.Docker, delLabel)
99+
if len(errs) > 0 { // it will not error if there is nothing to delete
100+
glog.Warningf("error delete volumes by label %q (might be okay): %+v", delLabel, errs)
101+
}
102+
103+
errs = oci.PruneAllVolumesByLabel(oci.Docker, delLabel)
104+
if len(errs) > 0 { // it will not error if there is nothing to delete
105+
glog.Warningf("error pruning volumes by label %q (might be okay): %+v", delLabel, errs)
106+
}
107+
}
108+
91109
// runDelete handles the executes the flow of "minikube delete"
92110
func runDelete(cmd *cobra.Command, args []string) {
93111
if len(args) > 0 {
@@ -110,23 +128,9 @@ func runDelete(cmd *cobra.Command, args []string) {
110128
}
111129

112130
if deleteAll {
113-
delLabel := fmt.Sprintf("%s=%s", oci.CreatedByLabelKey, "true")
114-
errs := oci.DeleteContainersByLabel(oci.Docker, delLabel)
115-
if len(errs) > 0 { // it will error if there is no container to delete
116-
glog.Infof("error delete containers by label %q (might be okay): %+v", delLabel, err)
117-
}
131+
deleteContainersAndVolumes()
118132

119-
errs = oci.DeleteAllVolumesByLabel(oci.Docker, delLabel)
120-
if len(errs) > 0 { // it will not error if there is nothing to delete
121-
glog.Warningf("error delete volumes by label %q (might be okay): %+v", delLabel, errs)
122-
}
123-
124-
errs = oci.PruneAllVolumesByLabel(oci.Docker, delLabel)
125-
if len(errs) > 0 { // it will not error if there is nothing to delete
126-
glog.Warningf("error pruning volumes by label %q (might be okay): %+v", delLabel, errs)
127-
}
128-
129-
errs = DeleteProfiles(profilesToDelete)
133+
errs := DeleteProfiles(profilesToDelete)
130134
if len(errs) > 0 {
131135
HandleDeletionErrors(errs)
132136
} else {
@@ -185,13 +189,11 @@ func DeleteProfiles(profiles []*config.Profile) []error {
185189
return errs
186190
}
187191

188-
func deleteProfile(profile *config.Profile) error {
189-
viper.Set(config.ProfileName, profile.Name)
190-
191-
delLabel := fmt.Sprintf("%s=%s", oci.ProfileLabelKey, profile.Name)
192+
func deleteProfileContainersAndVolumes(name string) {
193+
delLabel := fmt.Sprintf("%s=%s", oci.ProfileLabelKey, name)
192194
errs := oci.DeleteContainersByLabel(oci.Docker, delLabel)
193195
if errs != nil { // it will error if there is no container to delete
194-
glog.Infof("error deleting containers for %s (might be okay):\n%v", profile.Name, errs)
196+
glog.Infof("error deleting containers for %s (might be okay):\n%v", name, errs)
195197
}
196198
errs = oci.DeleteAllVolumesByLabel(oci.Docker, delLabel)
197199
if errs != nil { // it will not error if there is nothing to delete
@@ -202,6 +204,13 @@ func deleteProfile(profile *config.Profile) error {
202204
if len(errs) > 0 { // it will not error if there is nothing to delete
203205
glog.Warningf("error pruning volume (might be okay):\n%v", errs)
204206
}
207+
}
208+
209+
func deleteProfile(profile *config.Profile) error {
210+
viper.Set(config.ProfileName, profile.Name)
211+
212+
deleteProfileContainersAndVolumes(profile.Name)
213+
205214
api, err := machine.NewAPIClient()
206215
if err != nil {
207216
delErr := profileDeletionErr(profile.Name, fmt.Sprintf("error getting client %v", err))
@@ -230,37 +239,48 @@ func deleteProfile(profile *config.Profile) error {
230239
out.T(out.FailureType, "Failed to kill mount process: {{.error}}", out.V{"error": err})
231240
}
232241

242+
deleteHosts(api, cc)
243+
244+
// In case DeleteHost didn't complete the job.
245+
deleteProfileDirectory(profile.Name)
246+
247+
if err := deleteConfig(profile.Name); err != nil {
248+
return err
249+
}
250+
251+
if err := deleteContext(profile.Name); err != nil {
252+
return err
253+
}
254+
out.T(out.Deleted, `Removed all traces of the "{{.name}}" cluster.`, out.V{"name": profile.Name})
255+
return nil
256+
}
257+
258+
func deleteHosts(api libmachine.API, cc *config.ClusterConfig) {
233259
if cc != nil {
234260
for _, n := range cc.Nodes {
235261
machineName := driver.MachineName(*cc, n)
236-
if err = machine.DeleteHost(api, machineName); err != nil {
262+
if err := machine.DeleteHost(api, machineName); err != nil {
237263
switch errors.Cause(err).(type) {
238264
case mcnerror.ErrHostDoesNotExist:
239265
glog.Infof("Host %s does not exist. Proceeding ahead with cleanup.", machineName)
240266
default:
241267
out.T(out.FailureType, "Failed to delete cluster: {{.error}}", out.V{"error": err})
242-
out.T(out.Notice, `You may need to manually remove the "{{.name}}" VM from your hypervisor`, out.V{"name": profile.Name})
268+
out.T(out.Notice, `You may need to manually remove the "{{.name}}" VM from your hypervisor`, out.V{"name": machineName})
243269
}
244270
}
245271
}
246272
}
273+
}
247274

248-
// In case DeleteHost didn't complete the job.
249-
deleteProfileDirectory(profile.Name)
250-
251-
if err := config.DeleteProfile(profile.Name); err != nil {
275+
func deleteConfig(profileName string) error {
276+
if err := config.DeleteProfile(profileName); err != nil {
252277
if config.IsNotExist(err) {
253-
delErr := profileDeletionErr(profile.Name, fmt.Sprintf("\"%s\" profile does not exist", profile.Name))
278+
delErr := profileDeletionErr(profileName, fmt.Sprintf("\"%s\" profile does not exist", profileName))
254279
return DeletionError{Err: delErr, Errtype: MissingProfile}
255280
}
256-
delErr := profileDeletionErr(profile.Name, fmt.Sprintf("failed to remove profile %v", err))
281+
delErr := profileDeletionErr(profileName, fmt.Sprintf("failed to remove profile %v", err))
257282
return DeletionError{Err: delErr, Errtype: Fatal}
258283
}
259-
260-
if err := deleteContext(profile.Name); err != nil {
261-
return err
262-
}
263-
out.T(out.Deleted, `Removed all traces of the "{{.name}}" cluster.`, out.V{"name": profile.Name})
264284
return nil
265285
}
266286

cmd/minikube/cmd/start.go

+78-32
Original file line number | Diff line number | Diff line change
@@ -116,7 +116,7 @@ const (
116116
minUsableMem = 1024 // Kubernetes will not start with less than 1GB
117117
minRecommendedMem = 2000 // Warn at no lower than existing configurations
118118
minimumCPUS = 2
119-
minimumDiskSize = "2000mb"
119+
minimumDiskSize = 2000
120120
autoUpdate = "auto-update-drivers"
121121
hostOnlyNicType = "host-only-nic-type"
122122
natNicType = "nat-nic-type"
@@ -337,14 +337,7 @@ func runStart(cmd *cobra.Command, args []string) {
337337
ssh.SetDefaultClient(ssh.External)
338338
}
339339

340-
var existingAddons map[string]bool
341-
if viper.GetBool(installAddons) {
342-
existingAddons = map[string]bool{}
343-
if existing != nil && existing.Addons != nil {
344-
existingAddons = existing.Addons
345-
}
346-
}
347-
kubeconfig, err := node.Start(mc, n, true, existingAddons)
340+
kubeconfig, err := startNode(existing, mc, n)
348341
if err != nil {
349342
exit.WithError("Starting node", err)
350343
}
@@ -389,6 +382,17 @@ func displayEnviron(env []string) {
389382
}
390383
}
391384

385+
func startNode(existing *config.ClusterConfig, mc config.ClusterConfig, n config.Node) (*kubeconfig.Settings, error) {
386+
var existingAddons map[string]bool
387+
if viper.GetBool(installAddons) {
388+
existingAddons = map[string]bool{}
389+
if existing != nil && existing.Addons != nil {
390+
existingAddons = existing.Addons
391+
}
392+
}
393+
return node.Start(mc, n, true, existingAddons)
394+
}
395+
392396
func showKubectlInfo(kcs *kubeconfig.Settings, k8sVersion string, machineName string) error {
393397
if kcs.KeepContext {
394398
out.T(out.Kubectl, "To connect to this cluster, use: kubectl --context={{.name}}", out.V{"name": kcs.ClusterName})
@@ -427,8 +431,11 @@ func showKubectlInfo(kcs *kubeconfig.Settings, k8sVersion string, machineName st
427431
glog.Infof("kubectl: %s, cluster: %s (minor skew: %d)", client, cluster, minorSkew)
428432

429433
if client.Major != cluster.Major || minorSkew > 1 {
430-
out.WarningT("{{.path}} is version {{.client_version}}, and is incompatible with Kubernetes {{.cluster_version}}. You will need to update {{.path}} or use 'minikube kubectl' to connect with this cluster",
434+
out.Ln("")
435+
out.T(out.Warning, "{{.path}} is v{{.client_version}}, which may be incompatible with Kubernetes v{{.cluster_version}}.",
431436
out.V{"path": path, "client_version": client, "cluster_version": cluster})
437+
out.T(out.Tip, "You can also use 'minikube kubectl -- get pods' to invoke a matching version",
438+
out.V{"path": path, "client_version": client})
432439
}
433440
return nil
434441
}
@@ -638,43 +645,62 @@ func validateUser(drvName string) {
638645
}
639646
}
640647

641-
// defaultMemorySize calculates the default memory footprint in MB
642-
func defaultMemorySize(drvName string) int {
643-
fallback := 2200
644-
maximum := 6000
645-
648+
// memoryLimits returns the amount of memory allocated to the system and hypervisor
649+
func memoryLimits(drvName string) (int, int, error) {
646650
v, err := mem.VirtualMemory()
647651
if err != nil {
648-
return fallback
652+
return -1, -1, err
649653
}
650-
available := v.Total / 1024 / 1024
654+
sysLimit := int(v.Total / 1024 / 1024)
655+
containerLimit := 0
651656

652-
// For KIC, do not allocate more memory than the container has available (+ some slack)
653657
if driver.IsKIC(drvName) {
654658
s, err := oci.DaemonInfo(drvName)
655659
if err != nil {
656-
return fallback
660+
return -1, -1, err
661+
}
662+
containerLimit = int(s.TotalMemory / 1024 / 1024)
663+
}
664+
return sysLimit, containerLimit, nil
665+
}
666+
667+
// suggestMemoryAllocation calculates the default memory footprint in MB
668+
func suggestMemoryAllocation(sysLimit int, containerLimit int) int {
669+
fallback := 2200
670+
maximum := 6000
671+
672+
if sysLimit > 0 && fallback > sysLimit {
673+
return sysLimit
674+
}
675+
676+
// If there are container limits, add tiny bit of slack for non-minikube components
677+
if containerLimit > 0 {
678+
if fallback > containerLimit {
679+
return containerLimit
657680
}
658-
maximum = int(s.TotalMemory/1024/1024) - 128
681+
maximum = containerLimit - 48
659682
}
660683

661-
suggested := int(available / 4)
684+
// Suggest 25% of RAM, rounded to nearest 100MB. Hyper-V requires an even number!
685+
suggested := int(float32(sysLimit)/400.0) * 100
662686

663687
if suggested > maximum {
664-
suggested = maximum
688+
return maximum
665689
}
666690

667691
if suggested < fallback {
668-
suggested = fallback
692+
return fallback
669693
}
670694

671-
glog.Infof("Selecting memory default of %dMB, given %dMB available and %dMB maximum", suggested, available, maximum)
672695
return suggested
673696
}
674697

675698
// validateMemorySize validates the memory size matches the minimum recommended
676699
func validateMemorySize() {
677-
req := pkgutil.CalculateSizeInMB(viper.GetString(memory))
700+
req, err := pkgutil.CalculateSizeInMB(viper.GetString(memory))
701+
if err != nil {
702+
exit.WithCodeT(exit.Config, "Unable to parse memory '{{.memory}}': {{.error}}", out.V{"memory": viper.GetString(memory), "error": err})
703+
}
678704
if req < minUsableMem && !viper.GetBool(force) {
679705
exit.WithCodeT(exit.Config, "Requested memory allocation {{.requested}}MB is less than the usable minimum of {{.minimum}}MB",
680706
out.V{"requested": req, "mininum": minUsableMem})
@@ -707,9 +733,13 @@ func validateCPUCount(local bool) {
707733
// validateFlags validates the supplied flags against known bad combinations
708734
func validateFlags(cmd *cobra.Command, drvName string) {
709735
if cmd.Flags().Changed(humanReadableDiskSize) {
710-
diskSizeMB := pkgutil.CalculateSizeInMB(viper.GetString(humanReadableDiskSize))
711-
if diskSizeMB < pkgutil.CalculateSizeInMB(minimumDiskSize) && !viper.GetBool(force) {
712-
exit.WithCodeT(exit.Config, "Requested disk size {{.requested_size}} is less than minimum of {{.minimum_size}}", out.V{"requested_size": diskSizeMB, "minimum_size": pkgutil.CalculateSizeInMB(minimumDiskSize)})
736+
diskSizeMB, err := pkgutil.CalculateSizeInMB(viper.GetString(humanReadableDiskSize))
737+
if err != nil {
738+
exit.WithCodeT(exit.Config, "Validation unable to parse disk size '{{.diskSize}}': {{.error}}", out.V{"diskSize": viper.GetString(humanReadableDiskSize), "error": err})
739+
}
740+
741+
if diskSizeMB < minimumDiskSize && !viper.GetBool(force) {
742+
exit.WithCodeT(exit.Config, "Requested disk size {{.requested_size}} is less than minimum of {{.minimum_size}}", out.V{"requested_size": diskSizeMB, "minimum_size": minimumDiskSize})
713743
}
714744
}
715745

@@ -817,9 +847,20 @@ func generateCfgFromFlags(cmd *cobra.Command, k8sVersion string, drvName string)
817847
kubeNodeName = "m01"
818848
}
819849

820-
mem := defaultMemorySize(drvName)
821-
if viper.GetString(memory) != "" {
822-
mem = pkgutil.CalculateSizeInMB(viper.GetString(memory))
850+
sysLimit, containerLimit, err := memoryLimits(drvName)
851+
if err != nil {
852+
glog.Warningf("Unable to query memory limits: %v", err)
853+
}
854+
855+
mem := suggestMemoryAllocation(sysLimit, containerLimit)
856+
if cmd.Flags().Changed(memory) {
857+
mem, err = pkgutil.CalculateSizeInMB(viper.GetString(memory))
858+
if err != nil {
859+
exit.WithCodeT(exit.Config, "Generate unable to parse memory '{{.memory}}': {{.error}}", out.V{"memory": viper.GetString(memory), "error": err})
860+
}
861+
862+
} else {
863+
glog.Infof("Using suggested %dMB memory alloc based on sys=%dMB, container=%dMB", mem, sysLimit, containerLimit)
823864
}
824865

825866
// Create the initial node, which will necessarily be a control plane
@@ -831,14 +872,19 @@ func generateCfgFromFlags(cmd *cobra.Command, k8sVersion string, drvName string)
831872
Worker: true,
832873
}
833874

875+
diskSize, err := pkgutil.CalculateSizeInMB(viper.GetString(humanReadableDiskSize))
876+
if err != nil {
877+
exit.WithCodeT(exit.Config, "Generate unable to parse disk size '{{.diskSize}}': {{.error}}", out.V{"diskSize": viper.GetString(humanReadableDiskSize), "error": err})
878+
}
879+
834880
cfg := config.ClusterConfig{
835881
Name: viper.GetString(config.ProfileName),
836882
KeepContext: viper.GetBool(keepContext),
837883
EmbedCerts: viper.GetBool(embedCerts),
838884
MinikubeISO: viper.GetString(isoURL),
839885
Memory: mem,
840886
CPUs: viper.GetInt(cpus),
841-
DiskSize: pkgutil.CalculateSizeInMB(viper.GetString(humanReadableDiskSize)),
887+
DiskSize: diskSize,
842888
Driver: drvName,
843889
HyperkitVpnKitSock: viper.GetString(vpnkitSock),
844890
HyperkitVSockPorts: viper.GetStringSlice(vsockPorts),

cmd/minikube/cmd/start_test.go

+33-1
Original file line number | Diff line number | Diff line change
@@ -71,8 +71,9 @@ func TestGetKuberneterVersion(t *testing.T) {
7171
}
7272

7373
func TestGenerateCfgFromFlagsHTTPProxyHandling(t *testing.T) {
74-
viper.SetDefault(memory, defaultMemorySize)
74+
// Set default disk size value in lieu of flag init
7575
viper.SetDefault(humanReadableDiskSize, defaultDiskSize)
76+
7677
originalEnv := os.Getenv("HTTP_PROXY")
7778
defer func() {
7879
err := os.Setenv("HTTP_PROXY", originalEnv)
@@ -124,3 +125,34 @@ func TestGenerateCfgFromFlagsHTTPProxyHandling(t *testing.T) {
124125
})
125126
}
126127
}
128+
129+
func TestSuggestMemoryAllocation(t *testing.T) {
130+
var tests = []struct {
131+
description string
132+
sysLimit int
133+
containerLimit int
134+
want int
135+
}{
136+
{"128GB sys", 128000, 0, 6000},
137+
{"64GB sys", 64000, 0, 6000},
138+
{"16GB sys", 16384, 0, 4000},
139+
{"odd sys", 14567, 0, 3600},
140+
{"4GB sys", 4096, 0, 2200},
141+
{"2GB sys", 2048, 0, 2048},
142+
{"Unable to poll sys", 0, 0, 2200},
143+
{"128GB sys, 16GB container", 128000, 16384, 16336},
144+
{"64GB sys, 16GB container", 64000, 16384, 16000},
145+
{"16GB sys, 4GB container", 16384, 4096, 4000},
146+
{"4GB sys, 3.5GB container", 16384, 3500, 3452},
147+
{"2GB sys, 2GB container", 16384, 2048, 2048},
148+
{"2GB sys, unable to poll container", 16384, 0, 4000},
149+
}
150+
for _, test := range tests {
151+
t.Run(test.description, func(t *testing.T) {
152+
got := suggestMemoryAllocation(test.sysLimit, test.containerLimit)
153+
if got != test.want {
154+
t.Errorf("defaultMemorySize(sys=%d, container=%d) = %d, want: %d", test.sysLimit, test.containerLimit, got, test.want)
155+
}
156+
})
157+
}
158+
}

hack/jenkins/common.sh

+1-1
Original file line numberDiff line numberDiff line change
@@ -353,7 +353,7 @@ touch "${HTML_OUT}"
353353
gopogh_status=$(gopogh -in "${JSON_OUT}" -out "${HTML_OUT}" -name "${JOB_NAME}" -pr "${MINIKUBE_LOCATION}" -repo github.com/kubernetes/minikube/ -details "${COMMIT}") || true
354354
fail_num=$(echo $gopogh_status | jq '.NumberOfFail')
355355
test_num=$(echo $gopogh_status | jq '.NumberOfTests')
356-
pessimistic_status="$completed with ${fail_num} / ${test_num} failures in ${elapsed}"
356+
pessimistic_status="${fail_num} / ${test_num} failures"
357357
description="completed with ${status} in ${elapsed} minute(s)."
358358
if [ "$status" = "failure" ]; then
359359
description="completed with ${pessimistic_status} in ${elapsed} minute(s)."

0 commit comments

Comments (0)