Skip to content

Commit e098a3c

Browse files
authored
Merge pull request #7449 from medyagh/fix_soft_start_nondocker
Behavior change: start with no arguments uses existing cluster config
2 parents 5114bed + 65e1ff3 commit e098a3c

File tree

7 files changed

+657
-401
lines changed

7 files changed

+657
-401
lines changed

Diff for: cmd/minikube/cmd/flags.go

-27
This file was deleted.

Diff for: cmd/minikube/cmd/start.go

+7-343
Large diffs are not rendered by default.

Diff for: cmd/minikube/cmd/start_flags.go

+587
Large diffs are not rendered by default.

Diff for: cmd/minikube/cmd/start_test.go

+2-2
Original file line numberDiff line numberDiff line change
@@ -114,7 +114,7 @@ func TestMirrorCountry(t *testing.T) {
114114
cmd := &cobra.Command{}
115115
viper.SetDefault(imageRepository, test.imageRepository)
116116
viper.SetDefault(imageMirrorCountry, test.mirrorCountry)
117-
config, _, err := generateCfgFromFlags(cmd, k8sVersion, "none")
117+
config, _, err := generateClusterConfig(cmd, nil, k8sVersion, "none")
118118
if err != nil {
119119
t.Fatalf("Got unexpected error %v during config generation", err)
120120
}
@@ -166,7 +166,7 @@ func TestGenerateCfgFromFlagsHTTPProxyHandling(t *testing.T) {
166166
if err := os.Setenv("HTTP_PROXY", test.proxy); err != nil {
167167
t.Fatalf("Unexpected error setting HTTP_PROXY: %v", err)
168168
}
169-
config, _, err := generateCfgFromFlags(cmd, k8sVersion, "none")
169+
config, _, err := generateClusterConfig(cmd, nil, k8sVersion, "none")
170170
if err != nil {
171171
t.Fatalf("Got unexpected error %v during config generation", err)
172172
}

Diff for: pkg/minikube/config/config.go

-1
Original file line numberDiff line numberDiff line change
@@ -193,7 +193,6 @@ func (c *simpleConfigLoader) LoadConfigFromFile(profileName string, miniHome ...
193193
}
194194

195195
func (c *simpleConfigLoader) WriteConfigToFile(profileName string, cc *ClusterConfig, miniHome ...string) error {
196-
// Move to profile package
197196
path := profileFilePath(profileName, miniHome...)
198197
contents, err := json.MarshalIndent(cc, "", " ")
199198
if err != nil {

Diff for: pkg/minikube/node/start.go

+22-26
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,7 @@ import (
2121
"net"
2222
"os"
2323
"os/exec"
24+
"runtime/debug"
2425
"strconv"
2526
"strings"
2627
"sync"
@@ -56,13 +57,7 @@ import (
5657
"k8s.io/minikube/pkg/util/retry"
5758
)
5859

59-
const (
60-
waitTimeout = "wait-timeout"
61-
embedCerts = "embed-certs"
62-
keepContext = "keep-context"
63-
imageRepository = "image-repository"
64-
containerRuntime = "container-runtime"
65-
)
60+
const waitTimeout = "wait-timeout"
6661

6762
// Start spins up a guest and starts the kubernetes node.
6863
func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]bool, apiServer bool) (*kubeconfig.Settings, error) {
@@ -103,7 +98,7 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo
10398
}
10499

105100
// configure the runtime (docker, containerd, crio)
106-
cr := configureRuntimes(mRunner, cc.Driver, cc.KubernetesConfig, sv)
101+
cr := configureRuntimes(mRunner, cc, sv)
107102
showVersionInfo(n.KubernetesVersion, cr)
108103

109104
var bs bootstrapper.Bootstrapper
@@ -189,10 +184,11 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo
189184
}
190185

191186
// ConfigureRuntimes does what needs to happen to get a runtime going.
192-
func configureRuntimes(runner cruntime.CommandRunner, drvName string, k8s config.KubernetesConfig, kv semver.Version) cruntime.Manager {
187+
func configureRuntimes(runner cruntime.CommandRunner, cc config.ClusterConfig, kv semver.Version) cruntime.Manager {
193188
co := cruntime.Config{
194-
Type: viper.GetString(containerRuntime),
195-
Runner: runner, ImageRepository: k8s.ImageRepository,
189+
Type: cc.KubernetesConfig.ContainerRuntime,
190+
Runner: runner,
191+
ImageRepository: cc.KubernetesConfig.ImageRepository,
196192
KubernetesVersion: kv,
197193
}
198194
cr, err := cruntime.New(co)
@@ -201,28 +197,29 @@ func configureRuntimes(runner cruntime.CommandRunner, drvName string, k8s config
201197
}
202198

203199
disableOthers := true
204-
if driver.BareMetal(drvName) {
200+
if driver.BareMetal(cc.Driver) {
205201
disableOthers = false
206202
}
207203

208204
// Preload is overly invasive for bare metal, and caching is not meaningful. KIC handled elsewhere.
209-
if driver.IsVM(drvName) {
210-
if err := cr.Preload(k8s); err != nil {
205+
if driver.IsVM(cc.Driver) {
206+
if err := cr.Preload(cc.KubernetesConfig); err != nil {
211207
switch err.(type) {
212208
case *cruntime.ErrISOFeature:
213209
out.ErrT(out.Tip, "Existing disk is missing new features ({{.error}}). To upgrade, run 'minikube delete'", out.V{"error": err})
214210
default:
215211
glog.Warningf("%s preload failed: %v, falling back to caching images", cr.Name(), err)
216212
}
217213

218-
if err := machine.CacheImagesForBootstrapper(k8s.ImageRepository, k8s.KubernetesVersion, viper.GetString(cmdcfg.Bootstrapper)); err != nil {
214+
if err := machine.CacheImagesForBootstrapper(cc.KubernetesConfig.ImageRepository, cc.KubernetesConfig.KubernetesVersion, viper.GetString(cmdcfg.Bootstrapper)); err != nil {
219215
exit.WithError("Failed to cache images", err)
220216
}
221217
}
222218
}
223219

224220
err = cr.Enable(disableOthers)
225221
if err != nil {
222+
debug.PrintStack()
226223
exit.WithError("Failed to enable container runtime", err)
227224
}
228225

@@ -275,8 +272,8 @@ func setupKubeconfig(h *host.Host, cc *config.ClusterConfig, n *config.Node, clu
275272
ClientCertificate: localpath.ClientCert(cc.Name),
276273
ClientKey: localpath.ClientKey(cc.Name),
277274
CertificateAuthority: localpath.CACert(),
278-
KeepContext: viper.GetBool(keepContext),
279-
EmbedCerts: viper.GetBool(embedCerts),
275+
KeepContext: cc.KeepContext,
276+
EmbedCerts: cc.EmbedCerts,
280277
}
281278

282279
kcs.SetPath(kubeconfig.PathFromEnv())
@@ -303,7 +300,7 @@ func startMachine(cfg *config.ClusterConfig, node *config.Node) (runner command.
303300
exit.WithError("Failed to get command runner", err)
304301
}
305302

306-
ip := validateNetwork(host, runner)
303+
ip := validateNetwork(host, runner, cfg.KubernetesConfig.ImageRepository)
307304

308305
// Bypass proxy for minikube's vm host ip
309306
err = proxy.ExcludeIP(ip)
@@ -352,7 +349,7 @@ func startHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*hos
352349
}
353350

354351
// validateNetwork tries to catch network problems as soon as possible
355-
func validateNetwork(h *host.Host, r command.Runner) string {
352+
func validateNetwork(h *host.Host, r command.Runner, imageRepository string) string {
356353
ip, err := h.Driver.GetIP()
357354
if err != nil {
358355
exit.WithError("Unable to get VM IP address", err)
@@ -381,7 +378,7 @@ func validateNetwork(h *host.Host, r command.Runner) string {
381378
}
382379

383380
// Non-blocking
384-
go tryRegistry(r, h.Driver.DriverName())
381+
go tryRegistry(r, h.Driver.DriverName(), imageRepository)
385382
return ip
386383
}
387384

@@ -423,7 +420,7 @@ func trySSH(h *host.Host, ip string) {
423420
}
424421

425422
// tryRegistry tries to connect to the image repository
426-
func tryRegistry(r command.Runner, driverName string) {
423+
func tryRegistry(r command.Runner, driverName string, imageRepository string) {
427424
// 2 second timeout. For best results, call tryRegistry in a non-blocking manner.
428425
opts := []string{"-sS", "-m", "2"}
429426

@@ -432,15 +429,14 @@ func tryRegistry(r command.Runner, driverName string) {
432429
opts = append([]string{"-x", proxy}, opts...)
433430
}
434431

435-
repo := viper.GetString(imageRepository)
436-
if repo == "" {
437-
repo = images.DefaultKubernetesRepo
432+
if imageRepository == "" {
433+
imageRepository = images.DefaultKubernetesRepo
438434
}
439435

440-
opts = append(opts, fmt.Sprintf("https://%s/", repo))
436+
opts = append(opts, fmt.Sprintf("https://%s/", imageRepository))
441437
if rr, err := r.RunCmd(exec.Command("curl", opts...)); err != nil {
442438
glog.Warningf("%s failed: %v", rr.Args, err)
443-
out.WarningT("This {{.type}} is having trouble accessing https://{{.repository}}", out.V{"repository": repo, "type": driver.MachineType(driverName)})
439+
out.WarningT("This {{.type}} is having trouble accessing https://{{.repository}}", out.V{"repository": imageRepository, "type": driver.MachineType(driverName)})
444440
out.ErrT(out.Tip, "To pull new external images, you may need to configure a proxy: https://minikube.sigs.k8s.io/docs/reference/networking/proxy/")
445441
}
446442
}

Diff for: test/integration/functional_test.go

+39-2
Original file line numberDiff line numberDiff line change
@@ -38,20 +38,24 @@ import (
3838

3939
"github.com/google/go-cmp/cmp"
4040

41+
"k8s.io/minikube/pkg/minikube/config"
4142
"k8s.io/minikube/pkg/minikube/localpath"
43+
"k8s.io/minikube/pkg/util/retry"
4244

4345
"github.com/elazarl/goproxy"
4446
"github.com/hashicorp/go-retryablehttp"
4547
"github.com/otiai10/copy"
4648
"github.com/phayes/freeport"
4749
"github.com/pkg/errors"
4850
"golang.org/x/build/kubernetes/api"
49-
"k8s.io/minikube/pkg/util/retry"
5051
)
5152

5253
// validateFunc are for subtests that share a single setup
5354
type validateFunc func(context.Context, *testing.T, string)
5455

56+
// used in validateStartWithProxy and validateSoftStart
57+
var apiPortTest = 8441
58+
5559
// TestFunctional are functionality tests which can safely share a profile in parallel
5660
func TestFunctional(t *testing.T) {
5761

@@ -80,6 +84,7 @@ func TestFunctional(t *testing.T) {
8084
}{
8185
{"CopySyncFile", setupFileSync}, // Set file for the file sync test case
8286
{"StartWithProxy", validateStartWithProxy}, // Set everything else up for success
87+
{"SoftStart", validateSoftStart}, // do a soft start. ensure config didn't change.
8388
{"KubeContext", validateKubeContext}, // Racy: must come immediately after "minikube start"
8489
{"KubectlGetPods", validateKubectlGetPods}, // Make sure apiserver is up
8590
{"CacheCmd", validateCacheCmd}, // Caches images needed for subsequent tests because of proxy
@@ -184,7 +189,8 @@ func validateStartWithProxy(ctx context.Context, t *testing.T, profile string) {
184189
}
185190

186191
// Use more memory so that we may reliably fit MySQL and nginx
187-
startArgs := append([]string{"start", "-p", profile, "--wait=true"}, StartArgs()...)
192+
// changing the API server port so that later, during soft start, we can verify it didn't change
193+
startArgs := append([]string{"start", "-p", profile, fmt.Sprintf("--apiserver-port=%d", apiPortTest), "--wait=true"}, StartArgs()...)
188194
c := exec.CommandContext(ctx, Target(), startArgs...)
189195
env := os.Environ()
190196
env = append(env, fmt.Sprintf("HTTP_PROXY=%s", srv.Addr))
@@ -206,6 +212,37 @@ func validateStartWithProxy(ctx context.Context, t *testing.T, profile string) {
206212
}
207213
}
208214

215+
// validateSoftStart validates that, after minikube has already started, running "minikube start" again does not change the existing configs.
216+
func validateSoftStart(ctx context.Context, t *testing.T, profile string) {
217+
start := time.Now()
218+
// the preceding test started minikube with --apiserver-port=8441
219+
beforeCfg, err := config.LoadProfile(profile)
220+
if err != nil {
221+
t.Errorf("error reading cluster config before soft start: %v", err)
222+
}
223+
if beforeCfg.Config.KubernetesConfig.NodePort != apiPortTest {
224+
t.Errorf("expected cluster config node port before soft start to be %d but got %d", apiPortTest, beforeCfg.Config.KubernetesConfig.NodePort)
225+
}
226+
227+
softStartArgs := []string{"start", "-p", profile}
228+
c := exec.CommandContext(ctx, Target(), softStartArgs...)
229+
rr, err := Run(t, c)
230+
if err != nil {
231+
t.Errorf("failed to soft start minikube. args %q: %v", rr.Command(), err)
232+
}
233+
t.Logf("soft start took %s for %q cluster.", time.Since(start), profile)
234+
235+
afterCfg, err := config.LoadProfile(profile)
236+
if err != nil {
237+
t.Errorf("error reading cluster config after soft start: %v", err)
238+
}
239+
240+
if afterCfg.Config.KubernetesConfig.NodePort != apiPortTest {
241+
t.Errorf("expected node port in the config not to change after soft start. expected node port to be %d but got %d.", apiPortTest, afterCfg.Config.KubernetesConfig.NodePort)
242+
}
243+
244+
}
245+
209246
// validateKubeContext asserts that kubectl is properly configured (race-condition prone!)
210247
func validateKubeContext(ctx context.Context, t *testing.T, profile string) {
211248
rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "config", "current-context"))

0 commit comments

Comments
 (0)