
Commit 3f04e0b

Merge pull request #20721 from deads2k/cli-02-kubeconfig
accept --kubeconfig like kubectl
2 parents: 64ac00d + f20c0a4
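In practice the change reads as below; the paths are placeholders, and per the test/cmd/login.sh hunk further down, the older --config spelling still appears to be accepted alongside the new flag:

# Before: oc-specific flag
oc get pods --config=/path/to/admin.kubeconfig

# After this commit: the kubectl-style flag also works
oc get pods --kubeconfig=/path/to/admin.kubeconfig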

File tree

8 files changed: +580 -30 lines changed


contrib/completions/bash/oc
+278 -4 (generated file; diff not rendered by default)

contrib/completions/zsh/oc
+278 -4 (generated file; diff not rendered by default)

hack/test-cmd.sh
+1 -1

@@ -104,7 +104,7 @@ for test in "${tests[@]}"; do
 done
 
 os::log::debug "Metrics information logged to ${LOG_DIR}/metrics.log"
-oc get --raw /metrics --config="${MASTER_CONFIG_DIR}/admin.kubeconfig"> "${LOG_DIR}/metrics.log"
+oc get --raw /metrics --kubeconfig="${MASTER_CONFIG_DIR}/admin.kubeconfig"> "${LOG_DIR}/metrics.log"
 
 if [[ -n "${failed:-}" ]]; then
   exit 1

test/cmd/login.sh
+6 -5

@@ -44,10 +44,10 @@ if [[ "${API_SCHEME}" == "https" ]]; then
 fi
 
 # remove self-provisioner role from user and test login prompt before creating any projects
-os::cmd::expect_success "oc adm policy remove-cluster-role-from-group self-provisioner system:authenticated:oauth --config='${login_kubeconfig}'"
+os::cmd::expect_success "oc adm policy remove-cluster-role-from-group self-provisioner system:authenticated:oauth --kubeconfig='${login_kubeconfig}'"
 os::cmd::expect_success_and_text "oc login --server=${KUBERNETES_MASTER} --certificate-authority='${MASTER_CONFIG_DIR}/ca.crt' -u no-project-test-user -p anything" "You don't have any projects. Contact your system administrator to request a project"
 # make sure standard login prompt is printed once self-provisioner status is restored
-os::cmd::expect_success "oc adm policy add-cluster-role-to-group self-provisioner system:authenticated:oauth --config='${login_kubeconfig}'"
+os::cmd::expect_success "oc adm policy add-cluster-role-to-group self-provisioner system:authenticated:oauth --kubeconfig='${login_kubeconfig}'"
 os::cmd::expect_success_and_text "oc login --server=${KUBERNETES_MASTER} --certificate-authority='${MASTER_CONFIG_DIR}/ca.crt' -u no-project-test-user -p anything" "You don't have any projects. You can try to create a new project, by running"
 # make sure `oc login` fails with unauthorized error
 os::cmd::expect_failure_and_text 'oc login <<< \n' 'Login failed \(401 Unauthorized\)'

@@ -87,7 +87,7 @@ os::cmd::expect_failure_and_text 'oc get pods' '"system:anonymous" cannot list pods'
 
 # make sure we report an error if the config file we pass is not writable
 # Does not work inside of a container, determine why and reenable
-# os::cmd::expect_failure_and_text "oc login '${KUBERNETES_MASTER}' -u test -p test '--config=${templocation}/file' --insecure-skip-tls-verify" 'KUBECONFIG is set to a file that cannot be created or modified'
+# os::cmd::expect_failure_and_text "oc login '${KUBERNETES_MASTER}' -u test -p test '--kubeconfig=${templocation}/file' --insecure-skip-tls-verify" 'KUBECONFIG is set to a file that cannot be created or modified'
 echo "login warnings: ok"
 
 # login and create serviceaccount and test login and logout with a service account token

@@ -107,11 +107,12 @@ os::cmd::expect_success 'oc project project-foo'
 os::cmd::expect_success_and_text 'oc config view' "current-context.+project-foo/${API_HOST}:${API_PORT}/test-user"
 os::cmd::expect_success_and_text 'oc whoami' 'test-user'
 os::cmd::expect_success_and_text "oc whoami --config='${login_kubeconfig}'" 'system:admin'
+os::cmd::expect_success_and_text "oc whoami --kubeconfig='${login_kubeconfig}'" 'system:admin'
 os::cmd::expect_success_and_text 'oc whoami -t' '.'
 os::cmd::expect_success_and_text 'oc whoami -c' '.'
 
-# test config files from the --config flag
-os::cmd::expect_success "oc get services --config='${login_kubeconfig}'"
+# test config files from the --kubeconfig flag
+os::cmd::expect_success "oc get services --kubeconfig='${login_kubeconfig}'"
 # test config files from env vars
 os::cmd::expect_success "KUBECONFIG='${login_kubeconfig}' oc get services"
 os::test::junit::declare_suite_end
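Worth noting from the tail of this hunk: the --kubeconfig flag and the KUBECONFIG environment variable are two routes to the same config file. A minimal standalone sketch (the path is a placeholder):

oc get services --kubeconfig=/tmp/admin.kubeconfig
KUBECONFIG=/tmp/admin.kubeconfig oc get services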

test/cmd/policy.sh
+4 -4

@@ -251,28 +251,28 @@ os::cmd::expect_success 'oc adm policy add-cluster-role-to-user alternate-cluste
 # switch to test user to be sure that default project admin policy works properly
 new_kubeconfig="${workingdir}/tempconfig"
 os::cmd::expect_success "oc config view --raw > $new_kubeconfig"
-os::cmd::expect_success "oc login -u alternate-cluster-admin-user -p anything --config=${new_kubeconfig}"
+os::cmd::expect_success "oc login -u alternate-cluster-admin-user -p anything --kubeconfig=${new_kubeconfig}"
 
 # alternate-cluster-admin should default to having star rights, so he should be able to update his role to that
 os::cmd::try_until_text "oc policy who-can update clusterrroles" "alternate-cluster-admin-user"
 resourceversion=$(oc get clusterrole/alternate-cluster-admin -o=jsonpath="{.metadata.resourceVersion}")
 cp ${OS_ROOT}/test/testdata/bootstrappolicy/alternate_cluster_admin.yaml ${workingdir}
 os::util::sed "s/RESOURCE_VERSION/${resourceversion}/g" ${workingdir}/alternate_cluster_admin.yaml
-os::cmd::expect_success "oc replace --config=${new_kubeconfig} clusterrole/alternate-cluster-admin -f ${workingdir}/alternate_cluster_admin.yaml"
+os::cmd::expect_success "oc replace --kubeconfig=${new_kubeconfig} clusterrole/alternate-cluster-admin -f ${workingdir}/alternate_cluster_admin.yaml"
 
 # alternate-cluster-admin can restrict himself to less groups (no star)
 os::cmd::try_until_text "oc policy who-can update clusterrroles" "alternate-cluster-admin-user"
 resourceversion=$(oc get clusterrole/alternate-cluster-admin -o=jsonpath="{.metadata.resourceVersion}")
 cp ${OS_ROOT}/test/testdata/bootstrappolicy/cluster_admin_without_apigroups.yaml ${workingdir}
 os::util::sed "s/RESOURCE_VERSION/${resourceversion}/g" ${workingdir}/cluster_admin_without_apigroups.yaml
-os::cmd::expect_success "oc replace --config=${new_kubeconfig} clusterrole/alternate-cluster-admin -f ${workingdir}/cluster_admin_without_apigroups.yaml"
+os::cmd::expect_success "oc replace --kubeconfig=${new_kubeconfig} clusterrole/alternate-cluster-admin -f ${workingdir}/cluster_admin_without_apigroups.yaml"
 
 # alternate-cluster-admin should NOT have the power add back star now (anything other than star is considered less so this mimics testing against no groups)
 os::cmd::try_until_failure "oc policy who-can update hpa.autoscaling | grep -q alternate-cluster-admin-user"
 resourceversion=$(oc get clusterrole/alternate-cluster-admin -o=jsonpath="{.metadata.resourceVersion}")
 cp ${OS_ROOT}/test/testdata/bootstrappolicy/alternate_cluster_admin.yaml ${workingdir}
 os::util::sed "s/RESOURCE_VERSION/${resourceversion}/g" ${workingdir}/alternate_cluster_admin.yaml
-os::cmd::expect_failure_and_text "oc replace --config=${new_kubeconfig} clusterrole/alternate-cluster-admin -f ${workingdir}/alternate_cluster_admin.yaml" "attempt to grant extra privileges"
+os::cmd::expect_failure_and_text "oc replace --kubeconfig=${new_kubeconfig} clusterrole/alternate-cluster-admin -f ${workingdir}/alternate_cluster_admin.yaml" "attempt to grant extra privileges"
 
 # This test validates cluster level policy for serviceaccounts
 # ensure service account cannot list pods at the namespace level

test/cmd/status.sh
+2 -2

@@ -27,7 +27,7 @@ os::cmd::try_until_text "oc get projects -o jsonpath='{.items}'" "^\[\]$"
 os::cmd::expect_success 'oc logout'
 
 # remove self-provisioner role from user and test login prompt before creating any projects
-os::cmd::expect_success "oc adm policy remove-cluster-role-from-group self-provisioner system:authenticated:oauth --config='${login_kubeconfig}'"
+os::cmd::expect_success "oc adm policy remove-cluster-role-from-group self-provisioner system:authenticated:oauth --kubeconfig='${login_kubeconfig}'"
 
 # login as 'test-user'
 os::cmd::expect_success "oc login --server=${KUBERNETES_MASTER} --certificate-authority='${MASTER_CONFIG_DIR}/ca.crt' -u test-user -p anything"

@@ -37,7 +37,7 @@ os::cmd::expect_success_and_text 'oc status' "You don't have any projects. Contact your system administrator to request a project"
 os::cmd::expect_success_and_text 'oc status --all-namespaces' "Showing all projects on server"
 # make sure standard login prompt is printed once self-provisioner status is restored
 os::cmd::expect_success "oc logout"
-os::cmd::expect_success "oc adm policy add-cluster-role-to-group self-provisioner system:authenticated:oauth --config='${login_kubeconfig}'"
+os::cmd::expect_success "oc adm policy add-cluster-role-to-group self-provisioner system:authenticated:oauth --kubeconfig='${login_kubeconfig}'"
 os::cmd::expect_success_and_text "oc login --server=${KUBERNETES_MASTER} --certificate-authority='${MASTER_CONFIG_DIR}/ca.crt' -u test-user -p anything" "You don't have any projects. You can try to create a new project, by running"
 
 # make sure `oc status` re-uses the correct "no projects" message from `oc login`

test/extended/alternate_certs.sh
+10 -10

@@ -52,24 +52,24 @@ OPENSHIFT_ON_PANIC=crash openshift start master \
 OS_PID=$!
 
 # Wait for the server to be up
-os::cmd::try_until_success "oc whoami --config=master/admin.kubeconfig"
+os::cmd::try_until_success "oc whoami --kubeconfig=master/admin.kubeconfig"
 
 # Verify the server is serving with the custom and internal CAs, and that the generated ca-bundle.crt works for both
 os::cmd::expect_success_and_text "curl -vvv https://localhost:${API_PORT} --cacert master/ca-bundle.crt -s 2>&1" 'my-custom-ca'
 os::cmd::expect_success_and_text "curl -vvv https://127.0.0.1:${API_PORT} --cacert master/ca-bundle.crt -s 2>&1" 'openshift-signer'
 
 # Verify kubeconfigs have connectivity to hosts serving with custom and generated certs
-os::cmd::expect_success_and_text "oc whoami --config=master/admin.kubeconfig" 'system:admin'
-os::cmd::expect_success_and_text "oc whoami --config=master/admin.kubeconfig --server=https://localhost:${API_PORT}" 'system:admin'
-os::cmd::expect_success_and_text "oc whoami --config=master/admin.kubeconfig --server=https://127.0.0.1:${API_PORT}" 'system:admin'
+os::cmd::expect_success_and_text "oc whoami --kubeconfig=master/admin.kubeconfig" 'system:admin'
+os::cmd::expect_success_and_text "oc whoami --kubeconfig=master/admin.kubeconfig --server=https://localhost:${API_PORT}" 'system:admin'
+os::cmd::expect_success_and_text "oc whoami --kubeconfig=master/admin.kubeconfig --server=https://127.0.0.1:${API_PORT}" 'system:admin'
 
-os::cmd::expect_success_and_text "oc whoami --config=master/openshift-master.kubeconfig" 'system:openshift-master'
-os::cmd::expect_success_and_text "oc whoami --config=master/openshift-master.kubeconfig --server=https://localhost:${API_PORT}" 'system:openshift-master'
-os::cmd::expect_success_and_text "oc whoami --config=master/openshift-master.kubeconfig --server=https://127.0.0.1:${API_PORT}" 'system:openshift-master'
+os::cmd::expect_success_and_text "oc whoami --kubeconfig=master/openshift-master.kubeconfig" 'system:openshift-master'
+os::cmd::expect_success_and_text "oc whoami --kubeconfig=master/openshift-master.kubeconfig --server=https://localhost:${API_PORT}" 'system:openshift-master'
+os::cmd::expect_success_and_text "oc whoami --kubeconfig=master/openshift-master.kubeconfig --server=https://127.0.0.1:${API_PORT}" 'system:openshift-master'
 
-os::cmd::expect_success_and_text "oc whoami --config=node-mynode/node.kubeconfig" 'system:node:mynode'
-os::cmd::expect_success_and_text "oc whoami --config=node-mynode/node.kubeconfig --server=https://localhost:${API_PORT}" 'system:node:mynode'
-os::cmd::expect_success_and_text "oc whoami --config=node-mynode/node.kubeconfig --server=https://127.0.0.1:${API_PORT}" 'system:node:mynode'
+os::cmd::expect_success_and_text "oc whoami --kubeconfig=node-mynode/node.kubeconfig" 'system:node:mynode'
+os::cmd::expect_success_and_text "oc whoami --kubeconfig=node-mynode/node.kubeconfig --server=https://localhost:${API_PORT}" 'system:node:mynode'
+os::cmd::expect_success_and_text "oc whoami --kubeconfig=node-mynode/node.kubeconfig --server=https://127.0.0.1:${API_PORT}" 'system:node:mynode'
 
 os::test::junit::declare_suite_end
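A pattern this file leans on, shown standalone (path and port are placeholders): --kubeconfig supplies the credentials and CA material while --server overrides only the address, which is how one set of credentials gets verified against several listeners.

oc whoami --kubeconfig=master/admin.kubeconfig --server=https://127.0.0.1:8443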

vendor/k8s.io/kubernetes/pkg/kubectl/genericclioptions/config_flags.go
+1 (vendored file; diff not rendered by default)
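The one-line vendor change presumably wires the kubeconfig flag into the shared kubectl flag set that oc now consumes. One useful consequence, assuming the standard kubectl/clientcmd precedence (an explicit --kubeconfig beats the KUBECONFIG environment variable; paths are placeholders):

KUBECONFIG=/tmp/other.kubeconfig oc whoami --kubeconfig=/tmp/admin.kubeconfig   # reads /tmp/admin.kubeconfig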
