diff --git a/.github/workflows/pages.yaml b/.github/workflows/pages.yaml
index 8436fe4da..d3cf7f34c 100644
--- a/.github/workflows/pages.yaml
+++ b/.github/workflows/pages.yaml
@@ -1,24 +1,46 @@
-name: Deploy Documentation site
+name: Build/Deploy Documentation
 on:
+  # So we can trigger manually if needed
+  workflow_dispatch:
+
+  # To confirm any changes to docs build successfully, without deploying them
+  pull_request:
+    paths:
+      - "docs/**"
+      - ".github/workflows/pages.yaml"
+      - "mkdocs.yml"
+
+  # Pushes to branches do the full build + deployment
   push:
     branches:
       - main
+    paths:
+      - "docs/**"
+      - ".github/workflows/pages.yaml"
+      - "mkdocs.yml"
+
 permissions:
   contents: write
+
+# Don't allow multiple simultaneous instances because that would make deploying the docs nondeterministic
+concurrency:
+  group: ${{ github.workflow }}
+
 jobs:
-  deploy:
+  docs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v3
+
       - uses: actions/setup-python@v4
         with:
           python-version: 3.x
-      - run: echo "cache_id=$(date --utc '+%V')" >> $GITHUB_ENV
-      - uses: actions/cache@v3
-        with:
-          key: mkdocs-material-${{ env.cache_id }}
-          path: .cache
-          restore-keys: |
-            mkdocs-material-
-      - run: pip install mkdocs-material
-      - run: mkdocs gh-deploy --force
\ No newline at end of file
+          cache: pip
+
+      # Deploy on merge
+      - run: make deploy-docs
+        if: github.event_name == 'push'
+
+      # Only build on everything else
+      - run: make build-docs
+        if: github.event_name != 'push'
diff --git a/.gitignore b/.gitignore
index 498367fda..273b66b81 100644
--- a/.gitignore
+++ b/.gitignore
@@ -36,6 +36,6 @@ install.sh
 .\#*
 
 # documentation website asset folder
-docs/_site
+site
 
 .tiltbuild/
diff --git a/Makefile b/Makefile
index 9667eea95..6a4573571 100644
--- a/Makefile
+++ b/Makefile
@@ -243,3 +243,26 @@ deploy: manifests $(KUSTOMIZE) ## Deploy controller to the K8s cluster specified
 .PHONY: undeploy
 undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
 	$(KUSTOMIZE) build $(KUSTOMIZE_BUILD_DIR) | kubectl delete --ignore-not-found=$(ignore-not-found) -f -
+
+
+##@ Docs
+
+VENVDIR=$(abspath docs/.venv)
+REQUIREMENTS_TXT=docs/requirements.txt
+
+.PHONY: build-docs
+build-docs: venv
+	. $(VENV)/activate; \
+	mkdocs build
+
+.PHONY: serve-docs
+serve-docs: venv
+	. $(VENV)/activate; \
+	mkdocs serve
+
+.PHONY: deploy-docs
+deploy-docs: venv
+	. $(VENV)/activate; \
+	mkdocs gh-deploy --force
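+
+# Quick usage sketch (assumes GNU Make and a python3 interpreter on PATH):
+#   make build-docs   # create docs/.venv via Makefile.venv and build the site into ./site
+#   make serve-docs   # preview the docs locally with `mkdocs serve`
+#   make deploy-docs  # publish the site to the gh-pages branch; CI runs this on pushes to main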
+
+include Makefile.venv
diff --git a/Makefile.venv b/Makefile.venv
new file mode 100644
index 000000000..c79b9bbcd
--- /dev/null
+++ b/Makefile.venv
@@ -0,0 +1,274 @@
+#
+# SEAMLESSLY MANAGE PYTHON VIRTUAL ENVIRONMENT WITH A MAKEFILE
+#
+# https://github.com/sio/Makefile.venv           v2022.07.20
+#
+#
+# Insert `include Makefile.venv` at the bottom of your Makefile to enable these
+# rules.
+#
+# When writing your Makefile use '$(VENV)/python' to refer to the Python
+# interpreter within virtual environment and '$(VENV)/executablename' for any
+# other executable in venv.
+#
+# This Makefile provides the following targets:
+#   venv
+#       Use this as a dependency for any target that requires virtual
+#       environment to be created and configured
+#   python, ipython
+#       Use these to launch interactive Python shell within virtual environment
+#   shell, bash, zsh
+#       Launch interactive command line shell. "shell" target launches the
+#       default shell Makefile executes its rules in (usually /bin/sh).
+#       "bash" and "zsh" can be used to refer to the specific desired shell.
+#   show-venv
+#       Show versions of Python and pip, and the path to the virtual environment
+#   clean-venv
+#       Remove virtual environment
+#   $(VENV)/executable_name
+#       Install `executable_name` with pip. Only packages with names matching
+#       the name of the corresponding executable are supported.
+#       Use this as a lightweight mechanism for development dependencies
+#       tracking. E.g. for one-off tools that are not required in every
+#       developer's environment, therefore are not included into
+#       requirements.txt or setup.py.
+#       Note:
+#           Rules using such target or dependency MUST be defined below
+#           `include` directive to make use of correct $(VENV) value.
+#       Example:
+#           codestyle: $(VENV)/pyflakes
+#               $(VENV)/pyflakes .
+#       See `ipython` target below for another example.
+#
+# This Makefile can be configured via following variables:
+#   PY
+#       Command name for system Python interpreter. It is used only initially to
+#       create the virtual environment
+#       Default: python3
+#   REQUIREMENTS_TXT
+#       Space separated list of paths to requirements.txt files.
+#       Paths are resolved relative to current working directory.
+#       Default: requirements.txt
+#
+#       Non-existent files are treated as hard dependencies,
+#       recipes for creating such files must be provided by the main Makefile.
+#       Providing empty value (REQUIREMENTS_TXT=) turns off processing of
+#       requirements.txt even when the file exists.
+#   SETUP_PY
+#       Space separated list of paths to setup.py files.
+#       Corresponding packages will be installed into venv in editable mode
+#       along with all their dependencies
+#       Default: setup.py
+#
+#       Non-existent and empty values are treated in the same way as for REQUIREMENTS_TXT.
+#   WORKDIR
+#       Parent directory for the virtual environment.
+#       Default: current working directory.
+#   VENVDIR
+#       Python virtual environment directory.
+#       Default: $(WORKDIR)/.venv
+#
+# This Makefile was written for GNU Make and may not work with other make
+# implementations.
+#
+#
+# Copyright (c) 2019-2020 Vitaly Potyarkin
+#
+# Licensed under the Apache License, Version 2.0
+#
+#
+
+
+#
+# Configuration variables
+#
+
+WORKDIR?=.
+VENVDIR?=$(WORKDIR)/.venv
+REQUIREMENTS_TXT?=$(wildcard requirements.txt)  # Multiple paths are supported (space separated)
+SETUP_PY?=$(wildcard setup.py)  # Multiple paths are supported (space separated)
+SETUP_CFG?=$(foreach s,$(SETUP_PY),$(wildcard $(patsubst %setup.py,%setup.cfg,$(s))))
+MARKER=.initialized-with-Makefile.venv
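+
+# For example, this repository's main Makefile sets the following before its
+# `include Makefile.venv` line, so the docs virtual environment is created at
+# docs/.venv and populated from docs/requirements.txt:
+#
+#   VENVDIR=$(abspath docs/.venv)
+#   REQUIREMENTS_TXT=docs/requirements.txt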
+
+
+#
+# Python interpreter detection
+#
+
+_PY_AUTODETECT_MSG=Detected Python interpreter: $(PY). Use PY environment variable to override
+
+ifeq (ok,$(shell test -e /dev/null 2>&1 && echo ok))
+NULL_STDERR=2>/dev/null
+else
+NULL_STDERR=2>NUL
+endif
+
+ifndef PY
+_PY_OPTION:=python3
+ifeq (ok,$(shell $(_PY_OPTION) -c "print('ok')" $(NULL_STDERR)))
+PY=$(_PY_OPTION)
+endif
+endif
+
+ifndef PY
+_PY_OPTION:=$(VENVDIR)/bin/python
+ifeq (ok,$(shell $(_PY_OPTION) -c "print('ok')" $(NULL_STDERR)))
+PY=$(_PY_OPTION)
+$(info $(_PY_AUTODETECT_MSG))
+endif
+endif
+
+ifndef PY
+_PY_OPTION:=$(subst /,\,$(VENVDIR)/Scripts/python)
+ifeq (ok,$(shell $(_PY_OPTION) -c "print('ok')" $(NULL_STDERR)))
+PY=$(_PY_OPTION)
+$(info $(_PY_AUTODETECT_MSG))
+endif
+endif
+
+ifndef PY
+_PY_OPTION:=py -3
+ifeq (ok,$(shell $(_PY_OPTION) -c "print('ok')" $(NULL_STDERR)))
+PY=$(_PY_OPTION)
+$(info $(_PY_AUTODETECT_MSG))
+endif
+endif
+
+ifndef PY
+_PY_OPTION:=python
+ifeq (ok,$(shell $(_PY_OPTION) -c "print('ok')" $(NULL_STDERR)))
+PY=$(_PY_OPTION)
+$(info $(_PY_AUTODETECT_MSG))
+endif
+endif
+
+ifndef PY
+define _PY_AUTODETECT_ERR
+Could not detect Python interpreter automatically.
+Please specify path to interpreter via PY environment variable.
+endef
+$(error $(_PY_AUTODETECT_ERR))
+endif
+
+
+#
+# Internal variable resolution
+#
+
+VENV=$(VENVDIR)/bin
+EXE=
+# Detect windows
+ifeq (win32,$(shell $(PY) -c "import __future__, sys; print(sys.platform)"))
+VENV=$(VENVDIR)/Scripts
+EXE=.exe
+endif
+
+touch=touch $(1)
+ifeq (,$(shell command -v touch $(NULL_STDERR)))
+# https://ss64.com/nt/touch.html
+touch=type nul >> $(subst /,\,$(1)) && copy /y /b $(subst /,\,$(1))+,, $(subst /,\,$(1))
+endif
+
+RM?=rm -f
+ifeq (,$(shell command -v $(firstword $(RM)) $(NULL_STDERR)))
+RMDIR:=rd /s /q
+else
+RMDIR:=$(RM) -r
+endif
+
+
+#
+# Virtual environment
+#
+
+.PHONY: venv
+venv: $(VENV)/$(MARKER)
+
+.PHONY: clean-venv
+clean-venv:
+	-$(RMDIR) "$(VENVDIR)"
+
+.PHONY: show-venv
+show-venv: venv
+	@$(VENV)/python -c "import sys; print('Python ' + sys.version.replace('\n',''))"
+	@$(VENV)/pip --version
+	@echo venv: $(VENVDIR)
+
+.PHONY: debug-venv
+debug-venv:
+	@echo "PATH (Shell)=$$PATH"
+	@$(MAKE) --version
+	$(info PATH (GNU Make)="$(PATH)")
+	$(info SHELL="$(SHELL)")
+	$(info PY="$(PY)")
+	$(info REQUIREMENTS_TXT="$(REQUIREMENTS_TXT)")
+	$(info SETUP_PY="$(SETUP_PY)")
+	$(info SETUP_CFG="$(SETUP_CFG)")
+	$(info VENVDIR="$(VENVDIR)")
+	$(info VENVDEPENDS="$(VENVDEPENDS)")
+	$(info WORKDIR="$(WORKDIR)")
+
+
+#
+# Dependencies
+#
+
+ifneq ($(strip $(REQUIREMENTS_TXT)),)
+VENVDEPENDS+=$(REQUIREMENTS_TXT)
+endif
+
+ifneq ($(strip $(SETUP_PY)),)
+VENVDEPENDS+=$(SETUP_PY)
+endif
+ifneq ($(strip $(SETUP_CFG)),)
+VENVDEPENDS+=$(SETUP_CFG)
+endif
+
+$(VENV):
+	$(PY) -m venv $(VENVDIR)
+	$(VENV)/python -m pip install --upgrade pip setuptools wheel
+
+$(VENV)/$(MARKER): $(VENVDEPENDS) | $(VENV)
+ifneq ($(strip $(REQUIREMENTS_TXT)),)
+	$(VENV)/pip install $(foreach path,$(REQUIREMENTS_TXT),-r $(path))
+endif
+ifneq ($(strip $(SETUP_PY)),)
+	$(VENV)/pip install $(foreach path,$(SETUP_PY),-e $(dir $(path)))
+endif
+	$(call touch,$(VENV)/$(MARKER))
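+
+# Note: the marker file above only records that dependencies have been installed.
+# Because it depends on $(VENVDEPENDS), editing requirements.txt or setup.py makes
+# the marker out of date, and the next `make venv` re-runs the pip installs.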
+
+
+#
+# Interactive shells
+#
+
+.PHONY: python
+python: venv
+	exec $(VENV)/python
+
+.PHONY: ipython
+ipython: $(VENV)/ipython
+	exec $(VENV)/ipython
+
+.PHONY: shell
+shell: venv
+	. $(VENV)/activate && exec $(notdir $(SHELL))
+
+.PHONY: bash zsh
+bash zsh: venv
+	. $(VENV)/activate && exec $@
+
+
+#
+# Commandline tools (wildcard rule, executable name must match package name)
+#
+
+ifneq ($(EXE),)
+$(VENV)/%: $(VENV)/%$(EXE) ;
+.PHONY:    $(VENV)/%
+.PRECIOUS: $(VENV)/%$(EXE)
+endif
+
+$(VENV)/%$(EXE): $(VENV)/$(MARKER)
+	$(VENV)/pip install --upgrade $*
+	$(call touch,$@)
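+
+# For instance (a sketch, with a hypothetical docs-lint target and tool): a rule
+# placed in the main Makefile below the `include Makefile.venv` line can depend on
+# $(VENV)/codespell so that pip installs the tool on first use:
+#
+#   docs-lint: $(VENV)/codespell
+#       $(VENV)/codespell docs/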
diff --git a/docs/.gitignore b/docs/.gitignore
new file mode 100644
index 000000000..1d17dae13
--- /dev/null
+++ b/docs/.gitignore
@@ -0,0 +1 @@
+.venv
diff --git a/docs/index.md b/docs/index.md
index 9a751bf54..b60c9e15f 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -6,10 +6,12 @@ OLM v0 was focused on providing unique support for these specific needs for a pa
 Operators are classified as one or more Kubernetes controllers, shipping with one or more API extensions (CustomResourceDefinitions) to provide additional functionality to the cluster.
 
 After running OLM v0 in production clusters for a number of years, it became apparent that there's an appetite to deviate from this coupling of CRDs and controllers, to encompass the lifecycling of extensions that are not just operators. OLM has been helping to define lifecycles for these extensions in which the extensions
-* get installed, potentially causing other extensions to be installed as well as dependencies
-* get customized with the help of customizable configuration at runtime
-* get upgraded to newer version/s following upgrade paths defined by extension developers
-* and finally, get decommissioned and removed.
+
+  * get installed, potentially causing other extensions to be installed as well as dependencies
+  * get customized with the help of customizable configuration at runtime
+  * get upgraded to newer version/s following upgrade paths defined by extension developers
+  * and finally, get decommissioned and removed.
+
 In the dependency model, extensions can rely on each other for required services that are out of scope of the primary purpose of an extension, allowing each extension to focus on a specific purpose. OLM also prevents conflicting extensions from running on the cluster, either with conflicting dependency constraints or conflicts in ownership of services provided via APIs.
 
 Since cluster extensions need to be supported with an enterprise-grade product lifecycle, there has been a growing need for allowing operator authors to limit installation and upgrade of their extension by specifing addtional environmental constraints as dependencies, primarily to align with what was tested by the operator author's QE processes. In other words, there is an evergrowing ask for OLM to allow the author to enforce these support limitations in the form of additional constraints specified by operator authors in their packaging for OLM.
@@ -19,14 +21,15 @@ During their lifecycle on the cluster, OLM also manages the permissions and capa
 OLM also defines a packaging model in which catalogs of extensions, usually containing the entire version history of each extension, are made available to clusters for cluster users to browse and select from. While these catalogs have so far been packaged and shipped as container images, there is a growing appetite to allow more ways of packaging and shipping these catalogs, besides also simplifying the building process of these catalogs, which so far have been very costly. The effort to bring down the cost was kicked off in OLM v0 with conversion of the underlying datastore for catalog metadata to [File-based Catalogs](https://olm.operatorframework.io/docs/reference/file-based-catalogs/), with more effort being invested to slim down the process in v1.
 
 Via new versions of extensions delivered with this new packaging system, OLM will be able to apply updates to existing running extensions on the cluster in a way where the integrity of the cluster is maintained and constraints and dependencies are kept satisfied.
 
-For a detailed writeup of OLM v1 requirements, please read the [Product Requiment Documentation](github.com/operator-framework/operator-controller/olmv1_roadmap.md)
+For a detailed writeup of OLM v1 requirements, please read the [Product Requirements Documentation](olmv1_roadmap.md)
 
 # The OLM community
 
-The OLM v1 project is being tracked in a [Github project](https://github.com/orgs/operator-framework/projects/8/)
+The OLM v1 project is being tracked in a [GitHub project](https://github.com/orgs/operator-framework/projects/8/)
+
+You can reach out to the OLM community for feedbacks/discussions/contributions in the following channels:
 
-You can reach out to the OLM community for feedbacks/discussions/contributions in the following channels:
-* Kubernetes slack channel: [#olm-dev](https://kubernetes.slack.com/messages/olm-dev)
-* [Operator Framework on Google Groups](https://groups.google.com/forum/#!forum/operator-framework)
+
+  * Kubernetes slack channel: [#olm-dev](https://kubernetes.slack.com/messages/olm-dev)
+  * [Operator Framework on Google Groups](https://groups.google.com/forum/#!forum/operator-framework)
diff --git a/olmv1_roadmap.md b/docs/olmv1_roadmap.md
similarity index 100%
rename from olmv1_roadmap.md
rename to docs/olmv1_roadmap.md
diff --git a/docs/requirements.txt b/docs/requirements.txt
new file mode 100644
index 000000000..0b6968098
--- /dev/null
+++ b/docs/requirements.txt
@@ -0,0 +1,35 @@
+Babel==2.12.1
+beautifulsoup4==4.12.2
+certifi==2023.7.22
+charset-normalizer==3.2.0
+click==8.1.7
+colorama==0.4.6
+cssselect==1.2.0
+ghp-import==2.1.0
+idna==3.4
+Jinja2==3.1.2
+lxml==4.9.3
+Markdown==3.4.4
+markdown2==2.4.10
+MarkupSafe==2.1.3
+mergedeep==1.3.4
+mkdocs==1.5.2
+mkdocs-material==9.2.3
+mkdocs-material-extensions==1.1.1
+packaging==23.1
+paginate==0.5.6
+pathspec==0.11.2
+platformdirs==3.10.0
+Pygments==2.16.1
+pymdown-extensions==10.1
+pyquery==2.0.0
+python-dateutil==2.8.2
+PyYAML==6.0.1
+pyyaml_env_tag==0.1
+readtime==3.0.0
+regex==2023.8.8
+requests==2.31.0
+six==1.16.0
+soupsieve==2.4.1
+urllib3==2.0.4
+watchdog==3.0.0
diff --git a/mkdocs.yml b/mkdocs.yml
index 01701574d..ed9ee0437 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -6,11 +6,11 @@ theme:
 
 repo_url: https://github.com/operator-framework/operator-controller
 
-nav: 
+nav:
   - Home: 'index.md'
   - Components: 'components.md'
-  - Tasks: 
-    - Adding a catalog of operators: 'adding-a-catalog.md'
-    - Explore operators available for installation: 'explore-available-packages.md'
-    - Installing an operator: 'installing-an-operator.md'
-    - Deleting an operator: 'deleting-an-operator.md'
+  - Tasks:
+    - Adding a catalog of operators: 'Tasks/adding-a-catalog.md'
+    - Explore operators available for installation: 'Tasks/explore-available-packages.md'
+    - Installing an operator: 'Tasks/installing-an-operator.md'
+    - Deleting an operator: 'Tasks/uninstall-an-operator.md'