diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 708ff272becf..e7733f6ceb43 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -1,6 +1,6 @@ { "image":"mcr.microsoft.com/devcontainers/universal:2", - "postCreateCommand": "sleep 30 && docker run --rm -it -v `pwd`:/docs:Z quay.io/openshift-cs/asciibinder sh -c \"git config --global --add safe.directory /docs && asciibinder build --distro openshift-rosa && asciibinder build --distro openshift-enterprise\" && python3 -m http.server -d ./_preview", + "postCreateCommand": "sleep 60 && docker run --rm -it -v `pwd`:/docs:Z quay.io/openshift-cs/asciibinder sh -c \"git config --global --add safe.directory /docs && asciibinder build --distro openshift-rosa && asciibinder build --distro openshift-enterprise\" && python3 -m http.server -d ./_preview", "customizations": { "vscode": { "settings": { diff --git a/.komment/00000.json b/.komment/00000.json new file mode 100644 index 000000000000..35037e0ea561 --- /dev/null +++ b/.komment/00000.json @@ -0,0 +1,449 @@ +[ + { + "name": "build_for_portal.py", + "path": "build_for_portal.py", + "content": { + "structured": { + "description": "A tool that builds and synchronizes documentation files from an upstream source repository into a local directory structure for distribution in various formats such as HTML, PDF, and EPUB. The process involves fetching the latest sources, filtering books based on the specified distribution, building master files, reformating data for Drupal, and pushing changes back to GitLab repositories.", + "items": [ + { + "id": "6483b725-6e6f-b3af-7249-43be1b68f367", + "ancestors": [], + "description": "Defines and returns an instance of an ArgumentParser, which allows for parsing command-line arguments. 
It sets up various command-line options with default values and descriptions, enabling the user to customize the behavior of a program or script.", + "params": [], + "returns": { + "type_name": "argparseArgumentParser", + "description": "An instance of ArgumentParser. This object will be used to parse command-line arguments and provide help messages when necessary." + }, + "usage": { + "language": "python", + "code": "parser = setup_parser()\nargs = parser.parse_args([\"--distro\", \"openshift-enterprise\"])\n", + "description": "" + }, + "name": "setup_parser", + "location": { + "start": 126, + "insert": 127, + "offset": " ", + "indent": 4, + "comment": null + }, + "item_type": "function", + "length": 40, + "docLength": null + }, + { + "id": "e1320c24-5e9f-23ae-3347-d0fc4de67b9f", + "ancestors": [ + "1cd2200b-fb3f-4eb6-9147-990d3b577a0a" + ], + "description": "Constructs a string representing a directory entry based on its name and depth. It appends this string to the `master_entries` list if either the `include_name` flag is set or the depth exceeds zero.", + "params": [ + { + "name": "dir_node", + "type_name": "Dict[str, Any]", + "description": "Expected to contain directory information, specifically 'Name' attribute which represents the name of the directory." + }, + { + "name": "parent_dir", + "type_name": "str", + "description": "Passed as an argument to this callback function, representing the current directory being processed." + }, + { + "name": "depth", + "type_name": "int", + "description": "0-based, indicating the current directory level being processed. It affects the indentation of the directory name in the output list by adding leading spaces based on its value." 
+ } + ], + "returns": null, + "usage": { + "language": "python", + "code": "dir_node = {\"Name\": \"Documents\"}\nparent_dir = \"\"\ndepth = 0\ndir_callback(dir_node, parent_dir, depth)\nmaster_entries.append(...)", + "description": "" + }, + "name": "dir_callback", + "location": { + "start": 387, + "insert": 388, + "offset": " ", + "indent": 8, + "comment": null + }, + "item_type": "function", + "length": 5, + "docLength": null + }, + { + "id": "09ffafc5-fda6-5bb7-404d-af1a905ce7bc", + "ancestors": [ + "1cd2200b-fb3f-4eb6-9147-990d3b577a0a" + ], + "description": "Processes a topic node and adds its book file path to the master entries list as an include statement. It handles specific cases for all-in-one mode and comment files, appending additional comments as needed.", + "params": [ + { + "name": "topic_node", + "type_name": "Dict[any, any]", + "description": "Expected to contain information about a topic node." + }, + { + "name": "parent_dir", + "type_name": "str", + "description": "Used as the parent directory to construct the full path for the book file. It is joined with the topic node's \"File\" attribute and \".adoc\" extension to form the full file path." + }, + { + "name": "depth", + "type_name": "int", + "description": "Used to calculate the level offset for an Asciidoctor include directive. This level offset determines the nesting level of included files in the generated documentation." 
+ } + ], + "returns": null, + "usage": { + "language": "python", + "code": "topic_node = {\"File\": \"main\"}\nparent_dir = \"/path/to/main\"\nbook_dir = \"/path/to/book\"\nall_in_one = True\nCOMMENT_FILES = [\"file1\", \"file2\"]\n\ntopic_callback(topic_node, parent_dir, 0)", + "description": "" + }, + "name": "topic_callback", + "location": { + "start": 393, + "insert": 394, + "offset": " ", + "indent": 8, + "comment": null + }, + "item_type": "function", + "length": 11, + "docLength": null + }, + { + "id": "a211b250-8c76-b3be-8449-7f822234c1cb", + "ancestors": [ + "c24a6808-bdb8-f38e-1041-7a069d23c354" + ], + "description": "Traverses a directory tree and copies all image files from subdirectories to a specified destination directory (`dest_dir`). It takes three parameters: the current directory node, its parent directory, and the current depth.", + "params": [ + { + "name": "dir_node", + "type_name": "Dict[any, any]", + "description": "Expected to contain information about a directory node in a file system tree structure. It is assumed that this dictionary contains key-value pairs for \"Dir\" and possibly other relevant details." + }, + { + "name": "parent_dir", + "type_name": "str", + "description": "The directory path from which the current directory node (`dir_node`) is located, used as the base for constructing file paths." + }, + { + "name": "depth", + "type_name": "int", + "description": "Used to track the level of recursion when traversing directory trees." 
+ } + ], + "returns": null, + "usage": { + "language": "python", + "code": "dir_nodes = [{\"Dir\": \"dir1\"}, {\"Dir\": \"dir2\"}]\ndest_dir = \"/path/to/destination\"\nfor node in dir_nodes:\n dir_callback(node, \"/\", 0)\n", + "description": "" + }, + "name": "dir_callback", + "location": { + "start": 470, + "insert": 471, + "offset": " ", + "indent": 8, + "comment": null + }, + "item_type": "function", + "length": 8, + "docLength": null + }, + { + "id": "ca792679-ca97-4a84-de47-06200812ffaf", + "ancestors": [ + "b7b7e365-d519-ef80-c44c-a0aa63921384" + ], + "description": "Creates a destination directory path by joining the `dest_dir`, `parent_dir`, and `dir_node[\"Dir\"]`. It then ensures that the created directory exists using the `ensure_directory` function.", + "params": [ + { + "name": "dir_node", + "type_name": "Dict[any, any]", + "description": "Assumed to be a node from a directory tree, containing information such as the name of the directory (`\"Dir\"`), its children, and possibly other relevant data." + }, + { + "name": "parent_dir", + "type_name": "str", + "description": "Used to construct the destination directory path by joining it with the current directory (`dir_node`) and the destination root directory (`dest_dir`)." + }, + { + "name": "depth", + "type_name": "int", + "description": "Used to represent the level of depth for the current directory being processed. It does not seem to have any direct effect on the function's behavior, but it might be used elsewhere in the code." 
+ } + ], + "returns": null, + "usage": { + "language": "python", + "code": "dir_node = {\"Dir\": \"new_dir\"}\nparent_dir = \"source\"\ndest_dir = \"destination\"\ndepth = 1\ndir_callback(dir_node, parent_dir, depth)", + "description": "" + }, + "name": "dir_callback", + "location": { + "start": 488, + "insert": 489, + "offset": " ", + "indent": 8, + "comment": null + }, + "item_type": "function", + "length": 3, + "docLength": null + }, + { + "id": "59a30cb9-bbe4-3a8e-c241-1ed7911ead22", + "ancestors": [ + "b7b7e365-d519-ef80-c44c-a0aa63921384" + ], + "description": "Copies an Adoc file from a source directory to a destination directory based on topic and parent directory information provided in the `topic_node` dictionary. It also maintains the directory structure during the copying process.", + "params": [ + { + "name": "topic_node", + "type_name": "Dict", + "description": "Expected to contain keys \"File\" that specifies the name of an Asciidoc file, representing a topic or chapter within the document." + }, + { + "name": "parent_dir", + "type_name": "str", + "description": "Used as a directory name within both source (`node_src_dir`) and destination (`node_dest_dir`) paths, allowing for hierarchical organization of files in the topic node." + }, + { + "name": "depth", + "type_name": "int", + "description": "Used to calculate the path of the source directory for each topic node based on its depth from the root." 
+ } + ], + "returns": null, + "usage": { + "language": "python", + "code": "topic_node = {\"File\": \"my_file\"}\nparent_dir = \"path/to/parent\"\ndepth = 1\ntopic_callback(topic_node, parent_dir, depth)", + "description": "" + }, + "name": "topic_callback", + "location": { + "start": 492, + "insert": 493, + "offset": " ", + "indent": 8, + "comment": null + }, + "item_type": "function", + "length": 8, + "docLength": null + }, + { + "id": "e5a5f5a5-c911-958e-7a4f-bf0f9cdce980", + "ancestors": [], + "description": "Retrieves the name of a book from a specified directory. It searches through a list of book nodes and returns the matching book name if found; otherwise, it logs an error and returns the directory path.", + "params": [ + { + "name": "dir", + "type_name": "str", + "description": "Expected to match with the \"Dir\" key value in one of the book nodes in the `info[\"book_nodes\"]` list." + }, + { + "name": "src_file", + "type_name": "str", + "description": "Passed to the log error message. It represents the source file name, used as context for error logging when a book is not found for a given directory." + }, + { + "name": "info", + "type_name": "Dict[any, any]", + "description": "Expected to have a key named \"book_nodes\" with values that are dictionaries having keys \"Dir\" and \"Name\"." + } + ], + "returns": { + "type_name": "str|None", + "description": "1) the name of a book if the directory is found, or 2) the directory itself if no matching book is found and an error message has been logged." 
+ }, + "usage": { + "language": "python", + "code": "info = {\"book_nodes\": [{\"Dir\": \"dir1\", \"Name\": \"Book1\"}, {\"Dir\": \"dir2\", \"Name\": \"Book2\"}]}\nsrc_file = \"file.txt\"\nresult = dir_to_book_name(\"dir1\", src_file, info)\n", + "description": "" + }, + "name": "dir_to_book_name", + "location": { + "start": 739, + "insert": 741, + "offset": " ", + "indent": 4, + "comment": null + }, + "item_type": "function", + "length": 12, + "docLength": null + }, + { + "id": "5527b173-9819-8288-bb4a-0cb5b2b70b61", + "ancestors": [ + "0da26043-68d2-308e-2043-8ae56108c114" + ], + "description": "Extracts file IDs from a source file and appends them to a list called `book_ids`. The file is located by joining a parent directory with the name of a topic node, adding \".adoc\" as the file extension.", + "params": [ + { + "name": "topic_node", + "type_name": "Dict", + "description": "Expected to contain a set of key-value pairs, where at least one key is \"File\", which represents the path of an AsciiDoc file." + }, + { + "name": "parent_dir", + "type_name": "str", + "description": "Used to construct the path to a source file by joining it with the \"File\" attribute of the `topic_node`." + }, + { + "name": "depth", + "type_name": "int", + "description": "Used as an input for the recursive callback process to traverse the topic tree, representing the current level of nesting." 
+ } + ], + "returns": null, + "usage": { + "language": "python", + "code": "topic_node = {\"File\": \"my_topic\"}\nparent_dir = \"/path/to/my/adoc/files\"\nbook_ids = []\ntopic_callback(topic_node, parent_dir, 0)", + "description": "" + }, + "name": "topic_callback", + "location": { + "start": 910, + "insert": 911, + "offset": " ", + "indent": 8, + "comment": null + }, + "item_type": "function", + "length": 4, + "docLength": null + }, + { + "id": "b7b3ec81-e926-1092-eb47-8bd398ba8a47", + "ancestors": [ + "6d8ae864-af5c-bdbc-f74e-3a7f52dda325" + ], + "description": "Generates a source file path and maps it to an ID based on the topic node's name. It uses the parent directory and topic node's \"File\" attribute to create the source file path, then associates it with an ID using the `build_file_id` function.", + "params": [ + { + "name": "topic_node", + "type_name": "Dict[str, Any]", + "description": "Assumed to contain key-value pairs representing metadata about a topic in an AsciiDoc document." + }, + { + "name": "parent_dir", + "type_name": "str", + "description": "Used to construct the full path of the source file by joining it with the topic node's \"File\" attribute." + }, + { + "name": "depth", + "type_name": "int", + "description": "Not used within the given code snippet. It likely serves as an optional input to control recursion or indentation in the file generation process, but its purpose remains unclear without additional context." 
+ } + ], + "returns": null, + "usage": { + "language": "python", + "code": "topic_node = {\"File\": \"main\", \"Name\": \"Introduction\"}\nparent_dir = \"/path/to/parent/directory\"\ndepth = 0\nfile_to_id_map = {}\nexisting_ids = set()\ntopic_callback(topic_node, parent_dir, depth)\n", + "description": "" + }, + "name": "topic_callback", + "location": { + "start": 926, + "insert": 927, + "offset": " ", + "indent": 8, + "comment": null + }, + "item_type": "function", + "length": 5, + "docLength": null + }, + { + "id": "481aba77-e533-89bf-ca48-8aafbab2b315", + "ancestors": [], + "description": "Synchronizes directories by removing files and subdirectories from the right directory that exist only on the left side, copying or moving files and subdirectories that are common or exist only on the left side to the right side.", + "name": "_sync_directories_dircmp", + "location": { + "start": 1064, + "insert": 1066, + "offset": " ", + "indent": 4, + "comment": null + }, + "item_type": "function", + "length": 18, + "docLength": null + }, + { + "id": "e2e45fd8-6bb8-aeb3-6742-87fe4cf73a64", + "ancestors": [], + "description": "Reads a configuration file and parses its contents based on the provided distribution and version. It extracts relevant information from the configuration file into a dictionary, which represents the repository URLs for the specified combination of distro and version.", + "params": [ + { + "name": "config_file", + "type_name": "str", + "description": "The path to the configuration file that needs to be read for parsing the repository configuration." + }, + { + "name": "distro", + "type_name": "str", + "description": "Expected to be a name of a distribution, for example, \"ubuntu\" or \"centos\". It is used to construct a section name for parsing configuration from the config file." + }, + { + "name": "version", + "type_name": "str", + "description": "Used as part of the name of a section in the config file that it reads from." 
+ } + ], + "returns": { + "type_name": "Dict[str,str]", + "description": "A dictionary containing URLs for different repository keys. The keys are obtained from the section name specified by the distro and version parameters." + }, + "usage": { + "language": "python", + "code": "parse_repo_config('path/to/config/file', 'ubuntu', '20.04')", + "description": "" + }, + "name": "parse_repo_config", + "location": { + "start": 1113, + "insert": 1115, + "offset": " ", + "indent": 4, + "comment": null + }, + "item_type": "function", + "length": 15, + "docLength": null + }, + { + "id": "0c6d6139-3e8c-fb9b-194c-1f6969f91c94", + "ancestors": [], + "description": "Processes command-line arguments to build and package Drupal files. It fetches upstream sources, parses a configuration file, builds files, reorganizes them for Drupal, and pushes changes back to GitLab if necessary.", + "params": [], + "returns": null, + "usage": { + "language": "python", + "code": "if __name__ == \"__main__\":\n main(title='Sample Title', \n author='Author Name', \n product='Product Name', \n distro='Distro', \n version='Version Number', \n no_upstream_fetch=False, \n all_in_one=True, \n push=True)\n", + "description": "" + }, + "name": "main", + "location": { + "start": 1131, + "insert": 1132, + "offset": " ", + "indent": 4, + "comment": null + }, + "item_type": "function", + "length": 74, + "docLength": null + } + ] + } + } + } +] \ No newline at end of file diff --git a/.komment/komment.json b/.komment/komment.json new file mode 100644 index 000000000000..784618b26367 --- /dev/null +++ b/.komment/komment.json @@ -0,0 +1,15 @@ +{ + "meta": { + "version": "1", + "updated_at": "2024-08-16T07:41:03.395Z", + "created_at": "2024-08-16T07:41:06.570Z", + "pipelines": [ + "91c685cd-989b-4cc4-9eef-cf2b8ab8d14d" + ] + }, + "lookup": [ + [ + "build_for_portal.py" + ] + ] +} \ No newline at end of file diff --git a/.s2i/httpd-cfg/01-commercial.conf b/.s2i/httpd-cfg/01-commercial.conf index 
1b6f7314daa6..0d375cf38e95 100644 --- a/.s2i/httpd-cfg/01-commercial.conf +++ b/.s2i/httpd-cfg/01-commercial.conf @@ -138,13 +138,13 @@ AddType text/vtt vtt # ACS Redirects to go to the latest version - change here when a new version drops # it should probably be best to combine the next few lines in one reg exp but for clarity keeping them separate # the first one redirects - RewriteRule ^acs/?$ /acs/4.4/welcome/index.html [R=301] + RewriteRule ^acs/?$ /acs/4.5/welcome/index.html [R=301] # redirect to the latest release notes - RewriteRule ^acs/release_notes/?$ /acs/4.4/release_notes/44-release-notes.html [R=301,NE] + RewriteRule ^acs/release_notes/?$ /acs/4.5/release_notes/45-release-notes.html [R=301,NE] # redirect from ACS CLoud Service page - RewriteRule ^acs/installing/install-ocp-operator.html /acs/4.4/installing/installing_ocp/init-bundle-ocp.html [NE,R=301] - RewriteRule ^acs/(\D.*)$ /acs/4.4/$1 [NE,R=301] - RewriteRule ^acs/(3\.65|3\.66|3\.67|3\.68|3\.69|3\.70|3\.71|3\.72|3\.73|3\.74|4\.0|4\.1|4\.2|4\.3|4\.4)/?$ /acs/$1/welcome/index.html [L,R=301] + RewriteRule ^acs/installing/install-ocp-operator.html /acs/4.5/installing/installing_ocp/init-bundle-ocp.html [NE,R=301] + RewriteRule ^acs/(\D.*)$ /acs/4.5/$1 [NE,R=301] + RewriteRule ^acs/(3\.65|3\.66|3\.67|3\.68|3\.69|3\.70|3\.71|3\.72|3\.73|3\.74|4\.0|4\.1|4\.2|4\.3|4\.4|4\.5)/?$ /acs/$1/welcome/index.html [L,R=301] #redirect for 4.0 Manage vulneribility page RewriteRule ^(acs/(?:4\.0/)?)?operating/manage-vulnerabilities\.html$ /acs/4.0/operating/manage-vulnerabilities/vulnerability-management.html [NE,R=301] #redirect for missing 4.3 page @@ -153,8 +153,10 @@ AddType text/vtt vtt RewriteRule ^(acs/(?:4\.4/)?)?installing/acs-installation-platforms\.html$ /acs/4.4/installing/acs-high-level-overview.html [NE,R=301] #redirect Architecture page for ACS Cloud into Cloud directory RewriteRule ^acs/(4\.4)/architecture/acscs-architecture\.html$ /acs/latest/cloud_service/acscs-architecture.html [NE,R=302] + 
#redirect Managing compliance and Compliance Operator pages as per https://github.com/openshift/openshift-docs/pull/78035 + RewriteRule ^acs/(4\.5)/operating/manage-compliance-operator/compliance-operator-rhacs\.html$ /acs/latest/operating/compliance-operator-rhacs.html [NE,R=302] - # remove aro docs to just the welcome page + #remove aro docs to just the welcome page RewriteRule aro/4/(?!welcome)(.*)?$ /container-platform/latest/$1 [L,R=301] # Redirects for "latest" version @@ -173,6 +175,9 @@ AddType text/vtt vtt # Redirect for renamed external DNS page RewriteRule ^container-platform/4\.10/networking/external_dns_operator/nw-installing-external-dns-operator.html /container-platform/4.10/networking/external_dns_operator/nw-installing-external-dns-operator-on-cloud-providers.html [NE,R=301] + # Redirect for renamed Ingress Controller document + RewriteRule ^container-platform/(4\.1[2-6])/networking/nw-ingress-controller-endpoint-publishing-strategies\.html$ /container-platform/$1/networking/nw-configuring-ingress-controller-endpoint-publishing-strategy.html [NE,R=302,L] + # Redirect for cluster logging per Ashleigh Brennan RewriteRule ^container-platform/(4\.11|4\.12|4\.13)/logging/troubleshooting/cluster-logging-must-gather.html /container-platform/$1/logging/cluster-logging-support.html [NE,R=302] RewriteRule ^container-platform/(4\.11|4\.12|4\.13)/logging/config/cluster-logging-maintenance-support.html /container-platform/$1/logging/cluster-logging-support.html [NE,R=302] @@ -182,42 +187,32 @@ AddType text/vtt vtt RewriteRule ^(rosa|dedicated)/logging/config/cluster-logging-moving-nodes.html /$1/logging/scheduling_resources/logging-node-selectors.html [NE,R=302] RewriteRule ^container-platform/(4\.11|4\.12|4\.13|4\.14)/logging/config/cluster-logging-log-store.html /container-platform/$1/logging/log_storage/logging-config-es-store.html [NE,R=302] - # Redirects for observability rework per https://github.com/openshift/openshift-docs/pull/71248 - RewriteRule 
^container-platform/4.14/power_monitoring/(.*)$ /container-platform/4.14/observability/power_monitoring/$1 [NE,R=302,L] - - # Redirects for observability/monitoring per https://github.com/openshift/openshift-docs/pull/74679/ - RewriteRule ^container-platform/(4\.12|4\.13|4\.14|4\.15)/observability/monitoring/cluster_observability_operator/configuring-the-cluster-observability-operator-to-monitor-a-service.html /container-platform/$1/observability/cluster_observability_operator/configuring-the-cluster-observability-operator-to-monitor-a-service.html [NE,R=302] - RewriteRule ^container-platform/(4\.12|4\.13|4\.14|4\.15)/observability/monitoring/cluster_observability_operator/installing-the-cluster-observability-operator.html /container-platform/$1/observability/cluster_observability_operator/installing-the-cluster-observability-operator.html [NE,R=302] - RewriteRule ^container-platform/(4\.12|4\.13|4\.14|4\.15)/observability/monitoring/cluster_observability_operator/cluster-observability-operator-overview.html /container-platform/$1/observability/cluster_observability_operator/cluster-observability-operator-overview.html [NE,R=302] - RewriteRule ^container-platform/(4\.12|4\.13|4\.14|4\.15)/observability/monitoring/cluster_observability_operator/cluster-observability-operator-release-notes.html /container-platform/$1/observability/cluster_observability_operator/cluster-observability-operator-release-notes.html [NE,R=302] - - # Redirects for observability reorg per mleonov and https://github.com/openshift/openshift-docs/pull/74104 - # including logging per https://github.com/openshift/openshift-docs/pull/74819 - RewriteRule ^container-platform/(4\.12|4\.13|4\.14|4\.15)/distr_tracing/distr_tracing_rn/distr-tracing-rn-?(.*)$ /container-platform/$1/observability/distr_tracing/distr_tracing_rn/distr-tracing-rn-past-releases.html [NE,R=302] - RewriteRule ^container-platform/(4\.12|4\.13|4\.14|4\.15)/otel/otel_rn/otel-rn-?(.*)$ 
/container-platform/$1/observability/otel/otel_rn/otel-rn-past-releases.html [NE,R=302] - RewriteRule ^container-platform/(4\.12|4\.13|4\.14|4\.15)/(logging|monitoring|distr_tracing|otel)/?(.*)$ /container-platform/$1/observability/$2/$3 [NE,R=302] - RewriteRule ^container-platform/4.13/distr_tracing/distr_tracing_otel/distr-tracing-otel-installing.html /container-platform/4.13/observability/otel/otel-installing.html [NE,R=302] - RewriteRule ^container-platform/4.13/distr_tracing/distr_tracing_install/distr-tracing-deploying-otel.html /container-platform/4.13/observability/otel/otel-installing.html [NE,R=302] - RewriteRule ^container-platform/4.9/distr_tracing/distributed-tracing-release-notes.html /container-platform/latest/observability/distr_tracing/distr_tracing_rn/distr-tracing-rn-past-releases.html [NE,R=302] - RewriteRule ^container-platform/4.9/distr_tracing/distr_tracing_install/distr-tracing-installing.html /container-platform/latest/observability/distr_tracing/distr_tracing_tempo/distr-tracing-tempo-installing.html [NE,R=302] + # Consolidated redirects for observability, monitoring, logging, and network observability + + # Generic rule for moving sections under observability + RewriteRule ^container-platform/(4\.1[2-5]|latest)/(logging|monitoring|distr_tracing|otel|network_observability)/?(.*)$ /container-platform/$1/observability/$2/$3 [NE,R=302] + + # Generic rule for ROSA and OpenShift Dedicated RewriteRule ^(rosa|dedicated)/(logging|monitoring)/?(.*)$ /$1/observability/$2/$3 [NE,R=302] + # Specific rules for Cluster Observability Operator + RewriteRule ^container-platform/(4\.1[2-5]|latest)/monitoring/cluster_observability_operator/(.*)$ /container-platform/$1/observability/cluster_observability_operator/$2 [NE,R=302] - # Redirects for network observability per https://github.com/openshift/openshift-docs/pull/73554 - RewriteRule ^container-platform/(4\.12|4\.13|4\.14|4\.15)/network_observability/?(.*)$ 
/container-platform/$1/observability/network_observability/$2 [NE,R=302] + # Specific redirects for distributed tracing and OpenTelemetry + RewriteRule ^container-platform/(4\.1[2-5]|latest)/distr_tracing/distr_tracing_rn/distr-tracing-rn-?(.*)$ /container-platform/$1/observability/distr_tracing/distr_tracing_rn/distr-tracing-rn-past-releases.html [NE,R=302] + RewriteRule ^container-platform/(4\.1[2-5]|latest)/otel/otel_rn/otel-rn-?(.*)$ /container-platform/$1/observability/otel/otel_rn/otel-rn-past-releases.html [NE,R=302] - # Redirect for log collection forwarding per https://github.com/openshift/openshift-docs/pull/64406 - RewriteRule ^container-platform/(4\.11|4\.12|4\.13)/logging/config/cluster-logging-collector.html /container-platform/$1/logging/log_collection_forwarding/cluster-logging-collector.html [NE,R=302] - RewriteRule ^container-platform/(4\.11|4\.12|4\.13)/logging/cluster-logging-eventrouter.html /container-platform/$1/logging/log_collection_forwarding/cluster-logging-eventrouter.html [NE,R=302] - RewriteRule ^container-platform/(4\.11|4\.12|4\.13)/logging/cluster-logging-enabling-json-logging.html /container-platform/$1/logging/log_collection_forwarding/cluster-logging-enabling-json-logging.html [NE,R=302] - RewriteRule ^container-platform/(4\.11|4\.12|4\.13)/logging/cluster-logging-external.html /container-platform/$1/logging/log_collection_forwarding/log-forwarding.html [NE,R=302] + # Specific redirects for older versions and special cases + RewriteRule ^container-platform/(4\.13|latest)/distr_tracing/distr_tracing_otel/distr-tracing-otel-installing.html /container-platform/$1/observability/otel/otel-installing.html [NE,R=302] + RewriteRule ^container-platform/(4\.13|latest)/distr_tracing/distr_tracing_install/distr-tracing-deploying-otel.html /container-platform/$1/observability/otel/otel-installing.html [NE,R=302] + RewriteRule ^container-platform/4.9/distr_tracing/distributed-tracing-release-notes.html 
/container-platform/latest/observability/distr_tracing/distr_tracing_rn/distr-tracing-rn-past-releases.html [NE,R=302] + RewriteRule ^container-platform/4.9/distr_tracing/distr_tracing_install/distr-tracing-installing.html /container-platform/latest/observability/distr_tracing/distr_tracing_tempo/distr-tracing-tempo-installing.html [NE,R=302] + + # Redirects for log collection forwarding (for versions 4.11 to 4.13 and latest) + RewriteRule ^container-platform/(4\.1[1-3]|latest)/logging/(.*)$ /container-platform/$1/logging/log_collection_forwarding/$2 [NE,R=302] - # Redirect for network observability per https://github.com/openshift/openshift-docs/pull/65770 - RewriteRule ^container-platform/(4\.11|4\.12|4\.13)/networking/network_observability/network-observability-operator-release-notes.html /container-platform/$1/network_observability/network-observability-operator-release-notes.html [NE,R=302] - RewriteRule ^container-platform/(4\.11|4\.12|4\.13)/networking/network_observability/installing-operators.html /container-platform/$1/network_observability/installing-operators.html [NE,R=302] - RewriteRule ^container-platform/(4\.11|4\.12|4\.13)/networking/network_observability/observing-network-traffic.html /container-platform/$1/network_observability/observing-network-traffic.html [NE,R=302] - RewriteRule ^container-platform/(4\.11|4\.12|4\.13)/networking/network_observability/understanding-network-observability-operator.html /container-platform/$1/network_observability/understanding-network-observability-operator.html [NE,R=302] - RewriteRule ^container-platform/(4\.11|4\.12|4\.13)/networking/network_observability/configuring-operator.html /container-platform/$1/network_observability/configuring-operator.html [NE,R=302] + # Redirect for power monitoring (specific to 4.14 and latest) + RewriteRule ^container-platform/(4\.14|latest)/power_monitoring/(.*)$ /container-platform/$1/observability/power_monitoring/$2 [NE,R=302,L] # Redirects for Telco ZTP changes delivered in 
https://github.com/openshift/openshift-docs/pull/35889 RewriteRule ^container-platform/(4\.10|4\.11)/scalability_and_performance/ztp-deploying-disconnected.html /container-platform/$1/scalability_and_performance/ztp_far_edge/ztp-deploying-far-edge-clusters-at-scale.html [NE,R=302] @@ -236,6 +231,17 @@ AddType text/vtt vtt RewriteRule ^container-platform/(4\.12|4\.13)/scalability_and_performance/low_latency_tuning/cnf-tuning-low-latency-nodes-with-perf-profile.html /container-platform/$1/scalability_and_performance/cnf-low-latency-tuning.html [NE,R=302] RewriteRule ^container-platform/(4\.12|4\.13)/scalability_and_performance/low_latency_tuning/cnf-provisioning-low-latency-workloads.html /container-platform/$1/scalability_and_performance/cnf-low-latency-tuning.html [NE,R=302] + #Install redirects per https://github.com/openshift/openshift-docs/pull/79711 + RewriteRule ^container-platform/(4\.1[2-6]|latest)/installing/index.html /container-platform/$1/installing/overview/index.html [NE,R=302] + RewriteRule ^container-platform/(4\.1[2-6]|latest)/installing/installing-preparing.html /container-platform/$1/installing/overview/installing-preparing.html [NE,R=302] + RewriteRule ^container-platform/(4\.1[2-6]|latest)/installing/cluster-capabilities.html /container-platform/$1/installing/overview/cluster-capabilities.html [NE,R=302] + RewriteRule ^container-platform/(4\.1[2-6]|latest)/installing/installing-fips.html /container-platform/$1/installing/overview/installing-fips.html [NE,R=302] + RewriteRule ^container-platform/(4\.1[2-6]|latest)/installing/validating-an-installation.html /container-platform/$1/installing/validation_and_troubleshooting/validating-an-installation.html [NE,R=302] + RewriteRule ^container-platform/(4\.1[2-6]|latest)/installing/installing-troubleshooting.html /container-platform/$1/installing/validation_and_troubleshooting/installing-troubleshooting.html [NE,R=302] + + + # ingress sharding redirect per 
https://github.com/openshift/openshift-docs/pull/76916 + RewriteRule ^container-platform/(4\.1[2-6]|latest)/networking/ingress-sharding.html /container-platform/$1/networking/configuring_ingress_cluster_traffic/configuring-ingress-cluster-traffic-ingress-controller.html [NE,R=302] # Redirects for new ZTP edge computing section RewriteRule ^container-platform/(4\.15|4\.16)/scalability_and_performance/ztp_far_edge/cnf-talm-for-cluster-upgrades.html /container-platform/$1/edge_computing/cnf-talm-for-cluster-upgrades.html [NE,R=302] @@ -322,39 +328,49 @@ AddType text/vtt vtt # Builds using Shipwright landing page RewriteRule ^container-platform/(4\.14|4\.15|4\.16|4\.17|4\.18)/cicd/builds_using_shipwright/overview-openshift-builds.html /builds/latest/about/overview-openshift-builds.html [L,R=302] - # redirect gitops latest to 1.12 + # redirect gitops latest to 1.13 RewriteRule ^gitops/?$ /gitops/latest [R=302] - RewriteRule ^gitops/latest/?(.*)$ /gitops/1\.12/$1 [NE,R=302] + RewriteRule ^gitops/latest/?(.*)$ /gitops/1\.13/$1 [NE,R=302] # redirect top-level without filespec to the about file - RewriteRule ^gitops/(1\.8|1\.9|1\.10|1\.11|1\.12)/?$ /gitops/$1/understanding_openshift_gitops/about-redhat-openshift-gitops.html [L,R=302] + RewriteRule ^gitops/(1\.8|1\.9|1\.10|1\.11|1\.12|1\.13)/?$ /gitops/$1/understanding_openshift_gitops/about-redhat-openshift-gitops.html [L,R=302] # GitOps landing page RewriteRule ^container-platform/(4\.11|4\.12|4\.13|4\.14|4\.15|4\.16|4\.17|4\.18)/cicd/gitops/about-redhat-openshift-gitops.html /gitops/latest/understanding_openshift_gitops/about-redhat-openshift-gitops.html [L,R=302] # redirect any links to existing OCP embedded content to standalone equivalent for each assembly - RewriteRule ^container-platform/(4\.11|4\.12|4\.13|4\.14)/cicd/gitops/gitops-release-notes.html/ /gitops/latest/release_notes/gitops-release-notes.html [L,R=302] - RewriteRule 
^container-platform/(4\.11|4\.12|4\.13|4\.14)/cicd/gitops/understanding-openshift-gitops.html /gitops/latest/understanding_openshift_gitops/about-redhat-openshift-gitops.html [L,R=302] - RewriteRule ^container-platform/(4\.11|4\.12|4\.13|4\.14)/cicd/gitops/installing-openshift-gitops.html /gitops/latest/installing_gitops/installing-openshift-gitops.html [L,R=302] - RewriteRule ^container-platform/(4\.11|4\.12|4\.13|4\.14)/cicd/gitops/uninstalling-openshift-gitops.html /gitops/latest/removing_gitops/uninstalling-openshift-gitops.html [L,R=302] - RewriteRule ^container-platform/(4\.11|4\.12|4\.13|4\.14)/cicd/gitops/setting-up-argocd-instance.html /gitops/latest/argocd_instance/setting-up-argocd-instance.html [L,R=302] - RewriteRule ^container-platform/(4\.11|4\.12|4\.13|4\.14)/cicd/gitops/monitoring-argo-cd-instances.html /gitops/latest/observability/monitoring/monitoring-argo-cd-instances.html [L,R=302] - RewriteRule ^container-platform/(4\.11|4\.12|4\.13|4\.14)/cicd/gitops/using-argo-rollouts-for-progressive-deployment-delivery.html /gitops/latest/argo_rollouts/using-argo-rollouts-for-progressive-deployment-delivery.html [L,R=302] - RewriteRule ^container-platform/(4\.11|4\.12|4\.13|4\.14)/cicd/gitops/configuring-an-openshift-cluster-by-deploying-an-application-with-cluster-configurations.html /gitops/latest/declarative_clusterconfig/configuring-an-openshift-cluster-by-deploying-an-application-with-cluster-configurations.html [L,R=302] - RewriteRule ^container-platform/(4\.11|4\.12|4\.13|4\.14)/cicd/gitops/deploying-a-spring-boot-application-with-argo-cd.html /gitops/latest/argocd_applications/deploying-a-spring-boot-application-with-argo-cd.html [L,R=302] - RewriteRule ^container-platform/(4\.11|4\.12|4\.13|4\.14)/cicd/gitops/argo-cd-custom-resource-properties.html /gitops/latest/argocd_instance/argo-cd-cr-component-properties.html [L,R=302] - RewriteRule ^container-platform/(4\.11|4\.12|4\.13|4\.14)/cicd/gitops/configuring-secure-communication-with-redis.html 
/gitops/latest/securing_openshift_gitops/configuring-secure-communication-with-redis.html [L,R=302] - RewriteRule ^container-platform/(4\.11|4\.12|4\.13|4\.14)/cicd/gitops/health-information-for-resources-deployment.html /gitops/latest/observability/monitoring/health-information-for-resources-deployment.html [L,R=302] - RewriteRule ^container-platform/(4\.11|4\.12|4\.13|4\.14)/cicd/gitops/configuring-sso-on-argo-cd-using-dex.html /gitops/latest/accesscontrol_usermanagement/configuring-sso-on-argo-cd-using-dex.html [L,R=302] - RewriteRule ^container-platform/(4\.11|4\.12|4\.13|4\.14)/cicd/gitops/configuring-sso-for-argo-cd-using-keycloak.html /gitops/latest/accesscontrol_usermanagement/configuring-sso-for-argo-cd-using-keycloak.html [L,R=302] - RewriteRule ^container-platform/(4\.11|4\.12|4\.13|4\.14)/cicd/gitops/configuring-argo-cd-rbac.html /gitops/latest/accesscontrol_usermanagement/configuring-argo-cd-rbac.html [L,R=302] - RewriteRule ^container-platform/(4\.11|4\.12|4\.13|4\.14)/cicd/gitops/configuring-resource-quota.html /gitops/latest/managing_resource/configuring-resource-quota.html [L,R=302] - RewriteRule ^container-platform/(4\.11|4\.12|4\.13|4\.14)/cicd/gitops/monitoring-argo-cd-custom-resource-workloads.html /gitops/latest/observability/monitoring/monitoring-argo-cd-custom-resource-workloads.html [L,R=302] - RewriteRule ^container-platform/(4\.11|4\.12|4\.13|4\.14)/cicd/gitops/viewing-argo-cd-logs.html /gitops/latest/observability/logging/viewing-argo-cd-logs.html [L,R=302] - RewriteRule ^container-platform/(4\.11|4\.12|4\.13|4\.14)/cicd/gitops/run-gitops-control-plane-workload-on-infra-nodes.html /gitops/latest/gitops_workloads_infranodes/run-gitops-control-plane-workload-on-infra-nodes.html [L,R=302] - RewriteRule ^container-platform/(4\.11|4\.12|4\.13|4\.14)/cicd/gitops/about-sizing-requirements-gitops.html /gitops/latest/installing_gitops/preparing-gitops-install.html [L,R=302] - RewriteRule 
^container-platform/(4\.11|4\.12|4\.13|4\.14)/cicd/gitops/collecting-debugging-data-for-support.html /gitops/latest/understanding_openshift_gitops/gathering-gitops-diagnostic-information-for-support.html [L,R=302] - RewriteRule ^container-platform/(4\.11|4\.12|4\.13|4\.14)/cicd/gitops/troubleshooting-issues-in-GitOps.html /gitops/latest/troubleshooting_gitops_issues/auto-reboot-during-argo-cd-sync-with-machine-configurations.html [L,R=302] + RewriteRule ^container-platform/(4\.11|4\.12|4\.13|4\.14|4\.15|4\.16|4\.17|4\.18)/cicd/gitops/gitops-release-notes.html/ /gitops/latest/release_notes/gitops-release-notes.html [L,R=302] + RewriteRule ^container-platform/(4\.11|4\.12|4\.13|4\.14|4\.15|4\.16|4\.17|4\.18)/cicd/gitops/understanding-openshift-gitops.html /gitops/latest/understanding_openshift_gitops/about-redhat-openshift-gitops.html [L,R=302] + RewriteRule ^container-platform/(4\.11|4\.12|4\.13|4\.14|4\.15|4\.16|4\.17|4\.18)/cicd/gitops/installing-openshift-gitops.html /gitops/latest/installing_gitops/installing-openshift-gitops.html [L,R=302] + RewriteRule ^container-platform/(4\.11|4\.12|4\.13|4\.14|4\.15|4\.16|4\.17|4\.18)/cicd/gitops/uninstalling-openshift-gitops.html /gitops/latest/removing_gitops/uninstalling-openshift-gitops.html [L,R=302] + RewriteRule ^container-platform/(4\.11|4\.12|4\.13|4\.14|4\.15|4\.16|4\.17|4\.18)/cicd/gitops/setting-up-argocd-instance.html /gitops/latest/argocd_instance/setting-up-argocd-instance.html [L,R=302] + RewriteRule ^container-platform/(4\.11|4\.12|4\.13|4\.14|4\.15|4\.16|4\.17|4\.18)/cicd/gitops/monitoring-argo-cd-instances.html /gitops/latest/observability/monitoring/monitoring-argo-cd-instances.html [L,R=302] + RewriteRule ^container-platform/(4\.11|4\.12|4\.13|4\.14|4\.15|4\.16|4\.17|4\.18)/cicd/gitops/using-argo-rollouts-for-progressive-deployment-delivery.html /gitops/latest/argo_rollouts/using-argo-rollouts-for-progressive-deployment-delivery.html [L,R=302] + RewriteRule 
^container-platform/(4\.11|4\.12|4\.13|4\.14|4\.15|4\.16|4\.17|4\.18)/cicd/gitops/configuring-an-openshift-cluster-by-deploying-an-application-with-cluster-configurations.html /gitops/latest/declarative_clusterconfig/configuring-an-openshift-cluster-by-deploying-an-application-with-cluster-configurations.html [L,R=302] + RewriteRule ^container-platform/(4\.11|4\.12|4\.13|4\.14|4\.15|4\.16|4\.17|4\.18)/cicd/gitops/deploying-a-spring-boot-application-with-argo-cd.html /gitops/latest/argocd_applications/deploying-a-spring-boot-application-with-argo-cd.html [L,R=302] + RewriteRule ^container-platform/(4\.11|4\.12|4\.13|4\.14|4\.15|4\.16|4\.17|4\.18)/cicd/gitops/argo-cd-custom-resource-properties.html /gitops/latest/argocd_instance/argo-cd-cr-component-properties.html [L,R=302] + RewriteRule ^container-platform/(4\.11|4\.12|4\.13|4\.14|4\.15|4\.16|4\.17|4\.18)/cicd/gitops/configuring-secure-communication-with-redis.html /gitops/latest/securing_openshift_gitops/configuring-secure-communication-with-redis.html [L,R=302] + RewriteRule ^container-platform/(4\.11|4\.12|4\.13|4\.14|4\.15|4\.16|4\.17|4\.18)/cicd/gitops/health-information-for-resources-deployment.html /gitops/latest/observability/monitoring/health-information-for-resources-deployment.html [L,R=302] + RewriteRule ^container-platform/(4\.11|4\.12|4\.13|4\.14|4\.15|4\.16|4\.17|4\.18)/cicd/gitops/configuring-sso-on-argo-cd-using-dex.html /gitops/latest/accesscontrol_usermanagement/configuring-sso-on-argo-cd-using-dex.html [L,R=302] + RewriteRule ^container-platform/(4\.11|4\.12|4\.13|4\.14|4\.15|4\.16|4\.17|4\.18)/cicd/gitops/configuring-sso-for-argo-cd-using-keycloak.html /gitops/latest/accesscontrol_usermanagement/configuring-sso-for-argo-cd-using-keycloak.html [L,R=302] + RewriteRule ^container-platform/(4\.11|4\.12|4\.13|4\.14|4\.15|4\.16|4\.17|4\.18)/cicd/gitops/configuring-argo-cd-rbac.html /gitops/latest/accesscontrol_usermanagement/configuring-argo-cd-rbac.html [L,R=302] + RewriteRule 
^container-platform/(4\.11|4\.12|4\.13|4\.14|4\.15|4\.16|4\.17|4\.18)/cicd/gitops/configuring-resource-quota.html /gitops/latest/managing_resource/configuring-resource-quota.html [L,R=302] + RewriteRule ^container-platform/(4\.11|4\.12|4\.13|4\.14|4\.15|4\.16|4\.17|4\.18)/cicd/gitops/monitoring-argo-cd-custom-resource-workloads.html /gitops/latest/observability/monitoring/monitoring-argo-cd-custom-resource-workloads.html [L,R=302] + RewriteRule ^container-platform/(4\.11|4\.12|4\.13|4\.14|4\.15|4\.16|4\.17|4\.18)/cicd/gitops/viewing-argo-cd-logs.html /gitops/latest/observability/logging/viewing-argo-cd-logs.html [L,R=302] + RewriteRule ^container-platform/(4\.11|4\.12|4\.13|4\.14|4\.15|4\.16|4\.17|4\.18)/cicd/gitops/run-gitops-control-plane-workload-on-infra-nodes.html /gitops/latest/gitops_workloads_infranodes/run-gitops-control-plane-workload-on-infra-nodes.html [L,R=302] + RewriteRule ^container-platform/(4\.11|4\.12|4\.13|4\.14|4\.15|4\.16|4\.17|4\.18)/cicd/gitops/about-sizing-requirements-gitops.html /gitops/latest/installing_gitops/preparing-gitops-install.html [L,R=302] + RewriteRule ^container-platform/(4\.11|4\.12|4\.13|4\.14|4\.15|4\.16|4\.17|4\.18)/cicd/gitops/collecting-debugging-data-for-support.html /gitops/latest/understanding_openshift_gitops/gathering-gitops-diagnostic-information-for-support.html [L,R=302] + RewriteRule ^container-platform/(4\.11|4\.12|4\.13|4\.14|4\.15|4\.16|4\.17|4\.18)/cicd/gitops/troubleshooting-issues-in-GitOps.html /gitops/latest/troubleshooting_gitops_issues/auto-reboot-during-argo-cd-sync-with-machine-configurations.html [L,R=302] + + +# Lightspeed handling unversioned and latest links + RewriteRule ^lightspeed/?$ /lightspeed/latest [R=302] + RewriteRule ^lightspeed/latest/?(.*)$ /lightspeed/1\.0tp1/$1 [NE,R=302] + + # Lightspeed landing page + + RewriteRule ^container-platform/(4\.9|4\.10|4\.11|4\.12|4\.13|4\.14|4\.15|4\.16|4\.17|4\.18)/lightspeed/about/ols-openshift-lightspeed-overview.html 
/lightspeed/latest/about/ols-about-openshift-lightspeed.html [NE,R=302] + # Pipelines handling unversioned and latest links RewriteRule ^pipelines/?$ /pipelines/latest [R=302] @@ -506,14 +522,14 @@ AddType text/vtt vtt # The following rule prevents an infinite redirect loop when browsing to /container-platform/4.16/virt/about_virt/about-virt.html - RewriteRule ^container-platform/4\.16/virt/about_virt/about-virt.html$ - [L] + #RewriteRule ^container-platform/4\.16/virt/about_virt/about-virt.html$ - [L] # OpenShift Virtualization (CNV) catchall redirect; use when CNV releases asynchronously from OCP. Do not change the 302 to a 301. # When uncommented, this redirects all `virt` directory traffic to the about-virt page. # Pay mind to the redirect directly above this which prevents redirect loops. # To activate the redirects, uncomment the next and previous lines and update the version number to the pending release. - RewriteRule container-platform/4\.16/virt/(?!about-virt\.html)(.+)$ /container-platform/4.16/virt/about_virt/about-virt.html [NE,R=302] + #RewriteRule container-platform/4\.16/virt/(?!about-virt\.html)(.+)$ /container-platform/4.16/virt/about_virt/about-virt.html [NE,R=302] # Red Hat OpenShift support for Windows Containers (WMCO) catchall redirect; use when WMCO releases asynchronously from OCP. Do not change the 302 to a 301. 
diff --git a/.s2i/httpd-cfg/01-community.conf b/.s2i/httpd-cfg/01-community.conf index 209f6f561549..da610fe5860c 100644 --- a/.s2i/httpd-cfg/01-community.conf +++ b/.s2i/httpd-cfg/01-community.conf @@ -160,13 +160,13 @@ AddType text/vtt vtt RewriteRule ^latest/install_config/upgrading/(.*)$ /latest/upgrading/$1 [NE,R=301] # The following rule prevents an infinite redirect loop when browsing to /(latest|4\.16)/virt/about_virt/about-virt.html - RewriteRule ^(latest|4\.16)/virt/about_virt/about-virt.html$ - [L] + #RewriteRule ^(latest|4\.16)/virt/about_virt/about-virt.html$ - [L] # OpenShift Virtualization (CNV) catchall redirect; use when CNV releases asynchronously from OCP. Do not change the 302 to a 301. # When uncommented, this redirects all `virt` directory traffic to the about-virt page. # Pay mind to the redirect directly above this which prevents redirect loops. # To activate the redirects, uncomment the next and previous lines and update the version number to the pending release. - RewriteRule ^(latest|4\.16)/virt/(?!about-virt\.html)(.+)$ /$1/virt/about_virt/about-virt.html [NE,R=302] + #RewriteRule ^(latest|4\.16)/virt/(?!about-virt\.html)(.+)$ /$1/virt/about_virt/about-virt.html [NE,R=302] # Red Hat OpenShift support for Windows Containers (WMCO) catchall redirect; use when WMCO releases asynchronously from OCP. Do not change the 302 to a 301. # When uncommented, this redirects all `windows_containers` directory traffic to the /windows_containers/index.html page. diff --git a/404-commercial.html b/404-commercial.html index 28058c3c38d1..21599e7c3c2c 100644 --- a/404-commercial.html +++ b/404-commercial.html @@ -167,7 +167,7 @@

Not found

- Red Hat + Red Hat
diff --git a/OWNERS-ALIASES b/OWNERS-ALIASES deleted file mode 100644 index 59adbf2276f8..000000000000 --- a/OWNERS-ALIASES +++ /dev/null @@ -1,31 +0,0 @@ -# See the OWNERS_ALIASES docs: https://git.k8s.io/community/contributors/guide/owners.md#OWNERS_ALIASES -# Do not change indents. Incorrect indents break the Prow CI - -aliases: - merge-rights: - - abhatt-rh - - abrennan89 - - adellape - - aireilly - - bburt-rh - - bergerhoffer - - bscott-rh - - gabriel-rh - - jab-rh - - jeana-redhat - - JoeAldinger - - kalexand-rh - - kcarmichael08 - - michaelryanpeter - - opayne1 - - ousleyp - - sjhala-ccs - - snarayan-redhat - - Srivaralakshmi - approve-rights: - - adellape - - bergerhoffer - - JoeAldinger - - kalexand-rh - - ousleyp - - sjhala-ccs diff --git a/_attributes/attributes-microshift.adoc b/_attributes/attributes-microshift.adoc index 730441cfa0a0..7e1bced49649 100644 --- a/_attributes/attributes-microshift.adoc +++ b/_attributes/attributes-microshift.adoc @@ -4,12 +4,14 @@ :experimental: :imagesdir: images :OCP: OpenShift Container Platform -:ocp-version: 4.16 +:ocp-version: 4.17 :oc-first: pass:quotes[OpenShift CLI (`oc`)] +//OpenShift Kubernetes Engine +:oke: OpenShift Kubernetes Engine :product-title-first: Red Hat build of MicroShift (MicroShift) :microshift-short: MicroShift :product-registry: OpenShift image registry -:product-version: 4.16 +:product-version: 4.17 :rhel-major: rhel-9 :op-system-base-full: Red Hat Enterprise Linux (RHEL) :op-system: RHEL @@ -19,6 +21,6 @@ :op-system-version: 9.4 :op-system-version-major: 9 :op-system-bundle: Red Hat Device Edge -:rpm-repo-version: rhocp-4.16 +:rpm-repo-version: rhocp-4.17 :rhde-version: 4 :VirtProductName: OpenShift Virtualization diff --git a/_attributes/common-attributes.adoc b/_attributes/common-attributes.adoc index 61c10ba22034..8b8fc0e5491b 100644 --- a/_attributes/common-attributes.adoc +++ b/_attributes/common-attributes.adoc @@ -38,7 +38,7 @@ endif::[] :oadp-first: OpenShift API for Data Protection 
(OADP) :oadp-full: OpenShift API for Data Protection :oadp-short: OADP -:oadp-version: 1.3.1 +:oadp-version: 1.4.0 :oc-first: pass:quotes[OpenShift CLI (`oc`)] :product-registry: OpenShift image registry :product-mirror-registry: Mirror registry for Red Hat OpenShift @@ -46,19 +46,15 @@ endif::[] :rh-storage: OpenShift Data Foundation :rh-rhacm-first: Red Hat Advanced Cluster Management (RHACM) :rh-rhacm: RHACM -:rh-rhacm-version: 2.10 -:sandboxed-containers-first: OpenShift sandboxed containers -:sandboxed-containers-operator: OpenShift sandboxed containers Operator -:sandboxed-containers-version: 1.5 -:sandboxed-containers-version-z: 1.5.0 -:sandboxed-containers-legacy-version: 1.4.1 +:rh-rhacm-version: 2.11 +:osc: OpenShift sandboxed containers :cert-manager-operator: cert-manager Operator for Red Hat OpenShift :secondary-scheduler-operator-full: Secondary Scheduler Operator for Red Hat OpenShift :secondary-scheduler-operator: Secondary Scheduler Operator :descheduler-operator: Kube Descheduler Operator // Backup and restore :velero-domain: velero.io -:velero-version: 1.12 +:velero-version: 1.14 :launch: image:app-launcher.png[title="Application Launcher"] :mtc-short: MTC :mtc-full: Migration Toolkit for Containers @@ -95,8 +91,8 @@ endif::[] :opp: OpenShift Platform Plus //openshift virtualization (cnv) :VirtProductName: OpenShift Virtualization -:VirtVersion: 4.15 -:HCOVersion: 4.15.0 +:VirtVersion: 4.16 +:HCOVersion: 4.16.0 :CNVNamespace: openshift-cnv :CNVOperatorDisplayName: OpenShift Virtualization Operator :CNVSubscriptionSpecSource: redhat-operators @@ -136,6 +132,9 @@ ifdef::telco-core[] :rds: telco core :rds-caps: Telco core endif::[] +//lightspeed +:ols-official: Red Hat OpenShift Lightspeed +:ols: OpenShift Lightspeed //logging :logging: logging :logging-uc: Logging @@ -147,6 +146,10 @@ endif::[] //observability :ObservabilityLongName: Red Hat OpenShift Observability :ObservabilityShortName: Observability +// Cluster Monitoring Operator +:cmo-first: 
Cluster Monitoring Operator (CMO) +:cmo-full: Cluster Monitoring Operator +:cmo-short: CMO //power monitoring :PM-title-c: Power monitoring for Red Hat OpenShift :PM-title: power monitoring for Red Hat OpenShift @@ -164,8 +167,8 @@ endif::[] :product-rosa: Red Hat OpenShift Service on AWS :SMProductName: Red Hat OpenShift Service Mesh :SMProductShortName: Service Mesh -:SMProductVersion: 2.5.2 -:MaistraVersion: 2.5 +:SMProductVersion: 2.6.0 +:MaistraVersion: 2.6 :KialiProduct: Kiali Operator provided by Red Hat :SMPlugin: OpenShift Service Mesh Console (OSSMC) plugin :SMPluginShort: OSSMC plugin diff --git a/_distro_map.yml b/_distro_map.yml index 95ca2f71020f..20901e4015e2 100644 --- a/_distro_map.yml +++ b/_distro_map.yml @@ -310,6 +310,9 @@ openshift-acs: rhacs-docs-4.4: name: '4.4' dir: acs/4.4 + rhacs-docs-4.5: + name: '4.5' + dir: acs/4.5 microshift: name: Red Hat build of MicroShift author: OpenShift Documentation Project @@ -398,6 +401,9 @@ openshift-pipelines: pipelines-docs-1.15: name: '1.15' dir: pipelines/1.15 + pipelines-docs-1.16: + name: '1.16' + dir: pipelines/1.16 openshift-builds: name: builds for Red Hat OpenShift author: OpenShift documentation team diff --git a/_templates/_footer_origin.html.erb b/_templates/_footer_origin.html.erb index 6f7d6c38ccbc..4ad78401c080 100644 --- a/_templates/_footer_origin.html.erb +++ b/_templates/_footer_origin.html.erb @@ -24,14 +24,10 @@
- +
AsciiBinder diff --git a/_templates/_footer_other.html.erb b/_templates/_footer_other.html.erb index f948664a1316..c07d1cc41f0e 100644 --- a/_templates/_footer_other.html.erb +++ b/_templates/_footer_other.html.erb @@ -4,7 +4,7 @@
- Red Hat + Red Hat
diff --git a/_templates/_page_openshift.html.erb b/_templates/_page_openshift.html.erb index 3cdda1c8b956..4f04a7c8cdb6 100644 --- a/_templates/_page_openshift.html.erb +++ b/_templates/_page_openshift.html.erb @@ -67,6 +67,14 @@ <% end %> + <% if (distro_key == "openshift-lightspeed") %> + + + + <% end %> + <% if (version == "4.17") && (distro_key != "openshift-webscale" && distro_key != "openshift-dpu" && distro_key != "rosa-hcp") %> diff --git a/scripts/ocpdocs/index-commercial.html b/scripts/ocpdocs/index-commercial.html index 405b9fba5a47..fb85d114918d 100644 --- a/scripts/ocpdocs/index-commercial.html +++ b/scripts/ocpdocs/index-commercial.html @@ -253,11 +253,7 @@

OKD

- +
diff --git a/scripts/ocpdocs/search-commercial.html b/scripts/ocpdocs/search-commercial.html index 8a1ea6a42eab..ea16793646fb 100644 --- a/scripts/ocpdocs/search-commercial.html +++ b/scripts/ocpdocs/search-commercial.html @@ -183,11 +183,7 @@

Search Results

- +
diff --git a/scripts/prow/README.adoc b/scripts/prow/README.adoc index f157e1aad7cb..b27db0bd1043 100644 --- a/scripts/prow/README.adoc +++ b/scripts/prow/README.adoc @@ -7,7 +7,7 @@ Then, open a pull request against the https://github.com/openshift/release repos To add a new job to Prow CI, do the following: -. Install link:https://podman.io/docs/installation[podman]. +. Install link:https://podman.io/docs/installation[Podman]. . Fork and clone the https://github.com/openshift/release repository to `$HOME/release`. @@ -57,6 +57,8 @@ make CONTAINER_ENGINE=podman ci-operator-config WHAT=openshift/openshift-docs make jobs CONTAINER_ENGINE=podman WHAT=openshift/openshift-docs ---- ++ +Ensure that the `make` build target succeeds. . Open a PR against the link:https://github.com/openshift/release[openshift/release] `master` branch. Ensure that all Prow CI tests pass. Add a `/pj-rehearse` comment in the pull request to verify the new build. diff --git a/scripts/prow/openshift-openshift-docs-BRANCH.yaml b/scripts/prow/openshift-openshift-docs-BRANCH.yaml index 911271069881..c61cd11bdae6 100644 --- a/scripts/prow/openshift-openshift-docs-BRANCH.yaml +++ b/scripts/prow/openshift-openshift-docs-BRANCH.yaml @@ -9,10 +9,10 @@ resources: limits: memory: 4Gi requests: - cpu: 200m - memory: 400Mi + cpu: 400m + memory: 800Mi tests: -- as: deploy-preview +- as: validate-asciidoc steps: env: DISTROS: ${DISTROS} @@ -20,13 +20,10 @@ tests: test: - ref: openshift-docs-build-docs - ref: openshift-docs-preview-comment-pages -- as: validate-asciidoc - steps: - test: - ref: openshift-docs-asciidoctor - ref: openshift-docs-lint-topicmaps - - ref: openshift-docs-vale-review - ref: openshift-docs-jira-links + - ref: openshift-docs-vale-review - as: validate-portal steps: env: diff --git a/search-commercial.html b/search-commercial.html index 36fe475cdcd0..8db9c9789a95 100644 --- a/search-commercial.html +++ b/search-commercial.html @@ -157,11 +157,11 @@

Search Results

- Red Hat + Red Hat
-

Copyright © 2021 Red Hat, Inc.

+

Copyright © 2024 Red Hat, Inc.

diff --git a/security/cert_manager_operator/cert-manager-operator-release-notes.adoc b/security/cert_manager_operator/cert-manager-operator-release-notes.adoc index 524c1aef0f0d..514114390b76 100644 --- a/security/cert_manager_operator/cert-manager-operator-release-notes.adoc +++ b/security/cert_manager_operator/cert-manager-operator-release-notes.adoc @@ -12,6 +12,46 @@ These release notes track the development of {cert-manager-operator}. For more information, see xref:../../security/cert_manager_operator/index.adoc#cert-manager-operator-about[About the {cert-manager-operator}]. +[id="cert-manager-operator-release-notes-1-14-0"] +== {cert-manager-operator} 1.14.0 + +Issued: 2024-07-08 + +The following advisory is available for the {cert-manager-operator} 1.14.0: + +* link:https://access.redhat.com/errata/RHEA-2024:4360[RHEA-2024:4360] + +Version `1.14.0` of the {cert-manager-operator} is based on the upstream cert-manager version `v1.14.5`. For more information, see the link:https://cert-manager.io/docs/releases/release-notes/release-notes-1.14/#v1145[cert-manager project release notes for v1.14.5]. + +[id="cert-manager-operator-new-features-1-14-0"] +=== New features and enhancements + +*FIPS compliance support* + +With this release, FIPS mode is now automatically enabled for {cert-manager-operator}. When installed on an {product-title} cluster in FIPS mode, {cert-manager-operator} ensures compatibility without affecting the cluster's FIPS support status. + +*Securing routes with cert-manager managed certificates (Technology Preview)* + +With this release, you can manage certificates referenced in `Route` resources by using the {cert-manager-operator}. For more information, see xref:../../security/cert_manager_operator/cert-manager-securing-routes.adoc#cert-manager-securing-routes[Securing routes with the {cert-manager-operator}]. + +*NCM issuer* + +The {cert-manager-operator} now supports the Nokia NetGuard Certificate Manager (NCM) issuer. 
The `ncm-issuer` is a cert-manager external issuer that integrates with the NCM PKI system using a Kubernetes controller to sign certificate requests. This integration streamlines the process of obtaining non-self-signed certificates for applications, ensuring their validity and keeping them updated. + +[NOTE] +==== +The NCM issuer is validated only with version 1.1.1 and the {cert-manager-operator} version 1.14.0. This version handles tasks such as issuance, renewal, and managing certificates for the API server and ingress controller of {product-title} clusters. +==== + +[id="cert-manager-operator-1-14-0-CVEs"] +=== CVEs + +* link:https://access.redhat.com/security/cve/CVE-2023-45288[CVE-2023-45288] +* link:https://access.redhat.com/security/cve/CVE-2024-28180[CVE-2024-28180] +* link:https://access.redhat.com/security/cve/CVE-2020-8559[CVE-2020-8559] +* link:https://access.redhat.com/security/cve/CVE-2024-26147[CVE-2024-26147] +* link:https://access.redhat.com/security/cve/CVE-2024-24783[CVE-2024-24783] + [id="cert-manager-operator-release-notes-1-13-1"] == {cert-manager-operator} 1.13.1 diff --git a/security/cert_manager_operator/cert-manager-securing-routes.adoc b/security/cert_manager_operator/cert-manager-securing-routes.adoc new file mode 100644 index 000000000000..df42e9566c16 --- /dev/null +++ b/security/cert_manager_operator/cert-manager-securing-routes.adoc @@ -0,0 +1,22 @@ +:_mod-docs-content-type: ASSEMBLY +[id="cert-manager-securing-routes"] += Securing routes with the {cert-manager-operator} +include::_attributes/common-attributes.adoc[] +:context: cert-manager-securing-routes + +toc::[] + +In the {product-title}, the route API is extended to provide a configurable option to reference TLS certificates via secrets. 
With the xref:../../networking/routes/secured-routes.adoc#nw-ingress-route-secret-load-external-cert_secured-routes[Creating a route with externally managed certificate] Technology Preview feature enabled, you can minimize errors from manual intervention, streamline the certificate management process, and enable the {product-title} router to promptly serve the referenced certificate. + +:FeatureName: Securing routes with the {cert-manager-operator} +include::snippets/technology-preview.adoc[] + +include::modules/cert-manager-configuring-routes.adoc[leveloffset=+1] + +[role="_additional-resources"] +[id="additional-resources_{context}"] +== Additional resources + +* xref:../../networking/routes/secured-routes.adoc#nw-ingress-route-secret-load-external-cert_secured-routes[Creating a route with externally managed certificate] + +* xref:../../security/cert_manager_operator/cert-manager-operator-issuer-acme.adoc#cert-manager-operator-issuer-acme[Configuring an ACME issuer] \ No newline at end of file diff --git a/security/cert_manager_operator/index.adoc b/security/cert_manager_operator/index.adoc index fd1c471fa191..f8920383ca55 100644 --- a/security/cert_manager_operator/index.adoc +++ b/security/cert_manager_operator/index.adoc @@ -20,9 +20,14 @@ include::modules/cert-manager-request-methods.adoc[leveloffset=+1] //Supported versions include::modules/cert-manager-supported-versions.adoc[leveloffset=+1] +//FIPS compliant support +include::modules/cert-manager-fips-support.adoc[leveloffset=+1] [role="_additional-resources"] [id="cert-manager-operator-about_additional-resources"] == Additional resources * link:https://cert-manager.io/docs/[cert-manager project documentation] +* xref:../../security/container_security/security-compliance.adoc#security-compliance[Understanding compliance] +* xref:../../installing/overview/installing-fips.adoc#installing-fips-mode_installing-fips[Installing a cluster in FIPS mode] +* 
xref:../../installing/overview/installing-preparing.adoc#installing-preparing-security[Do you need extra security for your cluster?] \ No newline at end of file diff --git a/security/certificate_types_descriptions/etcd-certificates.adoc b/security/certificate_types_descriptions/etcd-certificates.adoc index d97fe43d38aa..ed522c6291ec 100644 --- a/security/certificate_types_descriptions/etcd-certificates.adoc +++ b/security/certificate_types_descriptions/etcd-certificates.adoc @@ -17,10 +17,6 @@ The CA certificates are valid for 10 years. The peer, client, and server certifi include::modules/rotating-certificate-authority.adoc[leveloffset=+1] include::modules/etcd-cert-alerts-metrics-signer.adoc[leveloffset=+1] -.Additional resources - -* xref:../../security/certificate_types_descriptions/etcd-certificates.adoc#rotating-certificate-authority_cert-types-etcd-certificates[Rotating the etcd certificate] - == Management These certificates are only managed by the system and are automatically rotated. 
diff --git a/security/container_security/security-compliance.adoc b/security/container_security/security-compliance.adoc index 8a7904378f9b..1459d22d9020 100644 --- a/security/container_security/security-compliance.adoc +++ b/security/container_security/security-compliance.adoc @@ -17,5 +17,5 @@ ifndef::openshift-origin[] [role="_additional-resources"] .Additional resources -* xref:../../installing/installing-fips.adoc#installing-fips-mode_installing-fips[Installing a cluster in FIPS mode] +* xref:../../installing/overview/installing-fips.adoc#installing-fips-mode_installing-fips[Installing a cluster in FIPS mode] endif::[] diff --git a/security/container_security/security-hardening.adoc b/security/container_security/security-hardening.adoc index 8195ea2fd1b0..0b4b476835cb 100644 --- a/security/container_security/security-hardening.adoc +++ b/security/container_security/security-hardening.adoc @@ -44,6 +44,6 @@ include::modules/security-hardening-how.adoc[leveloffset=+1] * xref:../../nodes/nodes/nodes-nodes-managing.adoc#nodes-nodes-kernel-arguments_nodes-nodes-managing[Adding kernel arguments to nodes] ifndef::openshift-origin[] * xref:../../installing/installing_aws/installation-config-parameters-aws.adoc#installation-configuration-parameters-optional_installation-config-parameters-aws[Optional configuration parameters] -* xref:../../installing/installing-fips.adoc#installing-fips[Support for FIPS cryptography] +* xref:../../installing/overview/installing-fips.adoc#installing-fips[Support for FIPS cryptography] * link:https://access.redhat.com/articles/3359851[{op-system-base} core crypto components] endif::[] diff --git a/security/container_security/security-hosts-vms.adoc b/security/container_security/security-hosts-vms.adoc index f11356884062..a7dd44490841 100644 --- a/security/container_security/security-hosts-vms.adoc +++ b/security/container_security/security-hosts-vms.adoc @@ -30,7 +30,7 @@ include::modules/security-hosts-vms-rhcos.adoc[leveloffset=+1] * 
xref:../../installing/install_config/installing-customizing.adoc#installation-special-config-chrony_installing-customizing[Chrony time service] * xref:../../updating/understanding_updates/intro-to-updates.adoc#update-service-about_understanding-openshift-updates[About the OpenShift Update Service] ifndef::openshift-origin[] -* xref:../../installing/installing-fips.adoc#installing-fips[FIPS cryptography] +* xref:../../installing/overview/installing-fips.adoc#installing-fips[FIPS cryptography] endif::[] // Virtualization versus containers @@ -42,5 +42,5 @@ include::modules/security-hosts-vms-openshift.adoc[leveloffset=+1] ifndef::openshift-origin[] [role="_additional-resources"] .Additional resources -* xref:../../installing/installing-fips.adoc#installing-fips[FIPS cryptography] +* xref:../../installing/overview/installing-fips.adoc#installing-fips[FIPS cryptography] endif::[] diff --git a/security/rh-required-whitelisted-IP-addresses-for-sre-access.adoc b/security/rh-required-whitelisted-IP-addresses-for-sre-access.adoc new file mode 100644 index 000000000000..4c0ef5ccf387 --- /dev/null +++ b/security/rh-required-whitelisted-IP-addresses-for-sre-access.adoc @@ -0,0 +1,39 @@ +:_mod-docs-content-type: ASSEMBLY +[id="rh-required-whitelisted-IP-addresses-for-sre-access_{context}"] +include::_attributes/attributes-openshift-dedicated.adoc[] +include::_attributes/common-attributes.adoc[] += Required allowlist IP addresses for SRE cluster access + +:context: rh-required-whitelisted-IP-addresses-for-sre-access + +toc::[] + +[id="required-whitelisted-overview_{context}"] +== Overview + +For Red Hat SREs to troubleshoot any issues within {product-title} clusters, they must have ingress access to the API server through allowlist IP addresses. 
+ +[id="required-whitelisted-access_{context}"] +== Obtaining allowlisted IP addresses +{product-title} users can use an {cluster-manager} CLI command to obtain the most up-to-date allowlist IP addresses for the Red Hat machines that are necessary for SRE access to {product-title} clusters. + +[NOTE] +==== +These allowlist IP addresses are not permanent and are subject to change. You must continuously review the API output for the most current allowlist IP addresses. +==== +.Prerequisites +* You installed the link:https://console.redhat.com/openshift/downloads[OpenShift Cluster Manager API command-line interface (`ocm`)]. +* You are able to configure your firewall to include the allowlist IP addresses. + +.Procedure +. To get the current allowlist IP addresses needed for SRE access to your {product-title} cluster, run the following command: ++ +[source,terminal] +---- +$ ocm get /api/clusters_mgmt/v1/trusted_ip_addresses|jq -r '.items[].id' +---- +. Configure your firewall to grant access to the allowlist IP addresses. + + + + diff --git a/security/security_profiles_operator/spo-release-notes.adoc b/security/security_profiles_operator/spo-release-notes.adoc index ddd9f4430e6f..4cd6b8e87bff 100644 --- a/security/security_profiles_operator/spo-release-notes.adoc +++ b/security/security_profiles_operator/spo-release-notes.adoc @@ -12,6 +12,34 @@ These release notes track the development of the Security Profiles Operator in { For an overview of the Security Profiles Operator, see xref:../../security/security_profiles_operator/spo-overview.adoc#[Security Profiles Operator Overview]. 
+[id="spo-release-notes-0-8-5"] +== Security Profiles Operator 0.8.5 + +The following advisory is available for the Security Profiles Operator 0.8.5: + +* link:https://access.redhat.com/errata/RHBA-2024:5016[RHBA-2024:5016 - OpenShift Security Profiles Operator bug fix update] + +[id="spo-0-8-5-bug-fixes"] +=== Bug fixes + +* When attempting to install the Security Profiles Operator from the web console, the option to enable Operator-recommended cluster monitoring was unavailable for the namespace. With this update, you can now enable Operator-recommended cluster monitoring in the namespace. (link:https://issues.redhat.com/browse/OCPBUGS-37794[*OCPBUGS-37794*]) + +* Previously, the Security Profiles Operator would intermittently not be visible in the OperatorHub, which caused limited access to install the Operator via the web console. With this update, the Security Profiles Operator is present in the OperatorHub. + +[id="spo-release-notes-0-8-4"] +== Security Profiles Operator 0.8.4 + +The following advisory is available for the Security Profiles Operator 0.8.4: + +* link:https://access.redhat.com/errata/RHBA-2024:4781[RHBA-2024:4781 - OpenShift Security Profiles Operator bug fix update] + +This update addresses CVEs in underlying dependencies. + +[id="spo-0-8-4-new-features-and-enhancements"] +=== New features and enhancements + +* You can now specify a default security profile in the `image` attribute of a `ProfileBinding` object by setting a wildcard. For more information, see xref:../../security/security_profiles_operator/spo-selinux.adoc#spo-binding-workloads_spo-selinux[Binding workloads to profiles with ProfileBindings (SELinux)] and xref:../../security/security_profiles_operator/spo-seccomp.adoc#spo-binding-workloads_spo-seccomp[Binding workloads to profiles with ProfileBindings (Seccomp)]. 
+ [id="spo-release-notes-0-8-2"] == Security Profiles Operator 0.8.2 diff --git a/serverless/install/preparing-serverless-install.adoc b/serverless/install/preparing-serverless-install.adoc index d11df4e1ffc9..2afbfc5f804b 100644 --- a/serverless/install/preparing-serverless-install.adoc +++ b/serverless/install/preparing-serverless-install.adoc @@ -62,5 +62,5 @@ endif::[] ifdef::openshift-enterprise[] * xref:../../operators/admin/olm-restricted-networks.adoc#olm-restricted-networks[Using Operator Lifecycle Manager on restricted networks] * xref:../../operators/understanding/olm-understanding-operatorhub.adoc#olm-operatorhub-overview[Understanding OperatorHub] -* xref:../../installing/cluster-capabilities.adoc#cluster-capabilities[Cluster capabilities] +* xref:../../installing/overview/cluster-capabilities.adoc#cluster-capabilities[Cluster capabilities] endif::[] diff --git a/service_mesh/v1x/preparing-ossm-installation.adoc b/service_mesh/v1x/preparing-ossm-installation.adoc index 77b62212d584..08465cb6080e 100644 --- a/service_mesh/v1x/preparing-ossm-installation.adoc +++ b/service_mesh/v1x/preparing-ossm-installation.adoc @@ -24,7 +24,7 @@ ifdef::openshift-enterprise[] + [NOTE] ==== -If you are installing {SMProductName} on a xref:../../installing/installing-preparing.adoc#supported-installation-methods-for-different-platforms[restricted network], follow the instructions for your chosen {product-title} infrastructure. +If you are installing {SMProductName} on a xref:../../installing/overview/installing-preparing.adoc#supported-installation-methods-for-different-platforms[restricted network], follow the instructions for your chosen {product-title} infrastructure. 
==== + endif::[] diff --git a/service_mesh/v2x/ossm-gateway-migration.adoc b/service_mesh/v2x/ossm-gateway-migration.adoc new file mode 100644 index 000000000000..829f829f21d1 --- /dev/null +++ b/service_mesh/v2x/ossm-gateway-migration.adoc @@ -0,0 +1,21 @@ +:_mod-docs-content-type: ASSEMBLY +[id="ossm-gateway-migration"] += Gateway migration +include::_attributes/common-attributes.adoc[] +:context: gateway-migration + +toc::[] + +As a network administrator, the preferred method for deploying ingress and egress gateways is with a `Deployment` resource using gateway injection. + +include::modules/ossm-about-gateway-migration.adoc[leveloffset=+1] + +include::modules/ossm-migrating-from-smcp-defined-gateways-to-gateway-injection.adoc[leveloffset=+1] + +[role="_additional-resources"] +[id="additional-resources_{context}"] +== Additional resources + +* xref:../../service_mesh/v2x/ossm-traffic-manage.adoc#ossm-automatic-gateway-injection_traffic-management[Enabling gateway injection] + +* xref:../../service_mesh/v2x/ossm-traffic-manage.adoc#ossm-deploying-automatic-gateway-injection_traffic-management[Deploying automatic gateway injection] \ No newline at end of file diff --git a/service_mesh/v2x/ossm-observability.adoc b/service_mesh/v2x/ossm-observability.adoc index 5e02a26316da..a85fc8a286e6 100644 --- a/service_mesh/v2x/ossm-observability.adoc +++ b/service_mesh/v2x/ossm-observability.adoc @@ -22,12 +22,6 @@ include::modules/ossm-distr-tracing.adoc[leveloffset=+1] include::modules/ossm-configuring-distr-tracing-tempo.adoc[leveloffset=+2] -[role="_additional-resources"] -.Additional resources -ifndef::openshift-rosa,openshift-dedicated[] -xref:../../observability/distr_tracing/distr_tracing_tempo/distr-tracing-tempo-installing.adoc[Installing the distributed tracing platform (Tempo)]. 
-endif::openshift-rosa,openshift-dedicated[] - include::modules/ossm-config-external-jaeger.adoc[leveloffset=+2] include::modules/ossm-config-sampling.adoc[leveloffset=+2] diff --git a/service_mesh/v2x/ossm-route-migration.adoc b/service_mesh/v2x/ossm-route-migration.adoc new file mode 100644 index 000000000000..8d4d9b2bce84 --- /dev/null +++ b/service_mesh/v2x/ossm-route-migration.adoc @@ -0,0 +1,18 @@ +:_mod-docs-content-type: ASSEMBLY +[id="ossm-route-migration"] += Route migration +include::_attributes/common-attributes.adoc[] +:context: route-migration + +toc::[] + +Automatic route creation, also known as Istio OpenShift Routing (IOR), is a deprecated feature that is disabled by default for any `ServiceMeshControlPlane` resource that was created using {SMProductName} 2.5 and later. Migrating from IOR to explicitly-managed routes provides a more flexible way to manage and configure ingress gateways. When route resources are explicitly created they can be managed alongside the other gateway and application resources as part of a GitOps management model. 
+ +include::modules/ossm-migrating-from-ior-to-explicitly-managed-routes.adoc[leveloffset=+1] + +[role="_additional-resources"] +[id="additional-resources_{context}"] +== Additional resources + +* xref:../../networking/routes/route-configuration.adoc#nw-creating-a-route_route-configuration[Creating an HTTP-based Route] +* xref:../../service_mesh/v2x/ossm-traffic-manage.adoc#ossm-auto-route_traffic-management[Understanding automatic routes] \ No newline at end of file diff --git a/service_mesh/v2x/ossm-traffic-manage.adoc b/service_mesh/v2x/ossm-traffic-manage.adoc index e360bccafea9..3dc851741b99 100644 --- a/service_mesh/v2x/ossm-traffic-manage.adoc +++ b/service_mesh/v2x/ossm-traffic-manage.adoc @@ -12,6 +12,7 @@ include::modules/ossm-gateways.adoc[leveloffset=+1] // Hiding in ROSA/OSD, dedicated-admin cannot create "services" or "deployments" ifndef::openshift-rosa,openshift-dedicated[] + include::modules/ossm-automatic-gateway-injection.adoc[leveloffset=+2] include::modules/ossm-deploying-automatic-gateway-injection.adoc[leveloffset=+2] diff --git a/service_mesh/v2x/preparing-ossm-installation.adoc b/service_mesh/v2x/preparing-ossm-installation.adoc index f87fd58f3e0c..7e206cd45791 100644 --- a/service_mesh/v2x/preparing-ossm-installation.adoc +++ b/service_mesh/v2x/preparing-ossm-installation.adoc @@ -14,7 +14,7 @@ Before you can install {SMProductName}, you must subscribe to {product-title} an ifdef::openshift-enterprise[] * Review the xref:../../architecture/architecture-installation.adoc#installation-overview_architecture-installation[{product-title} {product-version} overview]. -* Install {product-title} {product-version}. If you are installing {SMProductName} on a xref:../../installing/installing-preparing.adoc#supported-installation-methods-for-different-platforms[restricted network], follow the instructions for your chosen {product-title} infrastructure. +* Install {product-title} {product-version}. 
If you are installing {SMProductName} on a xref:../../installing/overview/installing-preparing.adoc#supported-installation-methods-for-different-platforms[restricted network], follow the instructions for your chosen {product-title} infrastructure. ** xref:../../installing/installing_aws/ipi/installing-aws-default.adoc#installing-aws-default[Install {product-title} {product-version} on AWS] ** xref:../../installing/installing_aws/upi/installing-aws-user-infra.adoc#installing-aws-user-infra[Install {product-title} {product-version} on AWS with user-provisioned infrastructure] ** xref:../../installing/installing_bare_metal/installing-bare-metal.adoc#installing-bare-metal[Install {product-title} {product-version} on bare metal] diff --git a/service_mesh/v2x/upgrading-ossm.adoc b/service_mesh/v2x/upgrading-ossm.adoc index 250f5a142509..a02d62091e98 100644 --- a/service_mesh/v2x/upgrading-ossm.adoc +++ b/service_mesh/v2x/upgrading-ossm.adoc @@ -36,6 +36,8 @@ Although you can deploy multiple versions of the control plane in the same clust For more information about migrating your extensions, refer to xref:../../service_mesh/v2x/ossm-extensions.adoc#ossm-extensions-migration-overview_ossm-extensions[Migrating from ServiceMeshExtension to WasmPlugin resources]. 
+include::modules/ossm-upgrade-25-26-changes.adoc[leveloffset=+2] + include::modules/ossm-upgrade-24-25-changes.adoc[leveloffset=+2] include::modules/ossm-upgrade-23-24-changes.adoc[leveloffset=+2] diff --git a/snippets/cluster-network-operator-abstract.adoc b/snippets/cluster-network-operator-abstract.adoc new file mode 100644 index 000000000000..17c0f68b70ea --- /dev/null +++ b/snippets/cluster-network-operator-abstract.adoc @@ -0,0 +1,12 @@ +// Text snippet included in the following assemblies: +// +// * /networking/cluster-network-operator.adoc +// * post_installation_configuration/post-install-network-configuration.adoc +// +// Text snippet included in the following modules: +// +// * list of modules where this text snippet is included + +:_mod-docs-content-type: SNIPPET + +You can use the Cluster Network Operator (CNO) to deploy and manage cluster network components on an {product-title} cluster, including the Container Network Interface (CNI) network plugin selected for the cluster during installation. diff --git a/snippets/distr-tracing-and-otel-disclaimer-about-docs-for-supported-features-only.adoc b/snippets/distr-tracing-and-otel-disclaimer-about-docs-for-supported-features-only.adoc new file mode 100644 index 000000000000..17ffbf849347 --- /dev/null +++ b/snippets/distr-tracing-and-otel-disclaimer-about-docs-for-supported-features-only.adoc @@ -0,0 +1,11 @@ +// Text snippet included in the following modules: +// +// * observability/distr_tracing/distr-tracing-rn.adoc +// * observability/otel/otel-rn.adoc + +:_mod-docs-content-type: SNIPPET + +[NOTE] +==== +Only supported features are documented. Undocumented features are currently unsupported. If you need assistance with a feature, contact Red Hat's support. 
+==== diff --git a/snippets/distr-tracing-tempo-required-secret-parameters.adoc b/snippets/distr-tracing-tempo-required-secret-parameters.adoc index 2656b961d5bc..934225e3063a 100644 --- a/snippets/distr-tracing-tempo-required-secret-parameters.adoc +++ b/snippets/distr-tracing-tempo-required-secret-parameters.adoc @@ -1,7 +1,6 @@ // Text snippet included in the following modules: // -// * distr-tracing-tempo-install-web-console.adoc -// * distr-tracing-tempo-install-cli.adoc +// * distr-tracing-tempo-storage-ref.adoc :_mod-docs-content-type: SNIPPET diff --git a/snippets/distr-tracing-tempo-tempomonolithic-custom-resource.adoc b/snippets/distr-tracing-tempo-tempomonolithic-custom-resource.adoc index 6fd6b0216318..391c23391921 100644 --- a/snippets/distr-tracing-tempo-tempomonolithic-custom-resource.adoc +++ b/snippets/distr-tracing-tempo-tempomonolithic-custom-resource.adoc @@ -19,14 +19,25 @@ spec: size: Gi # <2> s3: # <3> secret: # <4> + tls: # <5> + enabled: true + caName: # <6> jaegerui: - enabled: true # <5> + enabled: true # <7> route: - enabled: true # <6> + enabled: true # <8> + resources: # <9> + total: + limits: + memory: Gi + cpu: m ---- -<1> Type of storage for storing traces: in-memory storage, a persistent volume, or object storage. The value for the `tmpfs` in-memory storage is `memory`. The value for a persistent volume is `pv`. The accepted values for object storage are `s3`, `gcs`, or `azure`, depending on the used object store type. +<1> Type of storage for storing traces: in-memory storage, a persistent volume, or object storage. The value for a persistent volume is `pv`. The accepted values for object storage are `s3`, `gcs`, or `azure`, depending on the used object store type. The default value is `memory` for the `tmpfs` in-memory storage, which is only appropriate for development, testing, demonstrations, and proof-of-concept environments because the data does not persist when the pod is shut down. 
<2> Memory size: For in-memory storage, this means the size of the `tmpfs` volume, where the default is `2Gi`. For a persistent volume, this means the size of the persistent volume claim, where the default is `10Gi`. For object storage, this means the size of the persistent volume claim for the Tempo WAL, where the default is `10Gi`. <3> Optional: For object storage, the type of object storage. The accepted values are `s3`, `gcs`, and `azure`, depending on the used object store type. <4> Optional: For object storage, the value of the `name` in the `metadata` of the storage secret. The storage secret must be in the same namespace as the TempoMonolithic instance and contain the fields specified in "Table 1. Required secret parameters" in the section "Object storage setup". -<5> Enables the Jaeger UI. -<6> Enables creation of a route for the Jaeger UI. +<5> Optional. +<6> Optional: Name of a `ConfigMap` object that contains a CA certificate. +<7> Enables the Jaeger UI. +<8> Enables creation of a route for the Jaeger UI. +<9> Optional. 
diff --git a/snippets/distr-tracing-tempo-tempostack-custom-resource.adoc b/snippets/distr-tracing-tempo-tempostack-custom-resource.adoc new file mode 100644 index 000000000000..365ecf195a5c --- /dev/null +++ b/snippets/distr-tracing-tempo-tempostack-custom-resource.adoc @@ -0,0 +1,43 @@ +// :_mod-docs-content-type: SNIPPET +// Text snippet included in the following modules: +// +// * modules/distr-tracing-tempo-install-tempostack-web-console.adoc +// * modules/distr-tracing-tempo-install-tempostack-cli.adoc + +[source,yaml] +---- +apiVersion: tempo.grafana.com/v1alpha1 +kind: TempoStack +metadata: + name: sample + namespace: +spec: + storageSize: Gi # <1> + storage: + secret: # <2> + name: # <3> + type: # <4> + tls: # <5> + enabled: true + caName: # <6> + template: + queryFrontend: + jaegerQuery: + enabled: true + ingress: + route: + termination: edge + type: route + resources: # <7> + total: + limits: + memory: Gi + cpu: m +---- +<1> Size of the persistent volume claim for the Tempo WAL. The default is `10Gi`. +<2> Secret you created in step 2 for the object storage that had been set up as one of the prerequisites. +<3> Value of the `name` in the `metadata` of the secret. +<4> Accepted values are `azure` for Azure Blob Storage; `gcs` for Google Cloud Storage; and `s3` for Amazon S3, MinIO, or {odf-full}. +<5> Optional. +<6> Optional: Name of a `ConfigMap` object that contains a CA certificate. +<7> Optional. 
diff --git a/snippets/idms-global-pull-secret.adoc b/snippets/idms-global-pull-secret.adoc index 84f4f7242bc4..9ed29dae5309 100644 --- a/snippets/idms-global-pull-secret.adoc +++ b/snippets/idms-global-pull-secret.adoc @@ -3,8 +3,11 @@ // * modules/builds-image-source // * modules/images-configuration-registry-mirror -:_mod-docs-content-type: SNIPPET +ifeval::["{context}" == "enabling-windows-container-workloads"] +:winc: +endif::[] +:_mod-docs-content-type: SNIPPET [NOTE] ==== diff --git a/snippets/log6x-clf-samples.yaml b/snippets/log6x-clf-samples.yaml new file mode 100644 index 000000000000..695fad595765 --- /dev/null +++ b/snippets/log6x-clf-samples.yaml @@ -0,0 +1,74 @@ +# YAML supports multiple documents per file, separated by --- +# YAML only support single line comments, and ignores anything after a # +# YAML does not like tabs. Don't use them. Exception for inside a quoted string. + +--- +# AzureMonitor CLF Sample +apiVersion: observability.openshift.io/v1 +kind: ClusterLogForwarder # <1> +metadata: + name: collector +spec: + managementState: Managed + inputs: + - name: selected-app # <2> + application: + excludes: + - container: "*" + namespace: "*aosqe*" + includes: + - container: "*" + namespace: "*user1*" + selector: + matchLabels: {"run": "centos-logtest"} + tuning: + rateLimitPerContainer: + maxRecordsPerSecond: 20 + type: application + - name: infrastructure + type: infrastructure + infrastructure: + sources: + - node + - container + - name: audit + type: audit + audit: + sources: + - auditd + - kubeAPI + - openshiftAPI + - ovn + filters: + - name: drop-logs + type: drop + drop: + - test: + - field: .kubernetes.namespace_name + matches: "openshift-kube*" + outputs: + - azureMonitor: + authentication: + sharedKey: + key: shared_key + secretName: azuremonitor-secret + azureResourceId: "b36631b9-d82e-4e76-9fb2-3c47630d9744" + customerId: 
"/subscriptions/53b8f551-f0fc-4bea-8cba-6d1fefd54c8a/resourceGroups/anliaz86-rg/providers/Microsoft.OperationalInsights/workspaces/logtest-2024" + host: "ods.opinsights.azure.com" + logType: ocp_log + name: azure-log + type: azureMonitor + pipelines: + - inputRefs: + - selected-app + - infrastructure + - audit + name: pipe1 + filterRefs: + - drop-logs + outputRefs: + - azure-log + serviceAccount: + name: "logs-collector" +# <1> Callout test +# <2> Callout testing diff --git a/snippets/lvms-creating-lvmcluster.adoc b/snippets/lvms-creating-lvmcluster.adoc index 5dca8a00f06c..3641fa9a13fb 100644 --- a/snippets/lvms-creating-lvmcluster.adoc +++ b/snippets/lvms-creating-lvmcluster.adoc @@ -6,6 +6,7 @@ apiVersion: lvm.topolvm.io/v1alpha1 kind: LVMCluster metadata: name: my-lvmcluster + namespace: openshift-storage spec: tolerations: - effect: NoSchedule diff --git a/snippets/memory-ballooning.adoc b/snippets/memory-ballooning.adoc new file mode 100644 index 000000000000..fbf513f05e09 --- /dev/null +++ b/snippets/memory-ballooning.adoc @@ -0,0 +1,18 @@ +// Text snippet included in the following modules: +// +// * modules/installation-minimum-resource-requirements.adoc + +:_mod-docs-content-type: SNIPPET + +[IMPORTANT] +==== +Do not use memory ballooning in {product-title} clusters. Memory ballooning can cause cluster-wide instabilities, service degradation, or other undefined behaviors. + +* Control plane machines should have committed memory equal to or greater than the published minimum resource requirements for a cluster installation. + +* Compute machines should have a minimum reservation equal to or greater than the published minimum resource requirements for a cluster installation. + +These minimum CPU and memory requirements do not account for resources required by user workloads. + +For more information, see the Red Hat Knowledgebase article link:https://access.redhat.com/articles/7074533[Memory Ballooning and OpenShift]. 
+==== diff --git a/snippets/microshift-rhde-compatibility-table-snip.adoc b/snippets/microshift-rhde-compatibility-table-snip.adoc index 81da7b50ae59..a7aead328dc3 100644 --- a/snippets/microshift-rhde-compatibility-table-snip.adoc +++ b/snippets/microshift-rhde-compatibility-table-snip.adoc @@ -16,10 +16,15 @@ ^|*{microshift-short} Release Status* ^|*Supported {microshift-short} Version→{microshift-short} Version Updates* +^|9.4 +^|4.17 +^|Generally Available +^|4.17.0→4.17.z and 4.16→4.17 + ^|9.4 ^|4.16 ^|Generally Available -^|4.16.0→4.16.z, 4.14→4.16 and 4.15→4.16 +^|4.16.0→4.16.z, 4.14→4.16, 4.15→4.16 and 4.16→4.17 ^|9.2, 9.3 ^|4.15 diff --git a/snippets/microshift-update-paths-snip.adoc b/snippets/microshift-update-paths-snip.adoc index df27347d7e1b..dc52e0d402ea 100644 --- a/snippets/microshift-update-paths-snip.adoc +++ b/snippets/microshift-update-paths-snip.adoc @@ -12,7 +12,11 @@ Before updating {microshift-short} or {op-system}, determine the compatibilities *{product-title} update paths* +{microshift-short} version 4.17:: +* Version 4.17 to 4.17.z on {op-system} or {op-system-ostree} 9.4 + {microshift-short} version 4.16:: +* Version 4.16 on {op-system} or {op-system-ostree} 9.4 to 4.17 on {op-system} or {op-system-ostree} 9.4 * Version 4.16 to 4.16.z on {op-system} or {op-system-ostree} 9.4 {microshift-short} version 4.15:: diff --git a/snippets/network-flow-matrix.csv b/snippets/network-flow-matrix.csv index 24066b38c8c9..c9c4100fac89 100644 --- a/snippets/network-flow-matrix.csv +++ b/snippets/network-flow-matrix.csv @@ -1,14 +1,16 @@ Direction,Protocol,Port,Namespace,Service,Pod,Container,Node Role,Optional Ingress,TCP,22,Host system service,sshd,,,master,TRUE Ingress,TCP,53,openshift-dns,dns-default,dnf-default,dns,master,FALSE +Ingress,TCP,80,openshift-ingress,router-default,router-default,router,master,FALSE Ingress,TCP,111,Host system service,rpcbind,,,master,TRUE 
+Ingress,TCP,443,openshift-ingress,router-default,router-default,router,master,FALSE Ingress,TCP,2379,openshift-etcd,etcd,etcd,etcdctl,master,FALSE Ingress,TCP,2380,openshift-etcd,healthz,etcd,etcd,master,FALSE Ingress,TCP,5050,openshift-machine-api,,ironic-proxy,ironic-proxy,master,FALSE Ingress,TCP,6080,openshift-kube-apiserver,,kube-apiserver,kube-apiserver-insecure-readyz,master,FALSE Ingress,TCP,6385,openshift-machine-api,,ironic-proxy,ironic-proxy,master,FALSE Ingress,TCP,6443,openshift-kube-apiserver,apiserver,kube-apiserver,kube-apiserver,master,FALSE -Ingress,TCP,8080,openshift-network-operator ,,network-operator,network-operator,master,FALSE +Ingress,TCP,8080,openshift-network-operator,,network-operator,network-operator,master,FALSE Ingress,TCP,8798,openshift-machine-config-operator,machine-config-daemon,machine-config-daemon,machine-config-daemon,master,FALSE Ingress,TCP,9001,openshift-machine-config-operator,machine-config-daemon,machine-config-daemon,kube-rbac-proxy,master,FALSE Ingress,TCP,9099,openshift-cluster-version,cluster-version-operator,cluster-version-operator,cluster-version-operator,master,FALSE @@ -60,7 +62,7 @@ Ingress,TCP,9637,openshift-machine-config-operator,kube-rbac-proxy-crio,kube-rba Ingress,TCP,10250,Host system service,kubelet,,,worker,FALSE Ingress,TCP,10256,openshift-ovn-kubernetes,ovnkube,ovnkube,ovnkube-controller,worker,TRUE Ingress,TCP,10300,openshift-cluster-csi-drivers,csi-livenessprobe,csi-driver-node,csi-driver,worker,FALSE -Ingress,TCP,10309,openshift-cluster-csi-drivers,csi-node-driver-registrar,csi-driver-node,csi-node-driver-registrar,worker,FALSE +Ingress,TCP,10309,openshift-cluster-csi-drivers,csi-node-driver,csi-driver-node,csi-node-driver-registrar,worker,FALSE Ingress,TCP,18080,openshift-kni-infra,,coredns,coredns,worker,FALSE Ingress,UDP,53,openshift-dns,dns-default,dnf-default,dns,worker,FALSE Ingress,UDP,111,Host system service,rpcbind,,,worker,TRUE diff --git 
a/snippets/network-observability-netobserv-cli-install-warning.adoc b/snippets/network-observability-netobserv-cli-install-warning.adoc new file mode 100644 index 000000000000..478b51f73a9e --- /dev/null +++ b/snippets/network-observability-netobserv-cli-install-warning.adoc @@ -0,0 +1,15 @@ +// Text snippet included in the following assemblies: +// +// +// +// Text snippet included in the following modules: +// +// * observability/network_observability/network-observability-operator-release-notes.adoc +// * observability/network_observability/netobserv_cli/netobserv-cli-install.adoc + +:_mod-docs-content-type: SNIPPET + +[WARNING] +==== +The Network Observability CLI (`oc netobserv`) is temporarily unavailable and is expected to resolve with link:https://issues.redhat.com/browse/OCPBUGS-36146[OCPBUGS-36146]. +==== \ No newline at end of file diff --git a/snippets/olmv1-multi-catalog-admon.adoc b/snippets/olmv1-multi-catalog-admon.adoc index 9d85d0cd12f6..df1a395680c2 100644 --- a/snippets/olmv1-multi-catalog-admon.adoc +++ b/snippets/olmv1-multi-catalog-admon.adoc @@ -8,6 +8,6 @@ ==== If you try to install an Operator or extension that does not have unique name, the installation might fail or lead to an unpredictable result. This occurs for the following reasons: -* If mulitple catalogs are installed on a cluster, {olmv1} does not include a mechanism to specify a catalog when you install an Operator or extension. -* Dependency resolution in {olmv1-first} requires that all of the Operators and extensions that are available to install on a cluster use a unique name for their bundles and packages. +* If mulitple catalogs are installed on a cluster, {olmv1-first} does not include a mechanism to specify a catalog when you install an Operator or extension. +* {olmv1} requires that all of the Operators and extensions that are available to install on a cluster use a unique name for their bundles and packages. 
==== diff --git a/snippets/olmv1-rukpak-does-not-support-fips.adoc b/snippets/olmv1-rukpak-does-not-support-fips.adoc new file mode 100644 index 000000000000..641e5a267cba --- /dev/null +++ b/snippets/olmv1-rukpak-does-not-support-fips.adoc @@ -0,0 +1,14 @@ +// Text snippet included in the following assemblies: +// +// * +// +// Text snippet included in the following modules: +// +// * + +:_mod-docs-content-type: SNIPPET + +[WARNING] +==== +RukPak, a Technology Preview component, does not support FIPS. In {product-title} {product-version}, {olmv1-first} depends on RukPak. As a result, RukPak and {olmv1} do not run on clusters with FIPS mode enabled. +==== diff --git a/snippets/olmv1-tp-extension-support.adoc b/snippets/olmv1-tp-extension-support.adoc new file mode 100644 index 000000000000..cf42f82fa35e --- /dev/null +++ b/snippets/olmv1-tp-extension-support.adoc @@ -0,0 +1,19 @@ +// Text snippet included in the following modules: +// +// * modules/olmv1-installing-an-operator.adoc +// * release_notes/ocp-4-16-release-notes.adoc (enteprise-4.16 branch only) +// * release_notes/ocp-4-15-release-notes.adoc (enteprise-4.15 branch only) + +:_mod-docs-content-type: SNIPPET + +[IMPORTANT] +==== +{olmv1} does not support dependency resolution. If an extension declares dependencies for other APIs or packages, the dependencies must be present on the cluster before you attempt to install the extension. + +Currently, {olmv1} supports the installation of extensions that meet the following criteria: + +* The extension must use the `AllNamespaces` install mode. +* The extension must not use webhooks. + +Cluster extensions that use webhooks or that target a single or specified set of namespaces cannot be installed. 
+==== diff --git a/snippets/ossm-current-version-support-snippet.adoc b/snippets/ossm-current-version-support-snippet.adoc index 0a600c68061f..41a3d8a66b95 100644 --- a/snippets/ossm-current-version-support-snippet.adoc +++ b/snippets/ossm-current-version-support-snippet.adoc @@ -4,4 +4,4 @@ :_mod-docs-content-type: SNIPPET -The most current version of the {SMProductName} Operator can be used with all supported versions of {SMProductShortName}. The version of {SMProductShortName} is specified using the `ServiceMeshControlPlane`. +The most current version of the {SMProductName} Operator can be used with all supported versions of {SMProductShortName}. The version of {SMProductShortName} is specified by using the `ServiceMeshControlPlane` resource. diff --git a/snippets/technology-preview.adoc b/snippets/technology-preview.adoc index a91b9795f01d..9c1acea0a89b 100644 --- a/snippets/technology-preview.adoc +++ b/snippets/technology-preview.adoc @@ -9,4 +9,4 @@ For more information about the support scope of Red Hat Technology Preview features, see link:https://access.redhat.com/support/offerings/techpreview/[Technology Preview Features Support Scope]. 
==== // Undefine {FeatureName} attribute, so that any mistakes are easily spotted -:!FeatureName: The Alibaba Cloud installation with Assisted Installer +:!FeatureName: diff --git a/snippets/telco-core_02-ocs-external-storagecluster.yaml b/snippets/telco-core_02-ocs-external-storagecluster.yaml index 592e5ba9c3f7..a81262303067 100644 --- a/snippets/telco-core_02-ocs-external-storagecluster.yaml +++ b/snippets/telco-core_02-ocs-external-storagecluster.yaml @@ -10,3 +10,5 @@ spec: externalStorage: enable: true labelSelector: {} +status: + phase: Ready diff --git a/snippets/telco-core_ClusterLogSubscription.yaml b/snippets/telco-core_ClusterLogSubscription.yaml index 25f04189e302..8b55deb55525 100644 --- a/snippets/telco-core_ClusterLogSubscription.yaml +++ b/snippets/telco-core_ClusterLogSubscription.yaml @@ -9,3 +9,5 @@ spec: source: redhat-operators-disconnected sourceNamespace: openshift-marketplace installPlanApproval: Automatic +status: + state: AtLatestKnown diff --git a/snippets/telco-core_NMState.yaml b/snippets/telco-core_NMState.yaml new file mode 100644 index 000000000000..132b9018f2bb --- /dev/null +++ b/snippets/telco-core_NMState.yaml @@ -0,0 +1,5 @@ +apiVersion: nmstate.io/v1 +kind: NMState +metadata: + name: nmstate +spec: {} diff --git a/snippets/telco-core_NMStateNS.yaml b/snippets/telco-core_NMStateNS.yaml new file mode 100644 index 000000000000..88e1df8ba609 --- /dev/null +++ b/snippets/telco-core_NMStateNS.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: openshift-nmstate + annotations: + workload.openshift.io/allowed: management diff --git a/snippets/telco-core_NMStateOperGroup.yaml b/snippets/telco-core_NMStateOperGroup.yaml new file mode 100644 index 000000000000..66acda69a1f2 --- /dev/null +++ b/snippets/telco-core_NMStateOperGroup.yaml @@ -0,0 +1,8 @@ +apiVersion: operators.coreos.com/v1 +kind: OperatorGroup +metadata: + name: openshift-nmstate + namespace: openshift-nmstate +spec: + targetNamespaces: + - 
openshift-nmstate diff --git a/snippets/telco-core_NMStateSubscription.yaml b/snippets/telco-core_NMStateSubscription.yaml new file mode 100644 index 000000000000..34c9792a8e5d --- /dev/null +++ b/snippets/telco-core_NMStateSubscription.yaml @@ -0,0 +1,13 @@ +apiVersion: operators.coreos.com/v1alpha1 +kind: Subscription +metadata: + name: kubernetes-nmstate-operator + namespace: openshift-nmstate +spec: + channel: "stable" + name: kubernetes-nmstate-operator + source: redhat-operators-disconnected + sourceNamespace: openshift-marketplace + installPlanApproval: Automatic +status: + state: AtLatestKnown diff --git a/snippets/telco-core_Scheduler.yaml b/snippets/telco-core_Scheduler.yaml new file mode 100644 index 000000000000..89bae11288d2 --- /dev/null +++ b/snippets/telco-core_Scheduler.yaml @@ -0,0 +1,10 @@ +apiVersion: config.openshift.io/v1 +kind: Scheduler +metadata: + name: cluster +spec: + # non-schedulable control plane is the default. This ensures + # compliance. + mastersSchedulable: false + policy: + name: "" diff --git a/snippets/telco-core_SriovOperatorConfig.yaml b/snippets/telco-core_SriovOperatorConfig.yaml index 8e13cfcf0f20..6dabca42d728 100644 --- a/snippets/telco-core_SriovOperatorConfig.yaml +++ b/snippets/telco-core_SriovOperatorConfig.yaml @@ -11,3 +11,5 @@ spec: node-role.kubernetes.io/worker: "" enableInjector: true enableOperatorWebhook: true + disableDrain: false + logLevel: 2 diff --git a/snippets/telco-core_SriovSubscription.yaml b/snippets/telco-core_SriovSubscription.yaml index d3badc35f336..f50dd4fe8dd4 100644 --- a/snippets/telco-core_SriovSubscription.yaml +++ b/snippets/telco-core_SriovSubscription.yaml @@ -11,3 +11,5 @@ spec: source: redhat-operators-disconnected sourceNamespace: openshift-marketplace installPlanApproval: Automatic +status: + state: AtLatestKnown diff --git a/snippets/telco-core_bgp-advr.yaml b/snippets/telco-core_bgp-advr.yaml index ef0a45cdf519..9fe859874bbb 100644 --- a/snippets/telco-core_bgp-advr.yaml +++ 
b/snippets/telco-core_bgp-advr.yaml @@ -14,10 +14,12 @@ spec: # eg: # - peer-one + # communities: [$communities] - # Note correlation with address pool. + # Note correlation with address pool, or Community # eg: + # - bgpcommunity # - 65535:65282 aggregationLength: 32 aggregationLengthV6: 128 diff --git a/snippets/telco-core_bgp-peer.yaml b/snippets/telco-core_bgp-peer.yaml index 0f71716217e1..7e7fb6280394 100644 --- a/snippets/telco-core_bgp-peer.yaml +++ b/snippets/telco-core_bgp-peer.yaml @@ -1,6 +1,6 @@ # required # count: 1-N -apiVersion: metallb.io/v1beta1 +apiVersion: metallb.io/v1beta2 kind: BGPPeer metadata: name: $name @@ -11,3 +11,4 @@ spec: myASN: $myasn # eg 64500 routerID: $id # eg 10.10.10.10 bfdProfile: bfdprofile + passwordSecret: {} diff --git a/snippets/telco-core_catalog-source.yaml b/snippets/telco-core_catalog-source.yaml index cea0e5cb5f08..b78c3eba8ba7 100644 --- a/snippets/telco-core_catalog-source.yaml +++ b/snippets/telco-core_catalog-source.yaml @@ -13,6 +13,6 @@ spec: # updateStrategy: # registryPoll: # interval: 1h -#status: -# connectionState: -# lastObservedState: READY +status: + connectionState: + lastObservedState: READY diff --git a/snippets/telco-core_community.yaml b/snippets/telco-core_community.yaml new file mode 100644 index 000000000000..04cc76482a42 --- /dev/null +++ b/snippets/telco-core_community.yaml @@ -0,0 +1,8 @@ +--- +apiVersion: metallb.io/v1beta1 +kind: Community +metadata: + name: bgpcommunity + namespace: metallb-system +spec: + communities: [$comm] diff --git a/snippets/telco-core_control-plane-load-kernel-modules.yaml b/snippets/telco-core_control-plane-load-kernel-modules.yaml index f20693b42d18..3468d128c17c 100644 --- a/snippets/telco-core_control-plane-load-kernel-modules.yaml +++ b/snippets/telco-core_control-plane-load-kernel-modules.yaml @@ -19,7 +19,7 @@ spec: overwrite: true path: /etc/modprobe.d/kernel-blacklist.conf - contents: - source: 
data:text/plain;charset=utf-8;base64,aXBfZ3JlCmlwNl90YWJsZXMKaXA2dF9SRUpFQ1QKaXA2dGFibGVfZmlsdGVyCmlwNnRhYmxlX21hbmdsZQppcHRhYmxlX2ZpbHRlcgppcHRhYmxlX21hbmdsZQppcHRhYmxlX25hdAp4dF9tdWx0aXBvcnQKeHRfb3duZXIKeHRfUkVESVJFQ1QKeHRfc3RhdGlzdGljCnh0X1RDUE1TUwp4dF91MzI= + source: data:text/plain;charset=utf-8;base64,aXBfZ3JlCmlwNl90YWJsZXMKaXA2dF9SRUpFQ1QKaXA2dGFibGVfZmlsdGVyCmlwNnRhYmxlX21hbmdsZQppcHRhYmxlX2ZpbHRlcgppcHRhYmxlX21hbmdsZQppcHRhYmxlX25hdAp4dF9tdWx0aXBvcnQKeHRfb3duZXIKeHRfUkVESVJFQ1QKeHRfc3RhdGlzdGljCnh0X1RDUE1TUwo= mode: 420 overwrite: true path: /etc/modules-load.d/kernel-load.conf diff --git a/snippets/telco-core_metallbSubscription.yaml b/snippets/telco-core_metallbSubscription.yaml index 01c7f463f14f..3736a8bf2547 100644 --- a/snippets/telco-core_metallbSubscription.yaml +++ b/snippets/telco-core_metallbSubscription.yaml @@ -12,3 +12,5 @@ spec: source: redhat-operators-disconnected sourceNamespace: openshift-marketplace installPlanApproval: Automatic +status: + state: AtLatestKnown diff --git a/snippets/telco-core_monitoring-config-cm.yaml b/snippets/telco-core_monitoring-config-cm.yaml index 08f2d17abb13..73d525042f4d 100644 --- a/snippets/telco-core_monitoring-config-cm.yaml +++ b/snippets/telco-core_monitoring-config-cm.yaml @@ -8,9 +8,6 @@ metadata: namespace: openshift-monitoring data: config.yaml: | - k8sPrometheusAdapter: - dedicatedServiceMonitors: - enabled: true prometheusK8s: retention: 15d volumeClaimTemplate: diff --git a/snippets/telco-core_odfSubscription.yaml b/snippets/telco-core_odfSubscription.yaml index 865ff3a7759b..2c50f02d9d52 100644 --- a/snippets/telco-core_odfSubscription.yaml +++ b/snippets/telco-core_odfSubscription.yaml @@ -12,3 +12,5 @@ spec: source: redhat-operators-disconnected sourceNamespace: openshift-marketplace installPlanApproval: Automatic +status: + state: AtLatestKnown diff --git a/snippets/telco-core_worker-load-kernel-modules.yaml b/snippets/telco-core_worker-load-kernel-modules.yaml index 
ff1546601e9e..50b630da38ac 100644 --- a/snippets/telco-core_worker-load-kernel-modules.yaml +++ b/snippets/telco-core_worker-load-kernel-modules.yaml @@ -19,7 +19,7 @@ spec: overwrite: true path: /etc/modprobe.d/kernel-blacklist.conf - contents: - source: data:text/plain;charset=utf-8;base64,aXBfZ3JlCmlwNl90YWJsZXMKaXA2dF9SRUpFQ1QKaXA2dGFibGVfZmlsdGVyCmlwNnRhYmxlX21hbmdsZQppcHRhYmxlX2ZpbHRlcgppcHRhYmxlX21hbmdsZQppcHRhYmxlX25hdAp4dF9tdWx0aXBvcnQKeHRfb3duZXIKeHRfUkVESVJFQ1QKeHRfc3RhdGlzdGljCnh0X1RDUE1TUwp4dF91MzI= + source: data:text/plain;charset=utf-8;base64,aXBfZ3JlCmlwNl90YWJsZXMKaXA2dF9SRUpFQ1QKaXA2dGFibGVfZmlsdGVyCmlwNnRhYmxlX21hbmdsZQppcHRhYmxlX2ZpbHRlcgppcHRhYmxlX21hbmdsZQppcHRhYmxlX25hdAp4dF9tdWx0aXBvcnQKeHRfb3duZXIKeHRfUkVESVJFQ1QKeHRfc3RhdGlzdGljCnh0X1RDUE1TUwo= mode: 420 overwrite: true path: /etc/modules-load.d/kernel-load.conf diff --git a/snippets/xfs-filesystem-snippet.adoc b/snippets/xfs-filesystem-snippet.adoc new file mode 100644 index 000000000000..a0040145ed84 --- /dev/null +++ b/snippets/xfs-filesystem-snippet.adoc @@ -0,0 +1,21 @@ +// Text snippet included in the following modules: +// +// * modules/oadp-1-3-backing-csi-snapshots.adoc +// * backup_and_restore/application_backup_and_restore/installing/oadp-using-data-mover-for-csi-snapshots-doc.adoc + +:_mod-docs-content-type: SNIPPET + +[NOTE] +==== +If you format the volume by using XFS filesystem and the volume is at 100% capacity, the backup fails with a `no space left on device` error. For example: + +[source,terminal] +---- +Error: relabel failed /var/lib/kubelet/pods/3ac..34/volumes/ \ +kubernetes.io~csi/pvc-684..12c/mount: lsetxattr /var/lib/kubelet/ \ +pods/3ac..34/volumes/kubernetes.io~csi/pvc-68..2c/mount/data-xfs-103: \ +no space left on device +---- + +In this scenario, consider resizing the volume or using a different filesystem type, for example, `ext4`, so that the backup completes successfully. 
+==== diff --git a/snippets/ztp_01-container-mount-ns-and-kubelet-conf-master.yaml b/snippets/ztp_01-container-mount-ns-and-kubelet-conf-master.yaml index 4678a3ffe604..a7d206dbe4bf 100644 --- a/snippets/ztp_01-container-mount-ns-and-kubelet-conf-master.yaml +++ b/snippets/ztp_01-container-mount-ns-and-kubelet-conf-master.yaml @@ -1,3 +1,5 @@ +# Automatically generated by extra-manifests-builder +# Do not make changes directly. apiVersion: machineconfiguration.openshift.io/v1 kind: MachineConfig metadata: diff --git a/snippets/ztp_01-container-mount-ns-and-kubelet-conf-worker.yaml b/snippets/ztp_01-container-mount-ns-and-kubelet-conf-worker.yaml index bedf700823c1..3ba2dcb73503 100644 --- a/snippets/ztp_01-container-mount-ns-and-kubelet-conf-worker.yaml +++ b/snippets/ztp_01-container-mount-ns-and-kubelet-conf-worker.yaml @@ -1,3 +1,5 @@ +# Automatically generated by extra-manifests-builder +# Do not make changes directly. apiVersion: machineconfiguration.openshift.io/v1 kind: MachineConfig metadata: diff --git a/snippets/ztp_03-sctp-machine-config-master.yaml b/snippets/ztp_03-sctp-machine-config-master.yaml index cf4d830154af..34ef791a3167 100644 --- a/snippets/ztp_03-sctp-machine-config-master.yaml +++ b/snippets/ztp_03-sctp-machine-config-master.yaml @@ -1,3 +1,5 @@ +# Automatically generated by extra-manifests-builder +# Do not make changes directly. apiVersion: machineconfiguration.openshift.io/v1 kind: MachineConfig metadata: diff --git a/snippets/ztp_03-sctp-machine-config-worker.yaml b/snippets/ztp_03-sctp-machine-config-worker.yaml index 440d036045d0..daa4abe8f2a0 100644 --- a/snippets/ztp_03-sctp-machine-config-worker.yaml +++ b/snippets/ztp_03-sctp-machine-config-worker.yaml @@ -1,3 +1,5 @@ +# Automatically generated by extra-manifests-builder +# Do not make changes directly. 
apiVersion: machineconfiguration.openshift.io/v1 kind: MachineConfig metadata: diff --git a/snippets/ztp_06-kdump-master.yaml b/snippets/ztp_06-kdump-master.yaml index 97b00cac89cb..e49aba82fe29 100644 --- a/snippets/ztp_06-kdump-master.yaml +++ b/snippets/ztp_06-kdump-master.yaml @@ -1,3 +1,5 @@ +# Automatically generated by extra-manifests-builder +# Do not make changes directly. apiVersion: machineconfiguration.openshift.io/v1 kind: MachineConfig metadata: diff --git a/snippets/ztp_06-kdump-worker.yaml b/snippets/ztp_06-kdump-worker.yaml index 640435445c5c..940dad158280 100644 --- a/snippets/ztp_06-kdump-worker.yaml +++ b/snippets/ztp_06-kdump-worker.yaml @@ -1,3 +1,5 @@ +# Automatically generated by extra-manifests-builder +# Do not make changes directly. apiVersion: machineconfiguration.openshift.io/v1 kind: MachineConfig metadata: diff --git a/snippets/ztp_07-sriov-related-kernel-args-master.yaml b/snippets/ztp_07-sriov-related-kernel-args-master.yaml index 6b191623a69b..dae73835f0b0 100644 --- a/snippets/ztp_07-sriov-related-kernel-args-master.yaml +++ b/snippets/ztp_07-sriov-related-kernel-args-master.yaml @@ -1,3 +1,5 @@ +# Automatically generated by extra-manifests-builder +# Do not make changes directly. apiVersion: machineconfiguration.openshift.io/v1 kind: MachineConfig metadata: diff --git a/snippets/ztp_07-sriov-related-kernel-args-worker.yaml b/snippets/ztp_07-sriov-related-kernel-args-worker.yaml index a36936fb4f92..1dfeff223ea0 100644 --- a/snippets/ztp_07-sriov-related-kernel-args-worker.yaml +++ b/snippets/ztp_07-sriov-related-kernel-args-worker.yaml @@ -1,3 +1,5 @@ +# Automatically generated by extra-manifests-builder +# Do not make changes directly. 
apiVersion: machineconfiguration.openshift.io/v1 kind: MachineConfig metadata: diff --git a/snippets/ztp_08-set-rcu-normal-master.yaml b/snippets/ztp_08-set-rcu-normal-master.yaml index d358e5c7bd69..f93ae78ea099 100644 --- a/snippets/ztp_08-set-rcu-normal-master.yaml +++ b/snippets/ztp_08-set-rcu-normal-master.yaml @@ -1,3 +1,5 @@ +# Automatically generated by extra-manifests-builder +# Do not make changes directly. apiVersion: machineconfiguration.openshift.io/v1 kind: MachineConfig metadata: diff --git a/snippets/ztp_08-set-rcu-normal-worker.yaml b/snippets/ztp_08-set-rcu-normal-worker.yaml index 5e823cc63750..f1b0bb3720d2 100644 --- a/snippets/ztp_08-set-rcu-normal-worker.yaml +++ b/snippets/ztp_08-set-rcu-normal-worker.yaml @@ -1,3 +1,5 @@ +# Automatically generated by extra-manifests-builder +# Do not make changes directly. apiVersion: machineconfiguration.openshift.io/v1 kind: MachineConfig metadata: diff --git a/snippets/ztp_09-openshift-marketplace-ns.yaml b/snippets/ztp_09-openshift-marketplace-ns.yaml index 4171de4780ba..ee89c621c146 100644 --- a/snippets/ztp_09-openshift-marketplace-ns.yaml +++ b/snippets/ztp_09-openshift-marketplace-ns.yaml @@ -1,3 +1,5 @@ +# Taken from https://github.com/operator-framework/operator-marketplace/blob/53c124a3f0edfd151652e1f23c87dd39ed7646bb/manifests/01_namespace.yaml +# Update it as the source evolves. apiVersion: v1 kind: Namespace metadata: diff --git a/snippets/ztp_99-crio-disable-wipe-master.yaml b/snippets/ztp_99-crio-disable-wipe-master.yaml index 8117a918620a..34b036de44c1 100644 --- a/snippets/ztp_99-crio-disable-wipe-master.yaml +++ b/snippets/ztp_99-crio-disable-wipe-master.yaml @@ -1,3 +1,5 @@ +# Automatically generated by extra-manifests-builder +# Do not make changes directly. 
apiVersion: machineconfiguration.openshift.io/v1 kind: MachineConfig metadata: diff --git a/snippets/ztp_99-crio-disable-wipe-worker.yaml b/snippets/ztp_99-crio-disable-wipe-worker.yaml index f90c9b154591..fc4206396140 100644 --- a/snippets/ztp_99-crio-disable-wipe-worker.yaml +++ b/snippets/ztp_99-crio-disable-wipe-worker.yaml @@ -1,3 +1,5 @@ +# Automatically generated by extra-manifests-builder +# Do not make changes directly. apiVersion: machineconfiguration.openshift.io/v1 kind: MachineConfig metadata: diff --git a/snippets/ztp_99-sync-time-once-master.yaml b/snippets/ztp_99-sync-time-once-master.yaml index 5195ba9fb877..09ea1b4d0839 100644 --- a/snippets/ztp_99-sync-time-once-master.yaml +++ b/snippets/ztp_99-sync-time-once-master.yaml @@ -1,3 +1,5 @@ +# Automatically generated by extra-manifests-builder +# Do not make changes directly. apiVersion: machineconfiguration.openshift.io/v1 kind: MachineConfig metadata: diff --git a/snippets/ztp_99-sync-time-once-worker.yaml b/snippets/ztp_99-sync-time-once-worker.yaml index 2675a637ed2e..2c4ca027704b 100644 --- a/snippets/ztp_99-sync-time-once-worker.yaml +++ b/snippets/ztp_99-sync-time-once-worker.yaml @@ -1,3 +1,5 @@ +# Automatically generated by extra-manifests-builder +# Do not make changes directly. 
apiVersion: machineconfiguration.openshift.io/v1 kind: MachineConfig metadata: @@ -14,6 +16,7 @@ spec: [Unit] Description=Sync time once After=network-online.target + Wants=network-online.target [Service] Type=oneshot TimeoutStartSec=300 diff --git a/snippets/ztp_ClusterLogForwarder.yaml b/snippets/ztp_ClusterLogForwarder.yaml index 5afd24c48700..b555fd55d491 100644 --- a/snippets/ztp_ClusterLogForwarder.yaml +++ b/snippets/ztp_ClusterLogForwarder.yaml @@ -5,5 +5,28 @@ metadata: namespace: openshift-logging annotations: {} spec: - outputs: $outputs - pipelines: $pipelines +# outputs: $outputs +# pipelines: $pipelines + +#apiVersion: "logging.openshift.io/v1" +#kind: ClusterLogForwarder +#metadata: +# name: instance +# namespace: openshift-logging +#spec: +# outputs: +# - type: "kafka" +# name: kafka-open +# url: tcp://10.46.55.190:9092/test +# pipelines: +# - inputRefs: +# - audit +# - infrastructure +# labels: +# label1: test1 +# label2: test2 +# label3: test3 +# label4: test4 +# name: all-to-default +# outputRefs: +# - kafka-open diff --git a/snippets/ztp_ClusterLogging.yaml b/snippets/ztp_ClusterLogging.yaml index a83b3a3114bc..ac8ff3ba0059 100644 --- a/snippets/ztp_ClusterLogging.yaml +++ b/snippets/ztp_ClusterLogging.yaml @@ -7,5 +7,4 @@ metadata: spec: managementState: "Managed" collection: - logs: - type: "vector" + type: "vector" diff --git a/snippets/ztp_DisconnectedICSP.yaml b/snippets/ztp_DisconnectedICSP.yaml index 1dde92c31d6c..defa3cece489 100644 --- a/snippets/ztp_DisconnectedICSP.yaml +++ b/snippets/ztp_DisconnectedICSP.yaml @@ -4,5 +4,5 @@ metadata: name: disconnected-internal-icsp annotations: {} spec: - repositoryDigestMirrors: - - $mirrors +# repositoryDigestMirrors: +# - $mirrors diff --git a/snippets/ztp_ImageBasedUpgrade.yaml b/snippets/ztp_ImageBasedUpgrade.yaml new file mode 100644 index 000000000000..53547c51dda3 --- /dev/null +++ b/snippets/ztp_ImageBasedUpgrade.yaml @@ -0,0 +1,10 @@ +apiVersion: lca.openshift.io/v1alpha1 +kind: 
ImageBasedUpgrade +metadata: + name: upgrade +spec: + stage: Idle + # When setting `stage: Prep`, remember to add the seed image reference object below. + # seedImageRef: + # image: $image + # version: $version diff --git a/snippets/ztp_LVMOperatorStatus.yaml b/snippets/ztp_LVMOperatorStatus.yaml new file mode 100644 index 000000000000..553eb7ae8253 --- /dev/null +++ b/snippets/ztp_LVMOperatorStatus.yaml @@ -0,0 +1,25 @@ +# This CR verifies the installation/upgrade of the Sriov Network Operator +apiVersion: operators.coreos.com/v1 +kind: Operator +metadata: + name: lvms-operator.openshift-storage + annotations: {} +status: + components: + refs: + - kind: Subscription + namespace: openshift-storage + conditions: + - type: CatalogSourcesUnhealthy + status: "False" + - kind: InstallPlan + namespace: openshift-storage + conditions: + - type: Installed + status: "True" + - kind: ClusterServiceVersion + namespace: openshift-storage + conditions: + - type: Succeeded + status: "True" + reason: InstallSucceeded diff --git a/snippets/ztp_LcaSubscription.yaml b/snippets/ztp_LcaSubscription.yaml new file mode 100644 index 000000000000..41a95ca4a0fe --- /dev/null +++ b/snippets/ztp_LcaSubscription.yaml @@ -0,0 +1,14 @@ +apiVersion: operators.coreos.com/v1alpha1 +kind: Subscription +metadata: + name: lifecycle-agent + namespace: openshift-lifecycle-agent + annotations: {} +spec: + channel: "stable" + name: lifecycle-agent + source: redhat-operators-disconnected + sourceNamespace: openshift-marketplace + installPlanApproval: Manual +status: + state: AtLatestKnown diff --git a/snippets/ztp_LcaSubscriptionNS.yaml b/snippets/ztp_LcaSubscriptionNS.yaml new file mode 100644 index 000000000000..33598fcccf90 --- /dev/null +++ b/snippets/ztp_LcaSubscriptionNS.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: openshift-lifecycle-agent + annotations: {} + labels: + kubernetes.io/metadata.name: openshift-lifecycle-agent diff --git 
a/snippets/ztp_LcaSubscriptionOperGroup.yaml b/snippets/ztp_LcaSubscriptionOperGroup.yaml new file mode 100644 index 000000000000..f06c05da84f3 --- /dev/null +++ b/snippets/ztp_LcaSubscriptionOperGroup.yaml @@ -0,0 +1,9 @@ +apiVersion: operators.coreos.com/v1 +kind: OperatorGroup +metadata: + name: lifecycle-agent + namespace: openshift-lifecycle-agent + annotations: {} +spec: + targetNamespaces: + - openshift-lifecycle-agent diff --git a/snippets/ztp_PerformanceProfile.yaml b/snippets/ztp_PerformanceProfile.yaml index f4350be40cb7..eef6e5384822 100644 --- a/snippets/ztp_PerformanceProfile.yaml +++ b/snippets/ztp_PerformanceProfile.yaml @@ -3,7 +3,7 @@ kind: PerformanceProfile metadata: # if you change this name make sure the 'include' line in TunedPerformancePatch.yaml # matches this name: include=openshift-node-performance-${PerformanceProfile.metadata.name} - # Also in file 'validatorCRs/informDuValidator.yaml': + # Also in file 'validatorCRs/informDuValidator.yaml': # name: 50-performance-${PerformanceProfile.metadata.name} name: openshift-node-performance-profile annotations: diff --git a/snippets/ztp_PtpConfigBoundaryForEvent.yaml b/snippets/ztp_PtpConfigBoundaryForEvent.yaml new file mode 100644 index 000000000000..335e897e7b8e --- /dev/null +++ b/snippets/ztp_PtpConfigBoundaryForEvent.yaml @@ -0,0 +1,131 @@ +apiVersion: ptp.openshift.io/v1 +kind: PtpConfig +metadata: + name: boundary + namespace: openshift-ptp + annotations: {} +spec: + profile: + - name: "boundary" + ptp4lOpts: "-2 --summary_interval -4" + phc2sysOpts: "-a -r -m -n 24 -N 8 -R 16" + ptpSchedulingPolicy: SCHED_FIFO + ptpSchedulingPriority: 10 + ptpSettings: + logReduce: "true" + ptp4lConf: | + # The interface name is hardware-specific + [$iface_slave] + masterOnly 0 + [$iface_master_1] + masterOnly 1 + [$iface_master_2] + masterOnly 1 + [$iface_master_3] + masterOnly 1 + [global] + # + # Default Data Set + # + twoStepFlag 1 + slaveOnly 0 + priority1 128 + priority2 128 + domainNumber 24 + 
#utc_offset 37 + clockClass 248 + clockAccuracy 0xFE + offsetScaledLogVariance 0xFFFF + free_running 0 + freq_est_interval 1 + dscp_event 0 + dscp_general 0 + dataset_comparison G.8275.x + G.8275.defaultDS.localPriority 128 + # + # Port Data Set + # + logAnnounceInterval -3 + logSyncInterval -4 + logMinDelayReqInterval -4 + logMinPdelayReqInterval -4 + announceReceiptTimeout 3 + syncReceiptTimeout 0 + delayAsymmetry 0 + fault_reset_interval -4 + neighborPropDelayThresh 20000000 + masterOnly 0 + G.8275.portDS.localPriority 128 + # + # Run time options + # + assume_two_step 0 + logging_level 6 + path_trace_enabled 0 + follow_up_info 0 + hybrid_e2e 0 + inhibit_multicast_service 0 + net_sync_monitor 0 + tc_spanning_tree 0 + tx_timestamp_timeout 50 + unicast_listen 0 + unicast_master_table 0 + unicast_req_duration 3600 + use_syslog 1 + verbose 0 + summary_interval 0 + kernel_leap 1 + check_fup_sync 0 + clock_class_threshold 135 + # + # Servo Options + # + pi_proportional_const 0.0 + pi_integral_const 0.0 + pi_proportional_scale 0.0 + pi_proportional_exponent -0.3 + pi_proportional_norm_max 0.7 + pi_integral_scale 0.0 + pi_integral_exponent 0.4 + pi_integral_norm_max 0.3 + step_threshold 2.0 + first_step_threshold 0.00002 + max_frequency 900000000 + clock_servo pi + sanity_freq_limit 200000000 + ntpshm_segment 0 + # + # Transport options + # + transportSpecific 0x0 + ptp_dst_mac 01:1B:19:00:00:00 + p2p_dst_mac 01:80:C2:00:00:0E + udp_ttl 1 + udp6_scope 0x0E + uds_address /var/run/ptp4l + # + # Default interface options + # + clock_type BC + network_transport L2 + delay_mechanism E2E + time_stamping hardware + tsproc_mode filter + delay_filter moving_median + delay_filter_length 10 + egressLatency 0 + ingressLatency 0 + boundary_clock_jbod 0 + # + # Clock description + # + productDescription ;; + revisionData ;; + manufacturerIdentity 00:00:00 + userDescription ; + timeSource 0xA0 + recommend: + - profile: "boundary" + priority: 4 + match: + - nodeLabel: 
"node-role.kubernetes.io/$mcp" diff --git a/snippets/ztp_PtpConfigDualCardGmWpc.yaml b/snippets/ztp_PtpConfigDualCardGmWpc.yaml index 2b6034dffcf2..ba985821df0d 100644 --- a/snippets/ztp_PtpConfigDualCardGmWpc.yaml +++ b/snippets/ztp_PtpConfigDualCardGmWpc.yaml @@ -1,4 +1,7 @@ -# 2 cards $iface_master and $iface_master_1 are connected via SMA1 ports by a cable and $iface_master_1 receives 1PPS signals from $iface_master +# The grandmaster profile is provided for testing only +# It is not installed on production clusters +# In this example two cards $iface_nic1 and $iface_nic2 are connected via +# SMA1 ports by a cable and $iface_nic2 receives 1PPS signals from $iface_nic1 apiVersion: ptp.openshift.io/v1 kind: PtpConfig metadata: @@ -9,7 +12,7 @@ spec: profile: - name: "grandmaster" ptp4lOpts: "-2 --summary_interval -4" - phc2sysOpts: -r -u 0 -m -O -37 -N 8 -R 16 -s $iface_master -n 24 + phc2sysOpts: -r -u 0 -m -w -N 8 -R 16 -s $iface_nic1 -n 24 ptpSchedulingPolicy: SCHED_FIFO ptpSchedulingPriority: 10 ptpSettings: @@ -22,12 +25,12 @@ spec: LocalHoldoverTimeout: 14400 MaxInSpecOffset: 100 pins: $e810_pins - # "$iface_master": + # "$iface_nic1": # "U.FL2": "0 2" # "U.FL1": "0 1" # "SMA2": "0 2" # "SMA1": "2 1" - # "$iface_master_1": + # "$iface_nic2": # "U.FL2": "0 2" # "U.FL1": "0 1" # "SMA2": "0 2" @@ -86,6 +89,12 @@ spec: - "-p" - "MON-HW" reportOutput: true + - args: #ubxtool -P 29.20 -p CFG-MSG,1,38,300 + - "-P" + - "29.20" + - "-p" + - "CFG-MSG,1,38,300" + reportOutput: true ts2phcOpts: " " ts2phcConf: | [nmea] @@ -99,21 +108,30 @@ spec: #example value of gnss_serialport is /dev/ttyGNSS_1700_0 ts2phc.nmea_serialport $gnss_serialport leapfile /usr/share/zoneinfo/leap-seconds.list - [$iface_master] + [$iface_nic1] ts2phc.extts_polarity rising ts2phc.extts_correction 0 - [$iface_master_1] + [$iface_nic2] + ts2phc.master 0 ts2phc.extts_polarity rising #this is a measured value in nanoseconds to compensate for SMA cable delay ts2phc.extts_correction -10 ptp4lConf: | 
- [$iface_master] + [$iface_nic1] + masterOnly 1 + [$iface_nic1_1] + masterOnly 1 + [$iface_nic1_2] + masterOnly 1 + [$iface_nic1_3] + masterOnly 1 + [$iface_nic2] masterOnly 1 - [$iface_master_1] + [$iface_nic2_1] masterOnly 1 - [$iface_master_1_1] + [$iface_nic2_2] masterOnly 1 - [$iface_master_1_2] + [$iface_nic2_3] masterOnly 1 [global] # diff --git a/snippets/ztp_PtpConfigForHA.yaml b/snippets/ztp_PtpConfigForHA.yaml index 821ec858685a..e72ae4ed1b2b 100644 --- a/snippets/ztp_PtpConfigForHA.yaml +++ b/snippets/ztp_PtpConfigForHA.yaml @@ -3,18 +3,17 @@ kind: PtpConfig metadata: name: boundary-ha namespace: openshift-ptp - annotations: - ran.openshift.io/ztp-deploy-wave: "10" + annotations: {} spec: profile: - name: "boundary-ha" - ptp4lOpts: " " + ptp4lOpts: "" phc2sysOpts: "-a -r -n 24" ptpSchedulingPolicy: SCHED_FIFO ptpSchedulingPriority: 10 ptpSettings: logReduce: "true" - haProfiles: "ha-ptp-config-nic1,ha-ptp-config-nic2" + haProfiles: "$profile1,$profile2" recommend: - profile: "boundary-ha" priority: 4 diff --git a/snippets/ztp_PtpConfigForHAForEvent.yaml b/snippets/ztp_PtpConfigForHAForEvent.yaml new file mode 100644 index 000000000000..684d75a59064 --- /dev/null +++ b/snippets/ztp_PtpConfigForHAForEvent.yaml @@ -0,0 +1,21 @@ +apiVersion: ptp.openshift.io/v1 +kind: PtpConfig +metadata: + name: boundary-ha + namespace: openshift-ptp + annotations: {} +spec: + profile: + - name: "boundary-ha" + ptp4lOpts: " " + phc2sysOpts: "-a -r -m -n 24 -N 8 -R 16" + ptpSchedulingPolicy: SCHED_FIFO + ptpSchedulingPriority: 10 + ptpSettings: + logReduce: "true" + haProfiles: "$profile1,$profile2" + recommend: + - profile: "boundary-ha" + priority: 4 + match: + - nodeLabel: "node-role.kubernetes.io/$mcp" diff --git a/snippets/ztp_PtpConfigGmWpc.yaml b/snippets/ztp_PtpConfigGmWpc.yaml index 45c3151e5189..b084c9aba881 100644 --- a/snippets/ztp_PtpConfigGmWpc.yaml +++ b/snippets/ztp_PtpConfigGmWpc.yaml @@ -1,3 +1,5 @@ +# The grandmaster profile is provided for testing only 
+# It is not installed on production clusters apiVersion: ptp.openshift.io/v1 kind: PtpConfig metadata: @@ -8,7 +10,7 @@ spec: profile: - name: "grandmaster" ptp4lOpts: "-2 --summary_interval -4" - phc2sysOpts: -r -u 0 -m -O -37 -N 8 -R 16 -s $iface_master -n 24 + phc2sysOpts: -r -u 0 -m -w -N 8 -R 16 -s $iface_master -n 24 ptpSchedulingPolicy: SCHED_FIFO ptpSchedulingPriority: 10 ptpSettings: @@ -80,6 +82,12 @@ spec: - "-p" - "MON-HW" reportOutput: true + - args: #ubxtool -P 29.20 -p CFG-MSG,1,38,300 + - "-P" + - "29.20" + - "-p" + - "CFG-MSG,1,38,300" + reportOutput: true ts2phcOpts: " " ts2phcConf: | [nmea] diff --git a/snippets/ztp_PtpConfigMasterForEvent.yaml b/snippets/ztp_PtpConfigMasterForEvent.yaml new file mode 100644 index 000000000000..d3bf33a4dfc9 --- /dev/null +++ b/snippets/ztp_PtpConfigMasterForEvent.yaml @@ -0,0 +1,126 @@ +# The grandmaster profile is provided for testing only +# It is not installed on production clusters +apiVersion: ptp.openshift.io/v1 +kind: PtpConfig +metadata: + name: grandmaster + namespace: openshift-ptp + annotations: {} +spec: + profile: + - name: "grandmaster" + # The interface name is hardware-specific + interface: $interface + ptp4lOpts: "-2 --summary_interval -4" + phc2sysOpts: "-a -r -m -n 24 -N 8 -R 16" + ptpSchedulingPolicy: SCHED_FIFO + ptpSchedulingPriority: 10 + ptpSettings: + logReduce: "true" + ptp4lConf: | + [global] + # + # Default Data Set + # + twoStepFlag 1 + slaveOnly 0 + priority1 128 + priority2 128 + domainNumber 24 + #utc_offset 37 + clockClass 255 + clockAccuracy 0xFE + offsetScaledLogVariance 0xFFFF + free_running 0 + freq_est_interval 1 + dscp_event 0 + dscp_general 0 + dataset_comparison G.8275.x + G.8275.defaultDS.localPriority 128 + # + # Port Data Set + # + logAnnounceInterval -3 + logSyncInterval -4 + logMinDelayReqInterval -4 + logMinPdelayReqInterval -4 + announceReceiptTimeout 3 + syncReceiptTimeout 0 + delayAsymmetry 0 + fault_reset_interval -4 + neighborPropDelayThresh 20000000 + 
masterOnly 0 + G.8275.portDS.localPriority 128 + # + # Run time options + # + assume_two_step 0 + logging_level 6 + path_trace_enabled 0 + follow_up_info 0 + hybrid_e2e 0 + inhibit_multicast_service 0 + net_sync_monitor 0 + tc_spanning_tree 0 + tx_timestamp_timeout 50 + unicast_listen 0 + unicast_master_table 0 + unicast_req_duration 3600 + use_syslog 1 + verbose 0 + summary_interval 0 + kernel_leap 1 + check_fup_sync 0 + clock_class_threshold 7 + # + # Servo Options + # + pi_proportional_const 0.0 + pi_integral_const 0.0 + pi_proportional_scale 0.0 + pi_proportional_exponent -0.3 + pi_proportional_norm_max 0.7 + pi_integral_scale 0.0 + pi_integral_exponent 0.4 + pi_integral_norm_max 0.3 + step_threshold 2.0 + first_step_threshold 0.00002 + max_frequency 900000000 + clock_servo pi + sanity_freq_limit 200000000 + ntpshm_segment 0 + # + # Transport options + # + transportSpecific 0x0 + ptp_dst_mac 01:1B:19:00:00:00 + p2p_dst_mac 01:80:C2:00:00:0E + udp_ttl 1 + udp6_scope 0x0E + uds_address /var/run/ptp4l + # + # Default interface options + # + clock_type OC + network_transport L2 + delay_mechanism E2E + time_stamping hardware + tsproc_mode filter + delay_filter moving_median + delay_filter_length 10 + egressLatency 0 + ingressLatency 0 + boundary_clock_jbod 0 + # + # Clock description + # + productDescription ;; + revisionData ;; + manufacturerIdentity 00:00:00 + userDescription ; + timeSource 0xA0 + recommend: + - profile: "grandmaster" + priority: 4 + match: + - nodeLabel: "node-role.kubernetes.io/$mcp" diff --git a/snippets/ztp_PtpConfigSlave.yaml b/snippets/ztp_PtpConfigSlave.yaml index 498ceb067e96..d599942edaa6 100644 --- a/snippets/ztp_PtpConfigSlave.yaml +++ b/snippets/ztp_PtpConfigSlave.yaml @@ -1,7 +1,7 @@ apiVersion: ptp.openshift.io/v1 kind: PtpConfig metadata: - name: slave + name: du-ptp-slave namespace: openshift-ptp annotations: {} spec: diff --git a/snippets/ztp_PtpConfigSlaveForEvent.yaml b/snippets/ztp_PtpConfigSlaveForEvent.yaml new file mode 
100644 index 000000000000..ecb721cb15bb --- /dev/null +++ b/snippets/ztp_PtpConfigSlaveForEvent.yaml @@ -0,0 +1,124 @@ +apiVersion: ptp.openshift.io/v1 +kind: PtpConfig +metadata: + name: du-ptp-slave + namespace: openshift-ptp + annotations: {} +spec: + profile: + - name: "slave" + # The interface name is hardware-specific + interface: $interface + ptp4lOpts: "-2 -s --summary_interval -4" + phc2sysOpts: "-a -r -m -n 24 -N 8 -R 16" + ptpSchedulingPolicy: SCHED_FIFO + ptpSchedulingPriority: 10 + ptpSettings: + logReduce: "true" + ptp4lConf: | + [global] + # + # Default Data Set + # + twoStepFlag 1 + slaveOnly 1 + priority1 128 + priority2 128 + domainNumber 24 + #utc_offset 37 + clockClass 255 + clockAccuracy 0xFE + offsetScaledLogVariance 0xFFFF + free_running 0 + freq_est_interval 1 + dscp_event 0 + dscp_general 0 + dataset_comparison G.8275.x + G.8275.defaultDS.localPriority 128 + # + # Port Data Set + # + logAnnounceInterval -3 + logSyncInterval -4 + logMinDelayReqInterval -4 + logMinPdelayReqInterval -4 + announceReceiptTimeout 3 + syncReceiptTimeout 0 + delayAsymmetry 0 + fault_reset_interval -4 + neighborPropDelayThresh 20000000 + masterOnly 0 + G.8275.portDS.localPriority 128 + # + # Run time options + # + assume_two_step 0 + logging_level 6 + path_trace_enabled 0 + follow_up_info 0 + hybrid_e2e 0 + inhibit_multicast_service 0 + net_sync_monitor 0 + tc_spanning_tree 0 + tx_timestamp_timeout 50 + unicast_listen 0 + unicast_master_table 0 + unicast_req_duration 3600 + use_syslog 1 + verbose 0 + summary_interval 0 + kernel_leap 1 + check_fup_sync 0 + clock_class_threshold 7 + # + # Servo Options + # + pi_proportional_const 0.0 + pi_integral_const 0.0 + pi_proportional_scale 0.0 + pi_proportional_exponent -0.3 + pi_proportional_norm_max 0.7 + pi_integral_scale 0.0 + pi_integral_exponent 0.4 + pi_integral_norm_max 0.3 + step_threshold 2.0 + first_step_threshold 0.00002 + max_frequency 900000000 + clock_servo pi + sanity_freq_limit 200000000 + ntpshm_segment 0 + # 
+ # Transport options + # + transportSpecific 0x0 + ptp_dst_mac 01:1B:19:00:00:00 + p2p_dst_mac 01:80:C2:00:00:0E + udp_ttl 1 + udp6_scope 0x0E + uds_address /var/run/ptp4l + # + # Default interface options + # + clock_type OC + network_transport L2 + delay_mechanism E2E + time_stamping hardware + tsproc_mode filter + delay_filter moving_median + delay_filter_length 10 + egressLatency 0 + ingressLatency 0 + boundary_clock_jbod 0 + # + # Clock description + # + productDescription ;; + revisionData ;; + manufacturerIdentity 00:00:00 + userDescription ; + timeSource 0xA0 + recommend: + - profile: "slave" + priority: 4 + match: + - nodeLabel: "node-role.kubernetes.io/$mcp" diff --git a/snippets/ztp_SriovNetworkNodePolicy.yaml b/snippets/ztp_SriovNetworkNodePolicy.yaml index 8a8dedde903b..58a3298ec753 100644 --- a/snippets/ztp_SriovNetworkNodePolicy.yaml +++ b/snippets/ztp_SriovNetworkNodePolicy.yaml @@ -11,7 +11,7 @@ spec: deviceType: $deviceType isRdma: $isRdma nicSelector: - # The exact physical function name must match the hardware used + # The exact physical function name must match the hardware used pfNames: [$pfNames] nodeSelector: node-role.kubernetes.io/$mcp: "" diff --git a/snippets/ztp_SriovOperatorConfig.yaml b/snippets/ztp_SriovOperatorConfig.yaml index 270f7383e30b..1a7bd5b97b87 100644 --- a/snippets/ztp_SriovOperatorConfig.yaml +++ b/snippets/ztp_SriovOperatorConfig.yaml @@ -3,8 +3,7 @@ kind: SriovOperatorConfig metadata: name: default namespace: openshift-sriov-network-operator - annotations: - ran.openshift.io/ztp-deploy-wave: "10" + annotations: {} spec: configDaemonNodeSelector: "node-role.kubernetes.io/$mcp": "" @@ -23,6 +22,4 @@ spec: # openshift.io/: "1" enableInjector: false enableOperatorWebhook: false - # Disable drain is needed for single-node OpenShift. 
- disableDrain: true logLevel: 0 diff --git a/snippets/ztp_SriovOperatorConfigForSNO.yaml b/snippets/ztp_SriovOperatorConfigForSNO.yaml new file mode 100644 index 000000000000..8ca169c0cfa3 --- /dev/null +++ b/snippets/ztp_SriovOperatorConfigForSNO.yaml @@ -0,0 +1,27 @@ +apiVersion: sriovnetwork.openshift.io/v1 +kind: SriovOperatorConfig +metadata: + name: default + namespace: openshift-sriov-network-operator + annotations: {} +spec: + configDaemonNodeSelector: + "node-role.kubernetes.io/$mcp": "" + # Injector and OperatorWebhook pods can be disabled (set to "false") below + # to reduce the number of management pods. It is recommended to start with the + # webhook and injector pods enabled, and only disable them after verifying the + # correctness of user manifests. + # If the injector is disabled, containers using sr-iov resources must explicitly assign + # them in the "requests"/"limits" section of the container spec, for example: + # containers: + # - name: my-sriov-workload-container + # resources: + # limits: + # openshift.io/: "1" + # requests: + # openshift.io/: "1" + enableInjector: false + enableOperatorWebhook: false + # Disable drain is needed for Single Node Openshift + disableDrain: true + logLevel: 0 diff --git a/snippets/ztp_StorageLVMCluster.yaml b/snippets/ztp_StorageLVMCluster.yaml index b37664026423..f00ab557605e 100644 --- a/snippets/ztp_StorageLVMCluster.yaml +++ b/snippets/ztp_StorageLVMCluster.yaml @@ -1,16 +1,16 @@ apiVersion: lvm.topolvm.io/v1alpha1 kind: LVMCluster metadata: - name: odf-lvmcluster + name: lvmcluster namespace: openshift-storage -spec: - storage: - deviceClasses: - - name: vg1 - deviceSelector: - paths: - - /usr/disk/by-path/pci-0000:11:00.0-nvme-1 - thinPoolConfig: - name: thin-pool-1 - overprovisionRatio: 10 - sizePercent: 90 + annotations: {} +spec: {} +#example: creating a vg1 volume group leveraging all available disks on the node +# except the installation disk. 
+# storage: +# deviceClasses: +# - name: vg1 +# thinPoolConfig: +# name: thin-pool-1 +# sizePercent: 90 +# overprovisionRatio: 10 diff --git a/snippets/ztp_StorageLVMSubscription.yaml b/snippets/ztp_StorageLVMSubscription.yaml new file mode 100644 index 000000000000..7afb38e4a6c2 --- /dev/null +++ b/snippets/ztp_StorageLVMSubscription.yaml @@ -0,0 +1,14 @@ +apiVersion: operators.coreos.com/v1alpha1 +kind: Subscription +metadata: + name: lvms-operator + namespace: openshift-storage + annotations: {} +spec: + channel: "stable" + name: lvms-operator + source: redhat-operators-disconnected + sourceNamespace: openshift-marketplace + installPlanApproval: Manual +status: + state: AtLatestKnown diff --git a/snippets/ztp_StorageLVMSubscriptionNS.yaml b/snippets/ztp_StorageLVMSubscriptionNS.yaml new file mode 100644 index 000000000000..97e84c011bf2 --- /dev/null +++ b/snippets/ztp_StorageLVMSubscriptionNS.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: openshift-storage + labels: + openshift.io/cluster-monitoring: "true" + annotations: {} diff --git a/snippets/ztp_StorageLVMSubscriptionOperGroup.yaml b/snippets/ztp_StorageLVMSubscriptionOperGroup.yaml new file mode 100644 index 000000000000..7612b15ee8ef --- /dev/null +++ b/snippets/ztp_StorageLVMSubscriptionOperGroup.yaml @@ -0,0 +1,9 @@ +apiVersion: operators.coreos.com/v1 +kind: OperatorGroup +metadata: + name: lvms-operator-operatorgroup + namespace: openshift-storage + annotations: {} +spec: + targetNamespaces: + - openshift-storage diff --git a/snippets/ztp_TunedPerformancePatch.yaml b/snippets/ztp_TunedPerformancePatch.yaml index 5e47d72b63f5..d1e03eb9031b 100644 --- a/snippets/ztp_TunedPerformancePatch.yaml +++ b/snippets/ztp_TunedPerformancePatch.yaml @@ -3,8 +3,7 @@ kind: Tuned metadata: name: performance-patch namespace: openshift-cluster-node-tuning-operator - annotations: - ran.openshift.io/ztp-deploy-wave: "10" + annotations: {} spec: profile: - name: performance-patch diff --git 
a/snippets/ztp_example-sno.yaml b/snippets/ztp_example-sno.yaml index 6d7fec894272..b336f735bb62 100644 --- a/snippets/ztp_example-sno.yaml +++ b/snippets/ztp_example-sno.yaml @@ -9,145 +9,144 @@ spec: baseDomain: "example.com" pullSecretRef: name: "assisted-deployment-pull-secret" - clusterImageSetNameRef: "openshift-4.10" + clusterImageSetNameRef: "openshift-4.16" sshPublicKey: "ssh-rsa AAAA..." clusters: - - clusterName: "example-sno" - networkType: "OVNKubernetes" - # installConfigOverrides is a generic way of passing install-config - # parameters through the siteConfig. The 'capabilities' field configures - # the composable openshift feature. In this 'capabilities' setting, we - # remove all but the marketplace component from the optional set of - # components. - # Notes: - # - OperatorLifecycleManager is needed for 4.15 and later - # - NodeTuning is needed for 4.13 and later, not for 4.12 and earlier - # - Ingress is needed for 4.16 and later - installConfigOverrides: | - { - "capabilities": { - "baselineCapabilitySet": "None", - "additionalEnabledCapabilities": [ - "NodeTuning", - "OperatorLifecycleManager" - "Ingress" - ] + - clusterName: "example-sno" + networkType: "OVNKubernetes" + # installConfigOverrides is a generic way of passing install-config + # parameters through the siteConfig. The 'capabilities' field configures + # the composable openshift feature. In this 'capabilities' setting, we + # remove all the optional set of components. + # Notes: + # - OperatorLifecycleManager is needed for 4.15 and later + # - NodeTuning is needed for 4.13 and later, not for 4.12 and earlier + # - Ingress is needed for 4.16 and later + installConfigOverrides: | + { + "capabilities": { + "baselineCapabilitySet": "None", + "additionalEnabledCapabilities": [ + "NodeTuning", + "OperatorLifecycleManager", + "Ingress" + ] + } } - } - # It is strongly recommended to include crun manifests as part of the additional install-time manifests for 4.13+. 
- # The crun manifests can be obtained from source-crs/optional-extra-manifest/ and added to the git repo ie.sno-extra-manifest. - # extraManifestPath: sno-extra-manifest - clusterLabels: - # These example cluster labels correspond to the bindingRules in the PolicyGenTemplate examples - du-profile: "latest" - # These example cluster labels correspond to the bindingRules in the PolicyGenTemplate examples in ../policygentemplates: - # ../policygentemplates/common-ranGen.yaml will apply to all clusters with 'common: true' - common: true - # ../policygentemplates/group-du-sno-ranGen.yaml will apply to all clusters with 'group-du-sno: ""' - group-du-sno: "" - # ../policygentemplates/example-sno-site.yaml will apply to all clusters with 'sites: "example-sno"' - # Normally this should match or contain the cluster name so it only applies to a single cluster - sites : "example-sno" - clusterNetwork: - - cidr: 1001:1::/48 - hostPrefix: 64 - machineNetwork: - - cidr: 1111:2222:3333:4444::/64 - serviceNetwork: - - 1001:2::/112 - additionalNTPSources: - - 1111:2222:3333:4444::2 - # Initiates the cluster for workload partitioning. Setting specific reserved/isolated CPUSets is done via PolicyTemplate - # please see Workload Partitioning Feature for a complete guide. 
- cpuPartitioningMode: AllNodes - # Optionally; This can be used to override the KlusterletAddonConfig that is created for this cluster: - #crTemplates: - # KlusterletAddonConfig: "KlusterletAddonConfigOverride.yaml" - nodes: - - hostName: "example-node1.example.com" - role: "master" - # Optionally; This can be used to configure desired BIOS setting on a host: - #biosConfigRef: - # filePath: "example-hw.profile" - bmcAddress: "idrac-virtualmedia+https://[1111:2222:3333:4444::bbbb:1]/redfish/v1/Systems/System.Embedded.1" - bmcCredentialsName: - name: "example-node1-bmh-secret" - bootMACAddress: "AA:BB:CC:DD:EE:11" - # Use UEFISecureBoot to enable secure boot - bootMode: "UEFI" - rootDeviceHints: - deviceName: "/dev/disk/by-path/pci-0000:01:00.0-scsi-0:2:0:0" - # disk partition at `/var/lib/containers` with ignitionConfigOverride. Some values must be updated. See DiskPartitionContainer.md for more details - ignitionConfigOverride: | - { - "ignition": { - "version": "3.2.0" - }, - "storage": { - "disks": [ - { - "device": "/dev/disk/by-id/wwn-0x6b07b250ebb9d0002a33509f24af1f62", - "partitions": [ - { - "label": "var-lib-containers", - "sizeMiB": 0, - "startMiB": 250000 + # It is strongly recommended to include crun manifests as part of the additional install-time manifests for 4.13+. + # The crun manifests can be obtained from source-crs/optional-extra-manifest/ and added to the git repo ie.sno-extra-manifest. 
+ # extraManifestPath: sno-extra-manifest + clusterLabels: + # These example cluster labels correspond to the bindingRules in the PolicyGenTemplate examples + du-profile: "latest" + # These example cluster labels correspond to the bindingRules in the PolicyGenTemplate examples in ../policygentemplates: + # ../policygentemplates/common-ranGen.yaml will apply to all clusters with 'common: true' + common: true + # ../policygentemplates/group-du-sno-ranGen.yaml will apply to all clusters with 'group-du-sno: ""' + group-du-sno: "" + # ../policygentemplates/example-sno-site.yaml will apply to all clusters with 'sites: "example-sno"' + # Normally this should match or contain the cluster name so it only applies to a single cluster + sites: "example-sno" + clusterNetwork: + - cidr: 1001:1::/48 + hostPrefix: 64 + machineNetwork: + - cidr: 1111:2222:3333:4444::/64 + serviceNetwork: + - 1001:2::/112 + additionalNTPSources: + - 1111:2222:3333:4444::2 + # Initiates the cluster for workload partitioning. Setting specific reserved/isolated CPUSets is done via PolicyTemplate + # please see Workload Partitioning Feature for a complete guide. + cpuPartitioningMode: AllNodes + # Optionally; This can be used to override the KlusterletAddonConfig that is created for this cluster: + #crTemplates: + # KlusterletAddonConfig: "KlusterletAddonConfigOverride.yaml" + nodes: + - hostName: "example-node1.example.com" + role: "master" + # Optionally; This can be used to configure desired BIOS setting on a host: + #biosConfigRef: + # filePath: "example-hw.profile" + bmcAddress: "idrac-virtualmedia+https://[1111:2222:3333:4444::bbbb:1]/redfish/v1/Systems/System.Embedded.1" + bmcCredentialsName: + name: "example-node1-bmh-secret" + bootMACAddress: "AA:BB:CC:DD:EE:11" + # Use UEFISecureBoot to enable secure boot + bootMode: "UEFI" + rootDeviceHints: + deviceName: "/dev/disk/by-path/pci-0000:01:00.0-scsi-0:2:0:0" + # disk partition at `/var/lib/containers` with ignitionConfigOverride. 
Some values must be updated. See DiskPartitionContainer.md for more details + ignitionConfigOverride: | + { + "ignition": { + "version": "3.2.0" + }, + "storage": { + "disks": [ + { + "device": "/dev/disk/by-id/wwn-0x6b07b250ebb9d0002a33509f24af1f62", + "partitions": [ + { + "label": "var-lib-containers", + "sizeMiB": 0, + "startMiB": 250000 + } + ], + "wipeTable": false } - ], - "wipeTable": false - } - ], - "filesystems": [ - { - "device": "/dev/disk/by-partlabel/var-lib-containers", - "format": "xfs", - "mountOptions": [ - "defaults", - "prjquota" ], - "path": "/var/lib/containers", - "wipeFilesystem": true - } - ] - }, - "systemd": { - "units": [ - { - "contents": "# Generated by Butane\n[Unit]\nRequires=systemd-fsck@dev-disk-by\\x2dpartlabel-var\\x2dlib\\x2dcontainers.service\nAfter=systemd-fsck@dev-disk-by\\x2dpartlabel-var\\x2dlib\\x2dcontainers.service\n\n[Mount]\nWhere=/var/lib/containers\nWhat=/dev/disk/by-partlabel/var-lib-containers\nType=xfs\nOptions=defaults,prjquota\n\n[Install]\nRequiredBy=local-fs.target", - "enabled": true, - "name": "var-lib-containers.mount" - } - ] + "filesystems": [ + { + "device": "/dev/disk/by-partlabel/var-lib-containers", + "format": "xfs", + "mountOptions": [ + "defaults", + "prjquota" + ], + "path": "/var/lib/containers", + "wipeFilesystem": true + } + ] + }, + "systemd": { + "units": [ + { + "contents": "# Generated by Butane\n[Unit]\nRequires=systemd-fsck@dev-disk-by\\x2dpartlabel-var\\x2dlib\\x2dcontainers.service\nAfter=systemd-fsck@dev-disk-by\\x2dpartlabel-var\\x2dlib\\x2dcontainers.service\n\n[Mount]\nWhere=/var/lib/containers\nWhat=/dev/disk/by-partlabel/var-lib-containers\nType=xfs\nOptions=defaults,prjquota\n\n[Install]\nRequiredBy=local-fs.target", + "enabled": true, + "name": "var-lib-containers.mount" + } + ] + } } - } - nodeNetwork: - interfaces: - - name: eno1 - macAddress: "AA:BB:CC:DD:EE:11" - config: + nodeNetwork: interfaces: - name: eno1 - type: ethernet - state: up - ipv4: - enabled: false - ipv6: - 
enabled: true - address: - # For SNO sites with static IP addresses, the node-specific, - # API and Ingress IPs should all be the same and configured on - # the interface - - ip: 1111:2222:3333:4444::aaaa:1 - prefix-length: 64 - dns-resolver: - config: - search: - - example.com - server: - - 1111:2222:3333:4444::2 - routes: - config: - - destination: ::/0 - next-hop-interface: eno1 - next-hop-address: 1111:2222:3333:4444::1 - table-id: 254 \ No newline at end of file + macAddress: "AA:BB:CC:DD:EE:11" + config: + interfaces: + - name: eno1 + type: ethernet + state: up + ipv4: + enabled: false + ipv6: + enabled: true + address: + # For SNO sites with static IP addresses, the node-specific, + # API and Ingress IPs should all be the same and configured on + # the interface + - ip: 1111:2222:3333:4444::aaaa:1 + prefix-length: 64 + dns-resolver: + config: + search: + - example.com + server: + - 1111:2222:3333:4444::2 + routes: + config: + - destination: ::/0 + next-hop-interface: eno1 + next-hop-address: 1111:2222:3333:4444::1 + table-id: 254 diff --git a/storage/container_storage_interface/persistent-storage-csi-aws-efs.adoc b/storage/container_storage_interface/persistent-storage-csi-aws-efs.adoc index 2ef819e24388..f0eb576fe85a 100644 --- a/storage/container_storage_interface/persistent-storage-csi-aws-efs.adoc +++ b/storage/container_storage_interface/persistent-storage-csi-aws-efs.adoc @@ -6,7 +6,12 @@ include::_attributes/common-attributes.adoc[] toc::[] -// Content similar to osd-persistent-storage-csi-aws-efs.adoc and rosa-persistent-storage-aws-efs-csi.adoc. Modules are reused. +ifdef::openshift-dedicated,openshift-rosa[] +[IMPORTANT] +==== +This procedure is specific to the link:https://github.com/openshift/aws-efs-csi-driver-operator[AWS EFS CSI Driver Operator] (a Red Hat Operator), which is only applicable for {product-title} 4.10 and later versions. 
+==== +endif::openshift-dedicated,openshift-rosa[] == Overview @@ -32,19 +37,30 @@ include::modules/persistent-storage-csi-about.adoc[leveloffset=+1] :FeatureName: AWS EFS include::modules/persistent-storage-efs-csi-driver-operator-setup.adoc[leveloffset=+1] -ifdef::openshift-rosa,openshift-enterprise[] +// Obtaining a role ARN (OCP) +ifndef::openshift-dedicated,openshift-rosa[] include::modules/persistent-storage-csi-efs-sts.adoc[leveloffset=+2] +endif::openshift-dedicated,openshift-rosa[] +// Obtaining a role ARN (OSD and ROSA) +ifdef::openshift-dedicated,openshift-rosa[] +include::modules/sd-persistent-storage-csi-efs-sts.adoc[leveloffset=+2] +endif::openshift-dedicated,openshift-rosa[] + +.Next steps xref:../../storage/container_storage_interface/persistent-storage-csi-aws-efs.adoc#persistent-storage-csi-olm-operator-install_persistent-storage-csi-aws-efs[Install the AWS EFS CSI Driver Operator]. + [role="_additional-resources"] .Additional resources * xref:../../storage/container_storage_interface/persistent-storage-csi-aws-efs.adoc#persistent-storage-csi-olm-operator-install_persistent-storage-csi-aws-efs[Installing the AWS EFS CSI Driver Operator] +ifndef::openshift-dedicated,openshift-rosa[] * xref:../../installing/installing_aws/ipi/installing-aws-customizations.adoc#cco-ccoctl-configuring_installing-aws-customizations[Configuring the Cloud Credential Operator utility] +endif::openshift-dedicated,openshift-rosa[] * xref:../../storage/container_storage_interface/persistent-storage-csi-aws-efs.adoc#persistent-storage-csi-efs-driver-install_persistent-storage-csi-aws-efs[Installing the {FeatureName} CSI Driver] -endif::[] include::modules/persistent-storage-csi-olm-operator-install.adoc[leveloffset=+2] +.Next steps xref:../../storage/container_storage_interface/persistent-storage-csi-aws-efs.adoc#persistent-storage-csi-efs-driver-install_persistent-storage-csi-aws-efs[Install the AWS EFS CSI Driver]. 
include::modules/persistent-storage-csi-efs-driver-install.adoc[leveloffset=+2] @@ -55,7 +71,9 @@ include::modules/storage-create-storage-class.adoc[leveloffset=+1] include::modules/storage-create-storage-class-console.adoc[leveloffset=+2] include::modules/storage-create-storage-class-cli.adoc[leveloffset=+2] +ifndef::openshift-dedicated,openshift-rosa[] include::modules/persistent-storage-csi-efs-cross-account.adoc[leveloffset=+1] +endif::openshift-dedicated,openshift-rosa[] include::modules/persistent-storage-csi-efs-create-volume.adoc[leveloffset=+1] diff --git a/storage/container_storage_interface/persistent-storage-csi-azure.adoc b/storage/container_storage_interface/persistent-storage-csi-azure.adoc index 3bbdd15a91d2..002dbf02ac99 100644 --- a/storage/container_storage_interface/persistent-storage-csi-azure.adoc +++ b/storage/container_storage_interface/persistent-storage-csi-azure.adoc @@ -35,7 +35,7 @@ include::modules/persistent-storage-byok.adoc[leveloffset=+1] If the OS (root) disk is encrypted, and there is no encrypted key defined in the storage class, Azure Disk CSI driver uses the OS disk encryption key by default to encrypt provisioned storage volumes. ==== -For information about installing with user-managed encryption for Azure, see xref:../../installing/installing_azure/enabling-user-managed-encryption-azure.adoc[Enabling user-managed encryption for Azure]. +For information about installing with user-managed encryption for Azure, see xref:../../installing/installing_azure/ipi/installing-azure-preparing-ipi.adoc#preparing-disk-encryption-sets_installing-azure-preparing-ipi[Enabling user-managed encryption for Azure]. 
endif::openshift-rosa,openshift-dedicated[] //Machine sets that deploy machines on ultra disks using PVCs diff --git a/storage/container_storage_interface/persistent-storage-csi-vsphere.adoc b/storage/container_storage_interface/persistent-storage-csi-vsphere.adoc index a97405ebebcf..03abb6a0e63c 100644 --- a/storage/container_storage_interface/persistent-storage-csi-vsphere.adoc +++ b/storage/container_storage_interface/persistent-storage-csi-vsphere.adoc @@ -29,13 +29,10 @@ For new installations, {product-title} 4.13 and later provides automatic migrati CSI automatic migration should be seamless. Migration does not change how you use all existing API objects, such as persistent volumes, persistent volume claims, and storage classes. ==== -[NOTE] -==== -The vSphere CSI Driver supports dynamic and static provisioning. When using static provisioning in the PV specifications, do not use the key `storage.kubernetes.io/csiProvisionerIdentity` in `csi.volumeAttributes` because this key indicates dynamically provisioned PVs. -==== - include::modules/persistent-storage-csi-about.adoc[leveloffset=+1] +include::modules/persistent-storage-csi-vsphere-limitations.adoc[leveloffset=+1] + include::modules/persistent-storage-csi-vsphere-stor-policy.adoc[leveloffset=+1] include::modules/persistent-storage-csi-vsphere-rwx.adoc[leveloffset=+1] diff --git a/storage/persistent_storage/persistent-storage-ocs.adoc b/storage/persistent_storage/persistent-storage-ocs.adoc index 141b6bdc5723..4b3c04aff898 100644 --- a/storage/persistent_storage/persistent-storage-ocs.adoc +++ b/storage/persistent_storage/persistent-storage-ocs.adoc @@ -6,9 +6,7 @@ include::_attributes/common-attributes.adoc[] toc::[] -{rh-storage-first} is a provider of agnostic persistent storage for {product-title} supporting file, block, and object storage, either in-house or in hybrid clouds. 
As a Red Hat storage solution, {rh-storage-first} is completely integrated with {product-title} for deployment, management, and monitoring. - -{rh-storage-first} provides its own documentation library. The complete set of {rh-storage-first} documentation is available at https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation. +{rh-storage-first} is a provider of agnostic persistent storage for {product-title} supporting file, block, and object storage, either in-house or in hybrid clouds. As a Red Hat storage solution, {rh-storage-first} is completely integrated with {product-title} for deployment, management, and monitoring. For more information, see the link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation[{rh-storage-first} documentation]. [IMPORTANT] ==== diff --git a/support/approved-access.adoc b/support/approved-access.adoc index 2de8ffa6ec37..9c2dfd614dc0 100644 --- a/support/approved-access.adoc +++ b/support/approved-access.adoc @@ -9,9 +9,9 @@ endif::[] toc::[] -Red{nbsp}Hat Site Reliability Engineering (SRE) typically does not require access to systems containing customer data as part of normal operations to manage and support {product-title} clusters. In the unlikely event that SRE needs access to systems containing customer data, you can use the _Approved Access_ interface to review and _approve_ or _deny_ access to these systems. +Red{nbsp}Hat Site Reliability Engineering (SRE) typically does not require an elevated access to systems as part of normal operations to manage and support {product-title} clusters. In the unlikely event that SRE needs elevated access to systems, you can use the _Approved Access_ interface to review and _approve_ or _deny_ access to these systems. 
-Access requests to customer data on {product-rosa} clusters and the corresponding cloud accounts can be created by SRE either in response to a customer-initiated support ticket or in response to alerts received by SRE as part of the standard incident response process. +Elevated access requests to clusters on {product-rosa} clusters and the corresponding cloud accounts can be created by SRE either in response to a customer-initiated support ticket or in response to alerts received by SRE as part of the standard incident response process. When _Approved Access_ is enabled and an SRE creates an access request, _cluster owners_ receive an email notification informing them of a new access request. The email notification contains a link allowing the cluster owner to quickly approve or deny the access request. You must respond in a timely manner otherwise there is a risk to your SLA for {product-rosa}. @@ -22,7 +22,6 @@ When _Approved Access_ is enabled and an SRE creates an access request, _cluster ==== Denying an access request requires you to complete the *Justification* field. In this case, SRE can not directly act on the resources related to the incident. Customers can still use the link:https://access.redhat.com/support/cases/#/case/list[*Customer Support*] to help investigate and resolve any issues. 
==== -// Approved access include::modules/support-submitting-a-case-enable-approved-access.adoc[leveloffset=+1] include::modules/support-reviewing-an-access-request-from-an-email-notification.adoc[leveloffset=+1] diff --git a/support/troubleshooting/osd-managed-resources.adoc b/support/troubleshooting/osd-managed-resources.adoc deleted file mode 100644 index cb47fcfd4339..000000000000 --- a/support/troubleshooting/osd-managed-resources.adoc +++ /dev/null @@ -1,54 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="osd-managed-resources"] -= {product-title} managed resources -include::_attributes/attributes-openshift-dedicated.adoc[] -:context: osd-managed-resources - -toc::[] - -[id="osd-managed-resources-overview"] -== Overview - -The following covers all resources managed or protected by the Service Reliability Engineering Platform (SRE-P) Team. Customers should not attempt to modify these resources because doing so can lead to cluster instability. - -[id="osd-managed-resources-all"] -== Hive managed resources - -The following list displays the {product-title} resources managed by OpenShift Hive, the centralized fleet configuration management system. These resources are in addition to the OpenShift Container Platform resources created during installation. OpenShift Hive continually attempts to maintain consistency across all {product-title} clusters. Changes to {product-title} resources should be made through {cluster-manager} so that {cluster-manager} and Hive are synchronized. Contact ocm-feedback@redhat.com if {cluster-manager} does not support modifying the resources in question. - -.List of Hive managed resources -[%collapsible] -==== -[source,yaml] ----- -include::https://raw.githubusercontent.com/openshift/managed-cluster-config/master/resources/managed/all-osd-resources.yaml[] ----- -==== - -[id="osd-add-on-managed-namespaces"] -== {product-title} add-on namespaces - -{product-title} add-ons are services available for installation after cluster installation. 
These additional services include {openshift-dev-spaces-productname}, Red Hat OpenShift API Management, and Cluster Logging Operator. Any changes to resources within the following namespaces can be overridden by the add-on during upgrades, which can lead to unsupported configurations for the add-on functionality. - -.List of add-on managed namespaces -[%collapsible] -==== -[source,yaml] ----- -include::https://raw.githubusercontent.com/openshift/managed-cluster-config/master/resources/addons-namespaces/main.yaml[] ----- -==== - -[id="osd-validating-webhooks"] -== {product-title} validating webhooks - -{product-title} validating webhooks are a set of dynamic admission controls maintained by the OpenShift SRE team. These HTTP callbacks, also known as webhooks, are called for various types of requests to ensure cluster stability. The following list describes the various webhooks with rules containing the registered operations and resources that are controlled. Any attempt to circumvent these validating webhooks could affect the stability and supportability of the cluster. 
- -.List of validating webhooks -[%collapsible] -==== -[source,json] ----- -include::https://raw.githubusercontent.com/openshift/managed-cluster-validating-webhooks/master/docs/webhooks.json[] ----- -==== diff --git a/support/troubleshooting/rosa-managed-resources.adoc b/support/troubleshooting/sd-managed-resources.adoc similarity index 80% rename from support/troubleshooting/rosa-managed-resources.adoc rename to support/troubleshooting/sd-managed-resources.adoc index 5216afbbd599..2621159d7ac3 100644 --- a/support/troubleshooting/rosa-managed-resources.adoc +++ b/support/troubleshooting/sd-managed-resources.adoc @@ -1,17 +1,17 @@ :_mod-docs-content-type: ASSEMBLY -[id="rosa-managed-resources"] +[id="sd-managed-resources"] = {product-title} managed resources include::_attributes/attributes-openshift-dedicated.adoc[] -:context: rosa-managed-resources +:context: sd-managed-resources toc::[] -[id="rosa-managed-resources-overview"] +[id="sd-managed-resources-overview_{context}"] == Overview The following covers all resources managed or protected by the Service Reliability Engineering Platform (SRE-P) Team. Customers should not attempt to modify these resources because doing so can lead to cluster instability. -[id="rosa-managed-resources-all"] +[id="sd-managed-resources-all_{context}"] == Hive managed resources The following list displays the {product-title} resources managed by OpenShift Hive, the centralized fleet configuration management system. These resources are in addition to the OpenShift Container Platform resources created during installation. OpenShift Hive continually attempts to maintain consistency across all {product-title} clusters. Changes to {product-title} resources should be made through {cluster-manager} so that {cluster-manager} and Hive are synchronized. Contact ocm-feedback@redhat.com if {cluster-manager} does not support modifying the resources in question. 
@@ -25,7 +25,21 @@ include::https://raw.githubusercontent.com/openshift/managed-cluster-config/mast ---- ==== -[id="rosa-add-on-managed-namespaces"] +[id="sd-core-namespaces_{context}"] +== {product-title} core namespaces + +{product-title} core namespaces are installed by default during cluster installation. + +.List of core namespaces +[%collapsible] +==== +[source,yaml] +---- +include::https://raw.githubusercontent.com/openshift/managed-cluster-config/master/deploy/osd-managed-resources/ocp-namespaces.ConfigMap.yaml[] +---- +==== + +[id="sd-add-on-managed-namespaces_{context}"] == {product-title} add-on namespaces {product-title} add-ons are services available for installation after cluster installation. These additional services include {openshift-dev-spaces-productname}, Red{nbsp}Hat OpenShift API Management, and Cluster Logging Operator. Any changes to resources within the following namespaces can be overridden by the add-on during upgrades, which can lead to unsupported configurations for the add-on functionality. @@ -39,7 +53,7 @@ include::https://raw.githubusercontent.com/openshift/managed-cluster-config/mast ---- ==== -[id="rosa-validating-webhooks"] +[id="sd-validating-webhooks_{context}"] == {product-title} validating webhooks {product-title} validating webhooks are a set of dynamic admission controls maintained by the OpenShift SRE team. These HTTP callbacks, also known as webhooks, are called for various types of requests to ensure cluster stability. The following list describes the various webhooks with rules containing the registered operations and resources that are controlled. Any attempt to circumvent these validating webhooks could affect the stability and supportability of the cluster. 
diff --git a/telco_ref_design_specs/core/telco-core-ref-design-components.adoc b/telco_ref_design_specs/core/telco-core-ref-design-components.adoc deleted file mode 100644 index 7e0a011b1107..000000000000 --- a/telco_ref_design_specs/core/telco-core-ref-design-components.adoc +++ /dev/null @@ -1,128 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -:telco-core: -[id="telco-core-ref-components"] -= {rds-caps} reference design components -:context: core-ref-design-components -include::_attributes/common-attributes.adoc[] - -toc::[] - -The following sections describe the various {product-title} components and configurations that you use to configure and deploy clusters to run {rds} workloads. - -include::modules/telco-core-cpu-partitioning-performance-tune.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* link:https://docs.openshift.com/container-platform/4.16/scalability_and_performance/low_latency_tuning/cnf-tuning-low-latency-nodes-with-perf-profile.html#cnf-cpu-infra-container_cnf-master[Tuning nodes for low latency with the performance profile] - -* link:https://docs.openshift.com/container-platform/4.16/scalability_and_performance/ztp_far_edge/ztp-reference-cluster-configuration-for-vdu.html#ztp-du-configuring-host-firmware-requirements_sno-configure-for-vdu[Configuring host firmware for low latency and high performance] - -include::modules/telco-core-service-mesh.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* link:https://docs.openshift.com/container-platform/4.16/service_mesh/v2x/ossm-about.html[About OpenShift Service Mesh] - -include::modules/telco-core-rds-networking.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* link:https://docs.openshift.com/container-platform/4.16/networking/understanding-networking.html[Understanding networking] - -include::modules/telco-core-cluster-network-operator.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional 
resources - -* link:https://docs.openshift.com/container-platform/4.16/networking/cluster-network-operator.html#nw-cluster-network-operator_cluster-network-operator[Cluster Network Operator] - -include::modules/telco-core-load-balancer.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* link:https://docs.openshift.com/container-platform/4.16/networking/metallb/about-metallb.html[About MetalLB and the MetalLB Operator] - -include::modules/telco-core-sriov.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* link:https://docs.openshift.com/container-platform/4.16/networking/hardware_networks/about-sriov.html[About SR-IOV hardware networks] - -include::modules/telco-nmstate-operator.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* link:https://docs.openshift.com/container-platform/4.16/networking/k8s_nmstate/k8s-nmstate-about-the-k8s-nmstate-operator.html[About the Kubernetes NMState Operator] - -include::modules/telco-core-logging.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* link:https://docs.openshift.com/container-platform/4.16/observability/logging/cluster-logging.html[About logging] - -include::modules/telco-core-power-management.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* link:https://docs.openshift.com/container-platform/4.16/scalability_and_performance/low_latency_tuning/cnf-tuning-low-latency-nodes-with-perf-profile.html#cnf-configuring-power-saving-for-nodes_cnf-low-latency-perf-profile[Configuring power saving for nodes that run colocated high and low priority workloads] - -include::modules/telco-core-storage.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/4.16[Product Documentation for Red Hat OpenShift Data Foundation] - 
-include::modules/telco-core-monitoring.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* link:https://docs.openshift.com/container-platform/4.16/observability/monitoring/monitoring-overview.html#about-openshift-monitoring[About {product-version} monitoring] - -include::modules/telco-core-scheduling.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* link:https://docs.openshift.com/container-platform/4.16/nodes/scheduling/nodes-scheduler-about.html[Controlling pod placement using the scheduler] - -* link:https://docs.openshift.com/container-platform/4.16/scalability_and_performance/cnf-numa-aware-scheduling.html[Scheduling NUMA-aware workloads] - -include::modules/telco-core-installation.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* link:https://docs.openshift.com/container-platform/4.16/installing/installing_with_agent_based_installer/installing-with-agent-based-installer.html[Installing an {product-title} cluster with the Agent-based Installer] - -include::modules/telco-core-security.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* link:https://docs.openshift.com/container-platform/4.16/authentication/managing-security-context-constraints.html[Managing security context constraints] - -include::modules/telco-core-scalability.adoc[leveloffset=+1] - -[id="telco-core-additional-config"] -== Additional configuration - -include::modules/telco-core-rds-disconnected.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* link:https://docs.openshift.com/container-platform/4.16/updating/updating_a_cluster/updating_disconnected_cluster/index.html[About cluster updates in a disconnected environment] - -include::modules/telco-core-kernel.adoc[leveloffset=+2] - -:!telco-core: diff --git a/updating/preparing_for_updates/preparing-manual-creds-update.adoc 
b/updating/preparing_for_updates/preparing-manual-creds-update.adoc index 6fa031d9bd27..58bd5b9d3d69 100644 --- a/updating/preparing_for_updates/preparing-manual-creds-update.adoc +++ b/updating/preparing_for_updates/preparing-manual-creds-update.adoc @@ -62,7 +62,7 @@ include::modules/manually-maintained-credentials-upgrade.adoc[leveloffset=+1] [role="_additional-resources"] .Additional resources * xref:../../installing/installing_aws/ipi/installing-aws-customizations.adoc#manually-create-iam_installing-aws-customizations[Manually creating long-term credentials for AWS] -* xref:../../installing/installing_azure/installing-azure-customizations.adoc#manually-create-iam_installing-azure-customizations[Manually creating long-term credentials for Azure] +* xref:../../installing/installing_azure/ipi/installing-azure-customizations.adoc#manually-create-iam_installing-azure-customizations[Manually creating long-term credentials for Azure] * xref:../../installing/installing_azure_stack_hub/installing-azure-stack-hub-default.adoc#manually-create-iam_installing-azure-stack-hub-default[Manually creating long-term credentials for Azure Stack Hub] * xref:../../installing/installing_gcp/installing-gcp-customizations.adoc#manually-create-iam_installing-gcp-customizations[Manually creating long-term credentials for GCP] * xref:../../updating/preparing_for_updates/preparing-manual-creds-update.adoc#cco-manual-upgrade-annotation_preparing-manual-creds-update[Indicating that the cluster is ready to upgrade] diff --git a/updating/updating_a_cluster/migrating-to-multi-payload.adoc b/updating/updating_a_cluster/migrating-to-multi-payload.adoc index 0420a0052d7a..e0caf07b65a7 100644 --- a/updating/updating_a_cluster/migrating-to-multi-payload.adoc +++ b/updating/updating_a_cluster/migrating-to-multi-payload.adoc @@ -29,10 +29,10 @@ include::modules/migrating-to-multi-arch-cli.adoc[leveloffset=+1] [role="_additional-resources"] .Additional resources * 
xref:../../post_installation_configuration/configuring-multi-arch-compute-machines/multi-architecture-configuration.adoc#multi-architecture-configuration[Configuring multi-architecture compute machines on an {product-title} cluster] -* xref:../../post_installation_configuration/configuring-multi-arch-compute-machines/multiarch-tuning-operator.adoc#multiarch-tuning-operator[Managing workloads on multi-architecture clusters by using the Multiarch Tuning Operator]. +* xref:../../post_installation_configuration/configuring-multi-arch-compute-machines/multiarch-tuning-operator.adoc#multiarch-tuning-operator[Managing workloads on multi-architecture clusters by using the Multiarch Tuning Operator]. * xref:../../updating/updating_a_cluster/updating-cluster-web-console.adoc#updating-cluster-web-console[Updating a cluster using the web console] * xref:../../updating/updating_a_cluster/updating-cluster-cli.adoc#updating-cluster-cli[Updating a cluster using the CLI] * xref:../../updating/understanding_updates/intro-to-updates.adoc#understanding-clusterversion-conditiontypes_understanding-openshift-updates[Understanding cluster version condition types] * xref:../../updating/understanding_updates/understanding-update-channels-release.adoc#understanding-update-channels-releases[Understanding update channels and releases] -* xref:../../installing/installing-preparing.adoc#installing-preparing-selecting-cluster-type[Selecting a cluster installation type] +* xref:../../installing/overview/installing-preparing.adoc#installing-preparing-selecting-cluster-type[Selecting a cluster installation type] * xref:../../machine_management/deploying-machine-health-checks.adoc#machine-health-checks-about_deploying-machine-health-checks[About machine health checks] diff --git a/upgrading/rosa-hcp-upgrading.adoc b/upgrading/rosa-hcp-upgrading.adoc index 5aaafb638953..553fc69313b1 100644 --- a/upgrading/rosa-hcp-upgrading.adoc +++ b/upgrading/rosa-hcp-upgrading.adoc @@ -6,25 +6,56 @@ 
include::_attributes/attributes-openshift-dedicated.adoc[] toc::[] -You can upgrade {hcp-title-first} clusters by individually upgrading the hosted control plane and the machine pools with the ROSA command line interface (CLI), `rosa`. +include::modules/rosa-hcp-upgrade-options.adoc[leveloffset=+1] -Use one of the following methods to upgrade your HCP clusters: +.Additional resources +* xref:../cli_reference/rosa_cli/rosa-manage-objects-cli.adoc#rosa-edit-machinepool_rosa-managing-objects-cli[ROSA CLI reference: `rosa edit machinepool`] -* Upgrade only your hosted control plane. This does not impact your worker nodes. -* Upgrade only your machine pool. This initiates a rolling reboot of a specific machine pool and temporarily impacts the worker nodes on the specific machine pool. It does not impact all your worker nodes if you have multiple machine pools. -* Upgrade multiple machine pools simultaneously. This initiates a rolling reboot of worker nodes in the updated machine pools. This allows for updating multiple nodes simultaneously within a cluster. -* Upgrade your hosted control plane first and then your machine pool. -+ -[NOTE] -==== -If you want to upgrade both your hosted control plane and your machine pool to the same version, you must upgrade the hosted control plane first. -==== +//This cannot be a module if we want to use the xrefs +[id="rosa-lifecycle-policy_{context}"] +== Life cycle policies and planning -To plan an upgrade, review the xref:../rosa_architecture/rosa_policy_service_definition/rosa-hcp-life-cycle.adoc#rosa-hcp-life-cycle[{hcp-title} update life cycle] documentation. The life cycle page includes release definitions, support and upgrade requirements, installation policy information, and life cycle dates. +To plan an upgrade, review the +ifdef::openshift-rosa,openshift-rosa-classic[] +xref:../rosa_architecture/rosa_policy_service_definition/rosa-life-cycle.adoc#rosa-life-cycle[{product-title} update life cycle]. 
+endif::openshift-rosa,openshift-rosa-classic[] +ifdef::openshift-rosa-hcp[] +xref:../rosa_architecture/rosa_policy_service_definition/rosa-hcp-life-cycle.adoc#rosa-hcp-life-cycle[{hcp-title} update life cycle]. +endif::openshift-rosa-hcp[] -[NOTE] -==== -Hosted control plane upgrade duration varies based on your workload configuration, and machine pool upgrade duration varies based on the number of worker nodes. -==== +The life cycle page includes release definitions, support and upgrade requirements, installation policy information, and life cycle dates. -include::modules/rosa-hcp-upgrading-cli-tutorial.adoc[leveloffset=+1] +Upgrades are manually initiated or automatically scheduled. Red Hat Site Reliability Engineers (SREs) monitor upgrade progress and remedy any issues encountered. + +include::modules/rosa-hcp-upgrading-cli-control-plane.adoc[leveloffset=+1] + +include::modules/rosa-hcp-upgrading-cli-machinepool.adoc[leveloffset=+1] + +[id="rosa-hcp-upgrading-cli-cluster_{context}"] +== Upgrading the whole cluster with the ROSA CLI + +Upgrading the entire cluster involves upgrading both the hosted control plane and nodes in the machine pools. However, these components cannot be upgraded at the same time. They must be upgraded in sequence. This can be done in any order. However, to maintain compatibility between nodes in the cluster, nodes in machine pools cannot use a newer version than the hosted control plane. Therefore, if both the hosted control plane and the nodes in your machine pools require an upgrade to the same OpenShift version, you must upgrade the hosted control plane first, followed by the machine pools. + +[discrete] +=== Prerequisites +* You have installed and configured the latest version of the ROSA CLI. +* No other upgrades are in progress or scheduled to take place at the same time as this upgrade. 
+ +ifdef::context[:prevcontext: {context}] + +:context: rosa-hcp-upgrading-whole-cluster + +include::modules/rosa-hcp-upgrading-cli-control-plane.adoc[leveloffset=+2] + +ifdef::prevcontext[:context: {prevcontext}] + +ifdef::context[:prevcontext: {context}] + +:context: rosa-hcp-upgrading-whole-cluster + +include::modules/rosa-hcp-upgrading-cli-machinepool.adoc[leveloffset=+2] + +ifdef::prevcontext[:context: {prevcontext}] +ifndef::prevcontext[:!context:] + +//include::modules/rosa-hcp-upgrading-cli-tutorial.adoc[leveloffset=+1] \ No newline at end of file diff --git a/virt/about_virt/virt-architecture.adoc b/virt/about_virt/virt-architecture.adoc index ae902a55b452..bba543f5d4dc 100644 --- a/virt/about_virt/virt-architecture.adoc +++ b/virt/about_virt/virt-architecture.adoc @@ -12,7 +12,6 @@ The Operator Lifecycle Manager (OLM) deploys operator pods for each component of * Storage: `cdi-operator` * Network: `cluster-network-addons-operator` * Scaling: `ssp-operator` -* Templating: `tekton-tasks-operator` OLM also deploys the `hyperconverged-cluster-operator` pod, which is responsible for the deployment, configuration, and life cycle of other components, and several helper pods: `hco-webhook`, and `hyperconverged-cluster-cli-download`. diff --git a/virt/backup_restore/virt-backup-restore-overview.adoc b/virt/backup_restore/virt-backup-restore-overview.adoc index 6b6dce95c5e8..3630074a4c1a 100644 --- a/virt/backup_restore/virt-backup-restore-overview.adoc +++ b/virt/backup_restore/virt-backup-restore-overview.adoc @@ -9,9 +9,23 @@ include::_attributes/common-attributes.adoc[] toc::[] -You can install the {oadp-first} with {VirtProductName} by installing the {oadp-short} Operator and configuring a backup location. Then, you can install the Data Protection Application. +[IMPORTANT] +==== +Red Hat supports using {VirtProductName} 4.14 or later with {oadp-short} 1.3.x or later. 
+ +{oadp-short} versions earlier than 1.3.0 are not supported for backup and restore of {VirtProductName}. +==== +ifndef::openshift-rosa,openshift-dedicated[] Back up and restore virtual machines by using the xref:../../backup_and_restore/index.adoc#application-backup-restore-operations-overview[{oadp-full}]. +endif::openshift-rosa,openshift-dedicated[] + +ifdef::openshift-rosa,openshift-dedicated[] +Back up and restore virtual machines by using the {oadp-full}. +endif::openshift-rosa,openshift-dedicated[] + +You can install the {oadp-first} with {VirtProductName} by installing the {oadp-short} Operator and configuring a backup location. You can then install the Data Protection Application. + [NOTE] ==== @@ -27,12 +41,20 @@ The following storage options are excluded: * Volume snapshot backup and restore +ifndef::openshift-rosa,openshift-dedicated[] For more information, see xref:../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-backing-up-applications-restic-doc.adoc#oadp-backing-up-applications-restic-doc[Backing up applications with File System Backup: Kopia or Restic]. +endif::openshift-rosa,openshift-dedicated[] ==== -To install the {oadp-short} Operator in a restricted network environment, you must first disable the default OperatorHub sources and mirror the Operator catalog. See xref:../../operators/admin/olm-restricted-networks.adoc#olm-restricted-networks[Using Operator Lifecycle Manager on restricted networks] for details. + +To install the {oadp-short} Operator in a restricted network environment, you must first disable the default OperatorHub sources and mirror the Operator catalog. + +ifndef::openshift-rosa,openshift-dedicated[] +See xref:../../operators/admin/olm-restricted-networks.adoc#olm-restricted-networks[Using Operator Lifecycle Manager on restricted networks] for details. 
+endif::openshift-rosa,openshift-dedicated[] include::modules/install-and-configure-oadp-kubevirt.adoc[leveloffset=+1] +ifndef::openshift-rosa,openshift-dedicated[] [role="_additional-resources"] .Additional resources @@ -40,17 +62,10 @@ include::modules/install-and-configure-oadp-kubevirt.adoc[leveloffset=+1] * xref:../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc#backing-up-applications[`Backup` custom resource (CR)] * xref:../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/restoring-applications.adoc#restoring-applications[`Restore` CR] * xref:../../operators/admin/olm-restricted-networks.adoc#olm-restricted-networks[Using Operator Lifecycle Manager on restricted networks] +endif::openshift-rosa,openshift-dedicated[] include::modules/oadp-installing-dpa-1-3.adoc[leveloffset=+1] -[IMPORTANT] -==== -Red Hat supports using {VirtProductName} 4.14 or later with {oadp-short} 1.3.x or later. - -{oadp-short} versions before 1.3.0 are not supported for back up and restore of {VirtProductName}. -==== - :!provider: :!credentials: :!virt-backup-restore-overview: - diff --git a/virt/backup_restore/virt-disaster-recovery.adoc b/virt/backup_restore/virt-disaster-recovery.adoc index d455de9f5d50..da84da6200d9 100644 --- a/virt/backup_restore/virt-disaster-recovery.adoc +++ b/virt/backup_restore/virt-disaster-recovery.adoc @@ -9,3 +9,10 @@ toc::[] {VirtProductName} supports using disaster recovery (DR) solutions to ensure that your environment can recover after a site outage. To use these methods, you must plan your {VirtProductName} deployment in advance. 
include::modules/virt-about-dr-methods.adoc[leveloffset=+1] +include::modules/virt-defining-apps-for-dr.adoc[leveloffset=+1] +include::modules/virt-vm-behavior-dr.adoc[leveloffset=+1] +include::modules/virt-metro-dr-odf.adoc[leveloffset=+1] + +[role="_additional-resources"] +.Additional resources +* link:https://docs.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.10[Red{nbsp}Hat Advanced Cluster Management for Kubernetes 2.10] \ No newline at end of file diff --git a/virt/getting_started/virt-using-the-cli-tools.adoc b/virt/getting_started/virt-using-the-cli-tools.adoc index 779c00d1a161..ca0a4439de8e 100644 --- a/virt/getting_started/virt-using-the-cli-tools.adoc +++ b/virt/getting_started/virt-using-the-cli-tools.adoc @@ -1,6 +1,6 @@ :_mod-docs-content-type: ASSEMBLY [id="virt-using-the-cli-tools"] -= Using the virtctl and libguestfs CLI tools += Using the CLI tools include::_attributes/common-attributes.adoc[] :context: virt-using-the-cli-tools :toclevels: 3 @@ -28,3 +28,7 @@ include::modules/virt-deploying-libguestfs-with-virtctl.adoc[leveloffset=+1] include::modules/virt-about-libguestfs-tools-virtctl-guestfs.adoc[leveloffset=+2] +[id="using_ansible_{context}"] +== Using Ansible +To use the Ansible collection for {VirtProductName}, see link:https://console.redhat.com/ansible/automation-hub/repo/published/redhat/openshift_virtualization[Red{nbsp}Hat Ansible Automation Hub] ({hybrid-console}). 
+ diff --git a/virt/getting_started/virt-web-console-overview.adoc b/virt/getting_started/virt-web-console-overview.adoc deleted file mode 100644 index b1c809b3b2ce..000000000000 --- a/virt/getting_started/virt-web-console-overview.adoc +++ /dev/null @@ -1,1860 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="virt-web-console-overview"] -= Web console overview -include::_attributes/common-attributes.adoc[] -:context: virt-web-console-overview -:toclevels: 4 - -toc::[] - -The *Virtualization* section of the {product-title} web console contains the following pages for managing and monitoring your {VirtProductName} environment. - -.*Virtualization* pages -[cols="1,3", options="header"] -|==== -|Page -|Description - -|xref:../../virt/getting_started/virt-web-console-overview.adoc#overview-page_virt-web-console-overview[*Overview* page] -|Manage and monitor the {VirtProductName} environment. - -|xref:../../virt/getting_started/virt-web-console-overview.adoc#catalog-page_virt-web-console-overview[*Catalog* page] -|Create virtual machines from a catalog of templates. - -|xref:../../virt/getting_started/virt-web-console-overview.adoc#virtualmachines-page_virt-web-console-overview[*VirtualMachines* page] -|Create and manage virtual machines. - -|xref:../../virt/getting_started/virt-web-console-overview.adoc#templates-page_virt-web-console-overview[*Templates* page] -|Create and manage templates. - -|xref:../../virt/getting_started/virt-web-console-overview.adoc#instancetypes-page_virt-web-console-overview[*InstanceTypes* page] -|Create and manage virtual machine instance types. - -|xref:../../virt/getting_started/virt-web-console-overview.adoc#instancetypes-page_virt-web-console-overview[*Preferences* page] -|Create and manage virtual machine preferences. - -|xref:../../virt/getting_started/virt-web-console-overview.adoc#bootablevolumes-page_virt-web-console-overview[*Bootable volumes* page] -|Create and manage DataSources for bootable volumes. 
- -|xref:../../virt/getting_started/virt-web-console-overview.adoc#migrationpolicies-page_virt-web-console-overview[*MigrationPolicies* page] -|Create and manage migration policies for workloads. - -|xref:../../virt/getting_started/virt-web-console-overview.adoc#checkups-page_virt-web-console-overview[*Checkups* page] -|Run network latency and storage checkups for virtual machines. -|==== - -.Key -[cols="1,3", options="header"] -|==== -|Icon -|Description - -|image:icon-pencil.png[title="pencil icon",20] -|Edit icon - -|image:icon-link.png[title="link icon",20] -|Link icon - -|image:virt_icon_start.png[title="start icon",20] -|Start VM icon - -|image:virt_icon_stop.png[title="stop icon",20] -|Stop VM icon - -|image:virt_icon_restart.png[title="restart icon",20] -|Restart VM icon - -|image:virt_icon_pause.png[title="pause icon",20] -|Pause VM icon - -|image:virt_icon_unpause.png[title="unpause icon",20] -|Unpause VM icon -|==== - -[id="overview-page_virt-web-console-overview"] -== Overview page - -The *Overview* page displays resources, metrics, migration progress, and cluster-level settings. - -.*Overview* page -[%collapsible] -===== -[cols="1,3", options="header"] -|==== -|Element -|Description - -|*Download virtctl* image:icon-link.png[title="link icon",20] -|Download the `virtctl` command line tool to manage resources. - -|xref:../../virt/getting_started/virt-web-console-overview.adoc#overview-overview_virt-web-console-overview[*Overview* tab] -|Resources, usage, alerts, and status. - -|xref:../../virt/getting_started/virt-web-console-overview.adoc#overview-top-consumers_virt-web-console-overview[*Top consumers* tab] -|Top consumers of CPU, memory, and storage resources. - -|xref:../../virt/getting_started/virt-web-console-overview.adoc#overview-migrations_virt-web-console-overview[*Migrations* tab] -|Status of live migrations. 
- -|xref:../../virt/getting_started/virt-web-console-overview.adoc#overview-settings_virt-web-console-overview[*Settings* tab] -|The *Settings* tab contains the *Cluster* tab, *User* tab, and *Preview features* tab. - -|*Settings* -> xref:../../virt/getting_started/virt-web-console-overview.adoc#overview-settings-cluster_virt-web-console-overview[*Cluster* tab] -|{VirtProductName} version, update status, live migration, templates project, load balancer service, guest management, resource management, and SCSI persistent reservation settings. - -|*Settings* -> xref:../../virt/getting_started/virt-web-console-overview.adoc#overview-settings-user_virt-web-console-overview[*User* tab] -|Public SSH keys, user permissions, and welcome information settings. - -|*Settings* -> *Preview features* -|Enable select link:https://access.redhat.com/support/offerings/techpreview/[preview features] in the web console. Features in this tab change frequently. - -Preview features are disabled by default and must not be enabled in production environments. -|==== -===== - -[id="overview-overview_virt-web-console-overview"] -=== Overview tab - -The *Overview* tab displays resources, usage, alerts, and status. - -.*Overview* tab -[%collapsible] -===== -[cols="1a,3a", options="header"] -|==== -|Element -|Description - -|*Getting started resources* card -|* *Quick Starts* tile: Learn how to create, import, and run virtual machines with step-by-step instructions and tasks. -* *Feature highlights* tile: Read the latest information about key virtualization features. -ifndef::openshift-rosa,openshift-dedicated[] -* *Related operators* tile: Install Operators such as the Kubernetes NMState Operator or the {rh-storage} Operator. -endif::openshift-rosa,openshift-dedicated[] -ifdef::openshift-rosa,openshift-dedicated[] -* *Related operators* tile: Install Operators such as the Kubernetes NMState Operator. 
-endif::openshift-rosa,openshift-dedicated[] - -|*Memory* tile -|Memory usage, with a chart showing the last 1 day's trend. - -|*Storage* tile -|Storage usage, with a chart showing the last 1 day's trend. - -|*vCPU usage* tile -|vCPU usage, with a chart showing the last 1 day's trend. - -|*VirtualMachines* tile -|Number of virtual machines, with a chart showing the last 1 day's trend. - -|*Alerts* tile|{VirtProductName} alerts, grouped by severity. - -|*VirtualMachine statuses* tile -|Number of virtual machines, grouped by status. - -|*VirtualMachines per resource* chart -|Number of virtual machines created from templates and instance types. -|==== -===== - -[id="overview-top-consumers_virt-web-console-overview"] -=== Top consumers tab - -The *Top consumers* tab displays the top consumers of CPU, memory, and storage. - -.*Top consumers* tab -[%collapsible] -===== -[cols="1,3", options="header"] -|==== -|Element -|Description - -|*View virtualization dashboard* image:icon-link.png[title="link icon",20] -|Link to *Observe -> Dashboards*, which displays the top consumers for {VirtProductName}. - -|*Time period* list -|Select a time period to filter the results. - -|*Top consumers* list -|Select the number of top consumers to filter the results. - -|*CPU* chart -|Virtual machines with the highest CPU usage. - -|*Memory* chart -|Virtual machines with the highest memory usage. - -|*Memory swap traffic* chart -|Virtual machines with the highest memory swap traffic. - -|*vCPU wait* chart -|Virtual machines with the highest vCPU wait periods. - -|*Storage throughput* chart -|Virtual machines with the highest storage throughput usage. - -|*Storage IOPS* chart -|Virtual machines with the highest storage input/output operations per second usage. -|==== -===== - -[id="overview-migrations_virt-web-console-overview"] -=== Migrations tab - -The *Migrations* tab displays the status of virtual machine migrations. 
- -.*Migrations* tab -[%collapsible] -===== -[cols="1,3", options="header"] -|==== -|Element -|Description - -|*Time period* list -|Select a time period to filter virtual machine migrations. - -|*VirtualMachineInstanceMigrations information* table -|List of virtual machine migrations. -|==== -===== - -[id="overview-settings_virt-web-console-overview"] -=== Settings tab - -The *Settings* tab displays cluster-wide settings. - -.Tabs on the *Settings* tab -[%collapsible] -===== -[cols="1,3", options="header"] -|==== -|Tab -|Description - -|xref:../../virt/getting_started/virt-web-console-overview.adoc#overview-settings-cluster_virt-web-console-overview[*Cluster* tab] -|{VirtProductName} version, update status, live migration, templates project, load balancer service, guest management, resource management, and SCSI persistent reservation settings. - -|xref:../../virt/getting_started/virt-web-console-overview.adoc#overview-settings-user_virt-web-console-overview[*User* tab] -|Public SSH key management, user permissions, and welcome information settings. - -|xref:../../virt/getting_started/virt-web-console-overview.adoc#overview-settings-preview_virt-web-console-overview[*Preview features* tab] -|Enable select link:https://access.redhat.com/support/offerings/techpreview/[preview features] in the web console. These features change frequently. -|==== -===== - -[id="overview-settings-cluster_virt-web-console-overview"] -==== Cluster tab - -The *Cluster* tab displays the {VirtProductName} version and update status. You configure live migration and other settings on the *Cluster* tab. - -.*Cluster* tab -[%collapsible] -===== -[cols="1,3", options="header"] -|==== -|Element -|Description - -|*Installed version* -|{VirtProductName} version. - -|*Update status* -|{VirtProductName} update status. - -|*Channel* -|{VirtProductName} update channel. 
- -|*General Settings* section -|Expand this section to configure the *Live migration* settings, the *SSH configuration* settings, and the *Template project* settings. - -|*General Settings* -> *Live Migration* section -|Expand this section to configure live migration settings. - -|*General Settings* -> *Live Migration* -> *Max. migrations per cluster* field -|Select the maximum number of live migrations per cluster. - -|*General Settings* -> *Live Migration* -> *Max. migrations per node* field -|Select the maximum number of live migrations per node. - -|*General Settings* -> *Live Migration* -> *Live migration network* list -|Select a dedicated secondary network for live migration. - -|*General Settings* -> *SSH Configuration* -> *SSH over LoadBalancer service* switch -|Enable the creation of LoadBalancer services for SSH connections to VMs. - -You must configure a load balancer. - -|*General Settings* -> *SSH Configuration* -> *SSH over NodePort service* switch -|Allow the creation of node port services for SSH connections to virtual machines. - -|*General Settings* -> *Template project* section -|Expand this section to select a project for Red Hat templates. The default project is `openshift`. - -To store Red Hat templates in multiple projects, xref:../../virt/getting_started/virt-web-console-overview.adoc#templates-page_virt-web-console-overview[clone the template] and then select a project for the cloned template. - -|*Guest Management* -|Expand this section to configure the *Automatic subscription of new RHEL VirtualMachines* settings and the *Enable guest system log access* switch. - -|*Guest Management* -> *Automatic subscription of new RHEL VirtualMachines* -|Expand this section to enable automatic subscription for {op-system-base-full} virtual machines and guest system log access. - -To enable this feature, you need cluster administrator permissions, an organization ID, and an activation key. 
- -|*Guest Management* -> *Automatic subscription of new RHEL VirtualMachines* -> *Activation Key* field -|Enter the activation key. - -|*Guest Management* -> *Automatic subscription of new RHEL VirtualMachines* -> *Organization ID* field -|Enter the organization ID. - -|*Guest Management* -> *Automatic subscription of new RHEL VirtualMachines* -> *Enable auto updates for RHEL VirtualMachines* switch -|Enable the automatic pulling of updates from the RHEL repository. - -To enable this feature, you need an activation key and organization ID. - -|*Guest Management* -> *Enable guest system log access* switch -|Enable access to the virtual machine's guest system log. - -|*Resource Management* -|Expand this section to configure the *Auto-compute CPU limits* settings and the *Kernel Samepage Merging (KSM)* switch. - -|*Resource Management* -> *Auto-compute CPU limits* -|Enable automatic computing CPU limits on projects containing labels. - -|*Resource Management* -> *Kernel Samepage Merging (KSM)* -|Enable KSM for all nodes in the cluster. - -|*SCSI Persistent Reservation* -|Expand this section to configure the *Enable persistent reservation* switch. - -|*SCSI Persistent Reservation* -> *Enable persistent reservation* -|Enable SCSI reservation for disks. This option must be used only for cluster-aware applications. -|==== -===== - -[id="overview-settings-user_virt-web-console-overview"] -==== User tab - -You view user permissions and manage public SSH keys and welcome information on the *User* tab. - -.*User* tab -[%collapsible] -===== -[cols="1,3", options="header"] -|==== -|Element -|Description - -|*Manage SSH keys* section -|Expand this section to add public SSH keys to a project. - -The keys are added automatically to all virtual machines that you subsequently create in the selected project. - -|*Permissions* section -|Expand this section to view cluster-wide user permissions. 
- -|*Welcome information* section -|Expand this section to show or hide the *Welcome information* dialog. -|==== -===== - -[id="overview-settings-preview_virt-web-console-overview"] -==== Preview features tab - -Enable select link:https://access.redhat.com/support/offerings/techpreview/[preview features] in the web console. Features in this tab change frequently. - -[id="catalog-page_virt-web-console-overview"] -== Catalog page - -You create a virtual machine from a template or instance type on the *Catalog* page. - -.*Catalog* page -[%collapsible] -===== -[cols="1,3", options="header"] -|==== -|Element -|Description - -|xref:../../virt/getting_started/virt-web-console-overview.adoc#catalog-instancetypes_virt-web-console-overview[*InstanceTypes* tab] -|Displays bootable volumes and instance types for creating a virtual machine. - -|xref:../../virt/getting_started/virt-web-console-overview.adoc#catalog-template_virt-web-console-overview[*Template catalog* tab] -|Displays a catalog of templates for creating a virtual machine. -|==== -===== - -[id="catalog-instancetypes_virt-web-console-overview"] -=== InstanceTypes tab - -You create a virtual machine from an instance type on the *InstanceTypes* tab. - -[%collapsible] -===== -[cols="1,3a", options="header"] -|==== -|Element -|Description - -|*Add volume* button -|Click to upload a volume or to use an existing persistent volume claim, volume snapshot, or data source. - -|*Volumes project* field -|Project in which bootable volumes are stored. The default is `openshift-virtualization-os-images`. - -|*Filter* field -|Filter boot sources by operating system or resource. - -|Search field -|Search boot sources by name. - -|*Manage columns* icon -|Select up to 9 columns to display in the table. - -|Volume table -|Select a bootable volume for your virtual machine. - -|*Red Hat provided* tab -|Select an instance type provided by Red Hat. 
- -|*User provided* tab -|Select an instance type that you created on the *InstanceType* page. - -|*VirtualMachine details* pane -|Displays the virtual machine settings. - -|*Name* field -|Optional: Enter the virtual machine name. - -|*Storage class* field -|Select a storage class. - -|*Public SSH key* -|Click the edit icon to add a new or existing public SSH key. - -|*Dynamic SSH key injection* switch -|Enable dynamic SSH key injection. - -Only {op-system-base} supports dynamic SSH key injection. - -|*Start this VirtualMachine after creation* checkbox -|Clear this checkbox to prevent the virtual machine from starting automatically. - -|*Create VirtualMachine* button -|Creates a virtual machine. - -|*View YAML & CLI* button -|Displays the YAML configuration file and the `virtctl create` command to create the virtual machine from the command line. -|==== -===== - -[id="catalog-template_virt-web-console-overview"] -=== Template catalog tab - -You select a template on the *Template catalog* tab to create a virtual machine. - -.*Template catalog* tab -[%collapsible] -===== -[cols="1,3a", options="header"] -|==== -|Element -|Description - -|*Template project* list -|Select the project in which Red Hat templates are located. - -By default, Red Hat templates are stored in the `openshift` project. You can edit the template project on the xref:../../virt/getting_started/virt-web-console-overview.adoc#overview-settings-cluster_virt-web-console-overview[*Overview* page -> *Settings* tab -> *Cluster* tab]. - -|*All items* \| *Default templates* \| *User templates* -|Click *All items* to display all available templates, *Default templates* to display the default templates, and *User templates* to display the user created templates. - -|*Boot source available* checkbox -|Select the checkbox to display templates with an available boot source. - -|*Operating system* checkboxes -|Select checkboxes to display templates with selected operating systems. 
- -|*Workload* checkboxes -|Select checkboxes to display templates with selected workloads. - -|Search field -|Search templates by keyword. - -|Template tiles -|Click a template tile to view template details and to create a virtual machine. -|==== -===== - -[id="virtualmachines-page_virt-web-console-overview"] -== VirtualMachines page - -You create and manage virtual machines on the *VirtualMachines* page. - -.*VirtualMachines* page -[%collapsible] -===== -[cols="1,3", options="header"] -|==== -|Element -|Description - -|*Create* button -|Create a virtual machine from a template, volume, or YAML configuration file. - -|*Filter* field -|Filter virtual machines by status, template, operating system, or node. - -|Search field -|Search for virtual machines by name, label, or IP address. - -|*Manage columns* icon -|Select up to 9 columns to display in the table. The *Namespace* column is displayed only when *All Projects* is selected from the *Projects* list. - -|Virtual machines table -|List of virtual machines. - -Click the actions menu {kebab} beside a virtual machine to select *Stop*, *Restart*, *Pause*, *Clone*, *Migrate*, *Copy SSH command*, *Edit labels*, *Edit annotations*, or *Delete*. If you select *Stop*, *Force stop* replaces *Stop* in the action menu. Use *Force stop* to initiate an immediate shutdown if the operating system becomes unresponsive. - -Click a virtual machine to navigate to the *VirtualMachine details* page. -|==== -===== - -[id="virtualmachine-details-page_virt-web-console-overview"] -=== VirtualMachine details page - -You configure a virtual machine on the *Configuration* tab of the *VirtualMachine details* page. - -.*VirtualMachine details* page -[%collapsible] -===== -[cols="1,3", options="header"] -|==== -|Element -|Description - -|*Actions* menu -|Click the *Actions* menu to select *Stop*, *Restart*, *Pause*, *Clone*, *Migrate*, *Copy SSH command*, *Edit labels*, *Edit annotations*, or *Delete*. 
If you select *Stop*, *Force stop* replaces *Stop* in the action menu. Use *Force stop* to initiate an immediate shutdown if the operating system becomes unresponsive. - -|xref:../../virt/getting_started/virt-web-console-overview.adoc#virtualmachine-details-overview_virt-web-console-overview[*Overview* tab] -|Resource usage, alerts, disks, and devices. - -|xref:../../virt/getting_started/virt-web-console-overview.adoc#virtualmachine-details-metrics_virt-web-console-overview[*Metrics* tab] -|Memory, CPU, storage, network, and migration metrics. - -|xref:../../virt/getting_started/virt-web-console-overview.adoc#virtualmachine-details-yaml_virt-web-console-overview[*YAML* tab] -|Virtual machine YAML configuration file. - -|xref:../../virt/getting_started/virt-web-console-overview.adoc#virtualmachine-details-configuration_virt-web-console-overview[*Configuration* tab] -|Contains the *Details*, *Storage*, *Network*, *Scheduling*, *SSH*, *Initial run*, and *Metadata* tabs. - -|xref:../../virt/getting_started/virt-web-console-overview.adoc#virtualmachine-details-details_virt-web-console-overview[*Configuration* -> *Details* tab] -|Configure the *VirtualMachine details* of the VM. - -|xref:../../virt/getting_started/virt-web-console-overview.adoc#virtualmachine-details-storage_virt-web-console-overview[*Configuration* -> *Storage* tab] -|Configure the storage of the VM. - -|xref:../../virt/getting_started/virt-web-console-overview.adoc#virtualmachine-details-network_virt-web-console-overview[*Configuration* -> *Network* tab] -|Configure the network of the VM. - -|xref:../../virt/getting_started/virt-web-console-overview.adoc#virtualmachine-details-scheduling_virt-web-console-overview[*Configuration* -> *Scheduling* tab] -|Configure the schedule of a VM to run on specific nodes. - -|xref:../../virt/getting_started/virt-web-console-overview.adoc#virtualmachine-details-ssh_virt-web-console-overview[*Configuration* -> *SSH* tab] -|Configure the SSH settings of the VM. 
- -|xref:../../virt/getting_started/virt-web-console-overview.adoc#virtualmachine-details-initial-run_virt-web-console-overview[*Configuration* -> *Initial run* tab] -|Configure the cloud-init settings for the VM, or the Sysprep settings if the VM is Windows. - -|xref:../../virt/getting_started/virt-web-console-overview.adoc#virtualmachine-details-metadata_virt-web-console-overview[*Configuration* -> *Metadata* tab] -|Configure label and annotation metadata of the VM. - -|xref:../../virt/getting_started/virt-web-console-overview.adoc#virtualmachine-details-events_virt-web-console-overview[*Events* tab] -|View list of virtual machine events. - -|xref:../../virt/getting_started/virt-web-console-overview.adoc#virtualmachine-details-console_virt-web-console-overview[*Console* tab] -|Open a console session to the virtual machine. - -|xref:../../virt/getting_started/virt-web-console-overview.adoc#virtualmachine-details-snapshots_virt-web-console-overview[*Snapshots* tab] -|Create snapshots and restore virtual machines from snapshots. - -|xref:../../virt/getting_started/virt-web-console-overview.adoc#virtualmachine-details-diagnostics_virt-web-console-overview[*Diagnostics* tab] -|View status conditions and volume snapshot statuses. -|==== -===== - -[id="virtualmachine-details-overview_virt-web-console-overview"] -==== Overview tab - -The *Overview* tab displays resource usage, alerts, and configuration information. - -.*Overview* tab -[%collapsible] -===== -[cols="1,3a", options="header"] -|==== -|Element -|Description - -|*Details* tile -|General virtual machine information. - -|*Utilization* tile -|*CPU*, *Memory*, *Storage*, and *Network transfer* charts. By default, *Network transfer* displays the sum of all networks. To view the breakdown for a specific network, click *Breakdown by network*. - -|*Hardware devices* tile -|GPU and host devices. - -|*File systems* tile -|File system information. - -This information is provided by the guest agent. 
- -|*Services* tile -|List of services. - -|*Active users* tile -|List of active users. - -|*Alerts* tile -|{VirtProductName} alerts, grouped by severity. - -|*General* tile -|*Namespace*, *Node*, *VirtualMachineInstance*, *Pod*, and *Owner* information. - -|*Snapshots* tile -|*Take snapshot* image:icon-link.png[title="link icon",20] and snapshots table. - -|*Network interfaces* tile -|Network interfaces table. - -|*Disks* tile -|Disks table. -|==== -===== - -[id="virtualmachine-details-metrics_virt-web-console-overview"] -==== Metrics tab - -The *Metrics* tab displays memory, CPU, network, storage, and migration usage charts, as well as live migration progress. - -.*Metrics* tab -[%collapsible] -===== -[cols="1,3a", options="header"] -|==== -|Element -|Description - -|*Time range* list -|Select a time range to filter the results. - -|*Virtualization dashboard* image:icon-link.png[title="link icon",20] -|Link to the *Workloads* tab of the current project. - -|*Utilization* -|*Memory* and *CPU* charts. - -|*Storage* -|*Storage total read/write* and *Storage IOPS total read/write* charts. - -|*Network* -|*Network in*, *Network out*, *Network bandwidth*, and *Network interface* charts. Select *All networks* or a specific network from the *Network interface* list. - -|*Migration* -|*Migration* and *KV data transfer rate* charts. - -|*LiveMigration progress* -|*LiveMigration* completion status. -|==== -===== - -[id="virtualmachine-details-yaml_virt-web-console-overview"] -==== YAML tab - -You configure the virtual machine by editing the YAML file on the *YAML* tab. - -.*YAML* tab -[%collapsible] -===== -[cols="1,3a", options="header"] -|==== -|Element -|Description - -|*Save* button -|Save changes to the YAML file. - -|*Reload* button -|Discard your changes and reload the YAML file. - -|*Cancel* button -|Exit the *YAML* tab. - -|*Download* button -|Download the YAML file to your local machine. 
-|==== -===== - -[id="virtualmachine-details-configuration_virt-web-console-overview"] -==== Configuration tab - -You configure scheduling, network interfaces, disks, and other options on the *Configuration* tab. - -.Tabs on the *Configuration* tab -[%collapsible] -===== -[cols="1,3a", options="header"] -|==== -|Element -|Description - -|Search field -|Search configurations by keyword. - -|xref:../../virt/getting_started/virt-web-console-overview.adoc#virtualmachine-details-details_virt-web-console-overview[*Details* tab] -|Virtual machine details. - -|xref:../../virt/getting_started/virt-web-console-overview.adoc#virtualmachine-details-storage_virt-web-console-overview[*Storage* tab] -|Configure the storage of the VM. - -|xref:../../virt/getting_started/virt-web-console-overview.adoc#virtualmachine-details-network_virt-web-console-overview[*Network* tab] -|Configure the network of the VM. - -|xref:../../virt/getting_started/virt-web-console-overview.adoc#virtualmachine-details-scheduling_virt-web-console-overview[*Scheduling* tab] -|Configure the schedule of a VM to run on specific nodes. - -|xref:../../virt/getting_started/virt-web-console-overview.adoc#virtualmachine-details-ssh_virt-web-console-overview[*SSH* tab] -|Configure the SSH settings of the VM. - -|xref:../../virt/getting_started/virt-web-console-overview.adoc#virtualmachine-details-initial-run_virt-web-console-overview[*Initial run* tab] -|Configure the cloud-init settings for the VM, or the Sysprep settings if the VM is Windows. - -|xref:../../virt/getting_started/virt-web-console-overview.adoc#virtualmachine-details-metadata_virt-web-console-overview[*Metadata* tab] -|Configure label and annotation metadata of the VM. -|==== -===== - -[id="virtualmachine-details-details_virt-web-console-overview"] -===== Details tab - -You manage the VM details on the *Details* tab. 
- -.*Details* tab -[%collapsible] -===== -[cols="1,3a", options="header"] -|==== -|Setting -|Description - -|*Description* -|Click the edit icon to enter a description. - -|*Workload profile* -|Click the edit icon to edit the workload profile. - -|*CPU \| Memory* -|Click the edit icon to edit the *CPU \| Memory* request. Restart the virtual machine to apply the change. - -|*Hostname* -|Hostname of the virtual machine. Restart the virtual machine to apply the change. - -|*Headless mode* -|Enable headless mode. Restart the virtual machine to apply the change. - -|*Guest system log access* -|Enable guest system log access. - -|*Hardware devices* -|Manage GPU and host devices. - -|*Boot management* -|Change the boot mode and order, and enable *Start in pause mode*. -|==== -===== - -[id="virtualmachine-details-storage_virt-web-console-overview"] -===== Storage tab - -You manage the disks and environment of the VM on the *Storage* tab. - -.*Storage* tab -[%collapsible] -===== -[cols="1,3a", options="header"] -|==== -|Setting -|Description - -|*Add disk* button -|Add a disk to the virtual machine. - -|*Filter* field -|Filter by disk type. - -|Search field -|Search for a disk by name. - -|*Mount Windows drivers disk* checkbox -|Select to mount a `virtio-win` container disk as a CD-ROM to install VirtIO drivers. - -|Disks table -|List of virtual machine disks. - -Click the actions menu {kebab} beside a disk to select *Edit* or *Detach*. - -|*Add Config Map, Secret or Service Account* -|Click the link and select a config map, secret, or service account from the resource list. -|==== -===== - -[id="virtualmachine-details-network_virt-web-console-overview"] -===== Network tab - -You manage network interfaces on the *Network* tab. - -.*Network interfaces* table -[%collapsible] -===== -[cols="1,3a", options="header"] -|==== -|Setting -|Description - -|*Add network interface* button -|Add a network interface to the virtual machine. - -|*Filter* field -|Filter by interface type. 
- -|Search field -|Search for a network interface by name or by label. - -|*Network interface* table -|List of network interfaces. - -Click the actions menu {kebab} beside a network interface to select *Edit* or *Delete*. -|==== -===== - -[id="virtualmachine-details-scheduling_virt-web-console-overview"] -===== Scheduling tab - -You configure virtual machines to run on specific nodes on the *Scheduling* tab. - -Restart the virtual machine to apply changes. - -.*Scheduling* tab -[%collapsible] -===== -[cols="1,3a", options="header"] -|==== -|Setting -|Description - -|*Node selector* -|Click the edit icon to add a label to specify qualifying nodes. - -|*Tolerations* -|Click the edit icon to add a toleration to specify qualifying nodes. - -|*Affinity rules* -|Click the edit icon to add an affinity rule. - -|*Descheduler* switch -|Enable or disable the descheduler. The descheduler evicts a running pod so that the pod can be rescheduled onto a more suitable node. - -This field is disabled if the virtual machine cannot be live migrated. - -|*Dedicated resources* -|Click the edit icon to select *Schedule this workload with dedicated resources (guaranteed policy)*. - -|*Eviction strategy* -|Click the edit icon to select *LiveMigrate* as the virtual machine eviction strategy. -|==== -===== - -[id="virtualmachine-details-ssh_virt-web-console-overview"] -===== SSH tab - -You configure the SSH details on the *SSH* tab. - -.*SSH* tab -[%collapsible] -===== -[cols="1,3a", options="header"] -|==== -|Setting -|Description - -|*SSH access* section -|Expand this section to configure *SSH using virtctl* and *SSH service type*. - -|*Public SSH key* section -|Expand this section to configure public SSH keys and dynamic SSH public key injection. -|==== -===== - -[id="virtualmachine-details-initial-run_virt-web-console-overview"] -===== Initial run - -You manage cloud-init settings or configure Sysprep for Windows virtual machines on the *Initial run* tab. 
- -Restart the virtual machine to apply changes. - -.*Initial run* tab -[%collapsible] -===== -[cols="1,3a", options="header"] -|==== -|Element -|Description - -|*Cloud-init* -|Click the edit icon to edit the cloud-init settings. - -|*Sysprep* -|Click the edit icon to upload an `Autounattend.xml` or `Unattend.xml` answer file to automate Windows virtual machine setup. -|==== -===== - -[id="virtualmachine-details-metadata_virt-web-console-overview"] -===== Metadata tab - -You configure the labels and annotations on the *Metadata* tab. - -.*Metadata* tab -[%collapsible] -===== -[cols="1,3a", options="header"] -|==== -|Element -|Description - -|*Labels* -|Click the edit icon to manage your labels. - -|*Annotations* -|Click the edit icon to manage annotations. -|==== -===== - -[id="virtualmachine-details-events_virt-web-console-overview"] -==== Events tab - -The *Events* tab displays a list of virtual machine events. - -[id="virtualmachine-details-console_virt-web-console-overview"] -==== Console tab - -You can open a console session to the virtual machine on the *Console* tab. - -.*Console* tab -[%collapsible] -===== -[cols="1,3a", options="header"] -|==== -|Element -|Description - -|Guest login credentials section -|Expand *Guest login credentials* to view the credentials created with `cloud-init`. Click the copy icon to copy the credentials to the clipboard. - -|*Console* list -|Select *VNC console* or *Serial console*. - -The *Desktop viewer* option is displayed for Windows virtual machines. You must install an RDP client on a machine on the same network. - -|*Send key* list -|Select a key-stroke combination to send to the console. - -|*Paste* button -|Paste a string from your clipboard to the VNC console. - -|*Disconnect* button -|Disconnect the console connection. - -You must manually disconnect the console connection if you open a new console session. Otherwise, the first console session continues to run in the background. 
-|==== -===== - -[id="virtualmachine-details-snapshots_virt-web-console-overview"] -==== Snapshots tab - -You can create a snapshot, create a copy of a virtual machine from a snapshot, restore a snapshot, edit labels or annotations, and edit or delete volume snapshots on the *Snapshots* tab. - -.*Snapshots* tab -[%collapsible] -===== -[cols="1,3a", options="header"] -|==== -|Element -|Description - -|*Take snapshot* button -|Create a snapshot. - -|*Filter* field -|Filter snapshots by status. - -|Search field -|Search for snapshots by name or by label. - -|*Snapshot* table -|List of snapshots - -Click the snapshot name to edit the labels or annotations. - -Click the options menu {kebab} beside a snapshot to select *Create VirtualMachine*, *Restore VirtualMachine from snapshot*, or *Delete snapshot*. -|==== -===== - -[id="virtualmachine-details-diagnostics_virt-web-console-overview"] -==== Diagnostics tab - -You view the status conditions and volume snapshot status on the *Diagnostics* tab. - -.*Diagnostics* tab -[%collapsible] -===== -[cols="1,3a", options="header"] -|==== -|Element -|Description - -|*Status conditions* table -|Display a list of conditions that are reported for the virtual machine. - -|*Filter* field -|Filter status conditions by category and condition. - -|Search field -|Search status conditions by reason. - -|*Manage columns* icon -|Select up to 9 columns to display in the table. - -|*Volume snapshot status* table -|List of volumes, their snapshot enablement status, and reason. - -|*DataVolume status* table -|List of data volumes and their *Phase* and *Progress* values. -|==== -===== - -[id="templates-page_virt-web-console-overview"] -== Templates page - -You create, edit, and clone virtual machine templates on the *VirtualMachine Templates* page. - -[NOTE] -==== -You cannot edit a Red Hat template. However, you can clone a Red Hat template and edit it to create a custom template. 
-==== - -.*VirtualMachine Templates* page -[%collapsible] -===== -[cols="1,3a", options="header"] -|==== -|Element -|Description - -|*Create Template* button -|Create a template by editing a YAML configuration file. - -|*Filter* field -|Filter templates by type, boot source, template provider, or operating system. - -|Search field -|Search for templates by name or by label. - -|*Manage columns* icon -|Select up to 9 columns to display in the table. The *Namespace* column is only displayed when *All Projects* is selected from the *Projects* list. - -|Virtual machine templates table -|List of virtual machine templates. - -Click the actions menu {kebab} beside a template to select *Edit*, *Clone*, *Edit boot source*, *Edit boot source reference*, *Edit labels*, *Edit annotations*, or *Delete*. You cannot edit a Red Hat provided template. You can clone the Red Hat template and then edit the custom template. -|==== -===== - -[id="template-details-page_virt-web-console-overview"] -=== Template details page - -You view template settings and edit custom templates on the *Template details* page. - -.*Template details* page -[%collapsible] -===== -[cols="1,3", options="header"] -|==== -|Element -|Description - -|*YAML* switch -|Set to *ON* to view your live changes in the YAML configuration file. - -|*Actions* menu -|Click the *Actions* menu to select *Edit*, *Clone*, *Edit boot source*, *Edit boot source reference*, *Edit labels*, *Edit annotations*, or *Delete*. - -|xref:../../virt/getting_started/virt-web-console-overview.adoc#template-details-details_virt-web-console-overview[*Details* tab] -|Template settings and configurations. - -|xref:../../virt/getting_started/virt-web-console-overview.adoc#template-details-yaml_virt-web-console-overview[*YAML* tab] -|YAML configuration file. - -|xref:../../virt/getting_started/virt-web-console-overview.adoc#template-details-scheduling_virt-web-console-overview[*Scheduling* tab] -|Scheduling configurations. 
- -|xref:../../virt/getting_started/virt-web-console-overview.adoc#template-details-network-interfaces_virt-web-console-overview[*Network interfaces* tab] -|Network interface management. - -|xref:../../virt/getting_started/virt-web-console-overview.adoc#template-details-disks_virt-web-console-overview[*Disks* tab] -|Disk management. - -|xref:../../virt/getting_started/virt-web-console-overview.adoc#template-details-scripts_virt-web-console-overview[*Scripts* tab] -|Cloud-init, SSH key, and Sysprep management. - -|xref:../../virt/getting_started/virt-web-console-overview.adoc#template-details-parameters_virt-web-console-overview[*Parameters* tab] -|Name and cloud user password management. -|==== -===== - -[id="template-details-details_virt-web-console-overview"] -==== Details tab - -You configure a custom template on the *Details* tab. - -.*Details* tab -[%collapsible] -===== -[cols="1,3a", options="header"] -|==== -|Element -|Description - -|*Name* -|Template name. - -|*Namespace* -|Template namespace. - -|*Labels* -|Click the edit icon to edit the labels. - -|*Annotations* -|Click the edit icon to edit the annotations. - -|*Display name* -|Click the edit icon to edit the display name. - -|*Description* -|Click the edit icon to enter a description. - -|*Operating system* -|Operating system name. - -|*CPU\|Memory* -|Click the edit icon to edit the CPU\|Memory request. - -The number of CPUs is calculated by using the following formula: `sockets * threads * cores`. - -|*Machine type* -|Template machine type. - -|*Boot mode* -|Click the edit icon to edit the boot mode. - -|*Base template* -|Name of the base template used to create this template. - -|*Created at* -|Template creation date. - -|*Owner* -|Template owner. - -|*Boot order* -|Template boot order. - -|*Boot source* -|Boot source availability. - -|*Provider* -|Template provider. - -|*Support* -|Template support level. - -|*GPU devices* -|Click the edit icon to add a GPU device. 
- -|*Host devices* -|Click the edit icon to add a host device. - -|*Headless mode* -|Click the edit icon to set headless mode to *ON* and to disable VNC console. -|==== -===== - -[id="template-details-yaml_virt-web-console-overview"] -==== YAML tab - -You configure a custom template by editing the YAML file on the *YAML* tab. - -.*YAML* tab -[%collapsible] -===== -[cols="1,3a", options="header"] -|==== -|Element -|Description - -|*Save* button -|Save changes to the YAML file. - -|*Reload* button -|Discard your changes and reload the YAML file. - -|*Cancel* button -|Exit the *YAML* tab. - -|*Download* button -|Download the YAML file to your local machine. -|==== -===== - -[id="template-details-scheduling_virt-web-console-overview"] -==== Scheduling tab - -You configure scheduling on the *Scheduling* tab. - -.*Scheduling* tab -[%collapsible] -===== -[cols="1,3a", options="header"] -|==== -|Setting -|Description - -|*Node selector* -|Click the edit icon to add a label to specify qualifying nodes. - -|*Tolerations* -|Click the edit icon to add a toleration to specify qualifying nodes. - -|*Affinity rules* -|Click the edit icon to add an affinity rule. - -|*Descheduler* switch -|Enable or disable the descheduler. The descheduler evicts a running pod so that the pod can be rescheduled onto a more suitable node. - -|*Dedicated resources* -|Click the edit icon to select *Schedule this workload with dedicated resources (guaranteed policy)*. - -|*Eviction strategy* -|Click the edit icon to select *LiveMigrate* as the virtual machine eviction strategy. -|==== -===== - -[id="template-details-network-interfaces_virt-web-console-overview"] -==== Network interfaces tab - -You manage network interfaces on the *Network interfaces* tab. - -.*Network interfaces* tab -[%collapsible] -===== -[cols="1,3a", options="header"] -|==== -|Setting -|Description - -|*Add network interface* button -|Add a network interface to the template. - -|*Filter* field -|Filter by interface type. 
- -|Search field -|Search for a network interface by name or by label. - -|Network interface table -|List of network interfaces. - -Click the actions menu {kebab} beside a network interface to select *Edit* or *Delete*. -|==== -===== - -[id="template-details-disks_virt-web-console-overview"] -==== Disks tab - -You manage disks on the *Disks* tab. - -.*Disks* tab -[%collapsible] -===== -[cols="1,3a", options="header"] -|==== -|Setting -|Description - -|*Add disk* button -|Add a disk to the template. - -|*Filter* field -|Filter by disk type. - -|Search field -|Search for a disk by name. - -|Disks table -|List of template disks. - -Click the actions menu {kebab} beside a disk to select *Edit* or *Detach*. -|==== -===== - -[id="template-details-scripts_virt-web-console-overview"] -==== Scripts tab - -You manage the cloud-init settings, SSH keys, and Sysprep answer files on the *Scripts* tab. - -.*Scripts* tab -[%collapsible] -===== -[cols="1,3a", options="header"] -|==== -|Element -|Description - -|*Cloud-init* -|Click the edit icon to edit the cloud-init settings. - -|*Public SSH key* -|Click the edit icon to create a new secret or to attach an existing secret to a Linux virtual machine. - -|*Sysprep* -|Click the edit icon to upload an `Autounattend.xml` or `Unattend.xml` answer file to automate Windows virtual machine setup. -|==== -===== - -[id="template-details-parameters_virt-web-console-overview"] -==== Parameters tab - -You edit selected template settings on the *Parameters* tab. - -.*Parameters* tab -[%collapsible] -===== -[cols="1,3a", options="header"] -|==== -|Element -|Description - -|*NAME* -|Set the name parameters for a virtual machine created from this template. - -|*CLOUD_USER_PASSWORD* -|Set the cloud user password parameters for a virtual machine created from this template. -|==== -===== - -[id="instancetypes-page_virt-web-console-overview"] -== InstanceTypes page - -You view and manage virtual machine instance types on the *InstanceTypes* page. 
- -.*VirtualMachineClusterInstancetypes* page -[%collapsible] -===== -[cols="1,3a", options="header"] -|==== -|Element -|Description - -|*Create* button -|Create an instance type by editing a YAML configuration file. - -|Search field -|Search for an instance type by name or by label. - -|*Manage columns* icon -|Select up to 9 columns to display in the table. The *Namespace* column is only displayed when *All Projects* is selected from the *Projects* list. - -|Instance types table -|List of instance types. - -Click the actions menu {kebab} beside an instance type to select *Clone* or *Delete*. -|==== -===== - -Click an instance type to view the *VirtualMachineClusterInstancetypes details* page. - -[id="instancetypes-details-page_virt-web-console-overview"] -=== VirtualMachineClusterInstancetypes details page - -You configure an instance type on the *VirtualMachineClusterInstancetypes details* page. - -.*VirtualMachineClusterInstancetypes details* page -[%collapsible] -===== -[cols="1,3a", options="header"] -|==== -|Element -|Description - -|*Details* tab -|Configure an instance type by editing a form. - -|*YAML* tab -|Configure an instance type by editing a YAML configuration file. - -|*Actions* menu -|Select *Edit labels*, *Edit annotations*, *Edit VirtualMachineClusterInstancetype*, or *Delete VirtualMachineClusterInstancetype*. -|==== -===== - -[id="instancetypes-details-details_virt-web-console-overview"] -==== Details tab - -You configure an instance type by editing a form on the *Details* tab. - -.*Details* tab -[%collapsible] -===== -[cols="1,3a", options="header"] -|==== -|Element -|Description - -|*Name* -|VirtualMachineClusterInstancetype name. - -|*Labels* -|Click the edit icon to edit the labels. - -|*Annotations* -|Click the edit icon to edit the annotations. - -|*Created at* -|Instance type creation date. - -|*Owner* -|Instance type owner. 
-|==== -===== - -[id="instancetypes-details-yaml_virt-web-console-overview"] -==== YAML tab - -You configure an instance type by editing the YAML file on the *YAML* tab. - -.*YAML* tab -[%collapsible] -===== -[cols="1,3a", options="header"] -|==== -|Element -|Description - -|*Save* button -|Save changes to the YAML file. - -|*Reload* button -|Discard your changes and reload the YAML file. - -|*Cancel* button -|Exit the *YAML* tab. - -|*Download* button -|Download the YAML file to your local machine. -|==== -===== - -[id="preferences-page_virt-web-console-overview"] -== Preferences page - -You view and manage virtual machine preferences on the *Preferences* page. - -.*VirtualMachineClusterPreferences* page -[%collapsible] -===== -[cols="1,3a", options="header"] -|==== -|Element -|Description - -|*Create* button -|Create a preference by editing a YAML configuration file. - -|Search field -|Search for a preference by name or by label. - -|*Manage columns* icon -|Select up to 9 columns to display in the table. The *Namespace* column is only displayed when *All Projects* is selected from the *Projects* list. - -|Preferences table -|List of preferences. - -Click the actions menu {kebab} beside a preference to select *Clone* or *Delete*. -|==== -===== - -Click a preference to view the *VirtualMachineClusterPreference details* page. - -[id="preferences-details-page_virt-web-console-overview"] -=== VirtualMachineClusterPreference details page - -You configure a preference on the *VirtualMachineClusterPreference details* page. - -.*VirtualMachineClusterPreference details* page -[%collapsible] -===== -[cols="1,3a", options="header"] -|==== -|Element -|Description - -|*Details* tab -|Configure a preference by editing a form. - -|*YAML* tab -|Configure a preference by editing a YAML configuration file. - -|*Actions* menu -|Select *Edit labels*, *Edit annotations*, *Edit VirtualMachineClusterPreference*, or *Delete VirtualMachineClusterPreference*. 
-|==== -===== - -[id="preferences-details-details_virt-web-console-overview"] -==== Details tab - -You configure a preference by editing a form on the *Details* tab. - -.*Details* tab -[%collapsible] -===== -[cols="1,3a", options="header"] -|==== -|Element -|Description - -|*Name* -|VirtualMachineClusterPreference name. - -|*Labels* -|Click the edit icon to edit the labels. - -|*Annotations* -|Click the edit icon to edit the annotations. - -|*Created at* -|Preference creation date. - -|*Owner* -|Preference owner. -|==== -===== - -[id="preferences-details-yaml_virt-web-console-overview"] -==== YAML tab - -You configure a preference type by editing the YAML file on the *YAML* tab. - -.*YAML* tab -[%collapsible] -===== -[cols="1,3a", options="header"] -|==== -|Element -|Description - -|*Save* button -|Save changes to the YAML file. - -|*Reload* button -|Discard your changes and reload the YAML file. - -|*Cancel* button -|Exit the *YAML* tab. - -|*Download* button -|Download the YAML file to your local machine. -|==== -===== - -[id="bootablevolumes-page_virt-web-console-overview"] -== Bootable volumes page - -You view and manage available bootable volumes on the *Bootable volumes* page. - -.*Bootable volumes* page -[%collapsible] -===== -[cols="1,3a", options="header"] -|==== -|Element -|Description - -|*Add volume* button -|Add a bootable volume by completing a form or by editing a YAML configuration file. - -|*Filter* field -|Filter bootable volumes by operating system and resource type. - -|Search field -|Search for bootable volumes by name or by label. - -|*Manage columns* icon -|Select up to 9 columns to display in the table. The *Namespace* column is only displayed when *All Projects* is selected from the *Projects* list. - -|Bootable volumes table -|List of bootable volumes. - -Click the actions menu {kebab} beside a bootable volume to select *Edit*, *Remove from list*, or *Delete*. -|==== -===== - -Click a bootable volume to view the *DataSource details* page. 
- -[id="pvc-details-page_virt-web-console-overview"] -=== DataSource details page - -You configure the persistent volume claim (PVC) of a bootable volume on the *DataSource details* page. - -.*DataSource details* page -[%collapsible] -===== -[cols="1,3a", options="header"] -|==== -|Element -|Description - -|*Details* tab -|Configure the PVC by editing a form. - -|*YAML* tab -|Configure the PVC by editing a YAML configuration file. -|==== -===== - -[id="pvc-details-details_virt-web-console-overview"] -==== Details tab - -You configure the persistent volume claim (PVC) of the bootable volume by editing a form on the *Details* tab. - -.*Details* tab -[%collapsible] -===== -[cols="1,3a", options="header"] -|==== -|Element -|Description - -|*Name* -|Data source name. - -|*Namespace* -|Data source namespace. - -|*Labels* -|Click the edit icon to edit the labels. - -|*Annotations* -|Click the edit icon to edit the annotations. - -|*Created at* -|Data source creation date. - -|*Owner* -|Data source owner. - -|*DataImportCron* -|The `DataImportCron` object for the data source. - -|*Default Instance Type* -|Default instance type for this data source. - -|*Preference* -|The preferred `VirtualMachine` attribute values required to run a given workload. - -|*Conditions* table -|Displays the type, status, last update, reason, and message for the data source. -|==== -===== - -[id="pvc-details-yaml_virt-web-console-overview"] -==== YAML tab - -You configure the persistent volume claim of the bootable volume by editing the YAML file on the *YAML* tab. - -.*YAML* tab -[%collapsible] -===== -[cols="1,3a", options="header"] -|==== -|Element -|Description - -|*Save* button -|Save changes to the YAML file. - -|*Reload* button -|Discard your changes and reload the YAML file. - -|*Cancel* button -|Exit the *YAML* tab. - -|*Download* button -|Download the YAML file to your local machine. 
-|==== -===== - -[id="migrationpolicies-page_virt-web-console-overview"] -== MigrationPolicies page - -You manage migration policies for workloads on the *MigrationPolicies* page. - -.*MigrationPolicies* page -[%collapsible] -===== -[cols="1,3a", options="header"] -|==== -|Element -|Description - -|*Create MigrationPolicy* -|Create a migration policy by entering configurations and labels in a form or by editing a YAML file. - -|Search field -|Search for a migration policy by name or by label. - -|*Manage columns* icon -|Select up to 9 columns to display in the table. The *Namespace* column is only displayed when *All Projects* is selected from the *Projects* list. - -|*MigrationPolicies* table -|List of migration policies. - -Click the actions menu {kebab} beside a migration policy to select *Edit* or *Delete*. -|==== -===== - -Click a migration policy to view the *MigrationPolicy details* page. - -[id="migrationpolicy-details-page_virt-web-console-overview"] -=== MigrationPolicy details page - -You configure a migration policy on the *MigrationPolicy details* page. - -.*MigrationPolicy details* page -[%collapsible] -===== -[cols="1,3a", options="header"] -|==== -|Element -|Description - -|*Details* tab -|Configure a migration policy by editing a form. - -|*YAML* tab -|Configure a migration policy by editing a YAML configuration file. - -|*Actions* menu -|Select *Edit* or *Delete*. -|==== -===== - -[id="migrationpolicy-details-details_virt-web-console-overview"] -==== Details tab - -You configure a custom template on the *Details* tab. - -.*Details* tab -[%collapsible] -===== -[cols="1,3a", options="header"] -|==== -|Element -|Description - -|*Name* -|Migration policy name. - -|*Description* -|Migration policy description. - -|*Configurations* -|Click the edit icon to update the migration policy configurations. - -|*Bandwidth per migration* -|Bandwidth request per migration. For unlimited bandwidth, set the value to `0`. 
- -|*Auto converge* -|When auto converge is enabled, the performance and availability of the virtual machines might be reduced to ensure that migration is successful. - -|*Post-copy* -|Post-copy policy. - -|*Completion timeout* -|Completion timeout value in seconds. - -|*Project labels* -|Click *Edit* to edit the project labels. - -|*VirtualMachine labels* -|Click *Edit* to edit the virtual machine labels. -|==== -===== - -[id="migrationpolicy-details-yaml_virt-web-console-overview"] -==== YAML tab - -You configure the migration policy by editing the YAML file on the *YAML* tab. - -.*YAML* tab -[%collapsible] -===== -[cols="1,3a", options="header"] -|==== -|Element -|Description - -|*Save* button -|Save changes to the YAML file. - -|*Reload* button -|Discard your changes and reload the YAML file. - -|*Cancel* button -|Exit the *YAML* tab. - -|*Download* button -|Download the YAML file to your local machine. -|==== -===== - -[id="checkups-page_virt-web-console-overview"] -== Checkups page - -You run network latency and storage checkups for virtual machines on the *Checkups* page. - -.*Checkups* page -[%collapsible] -===== -[cols="1,3a", options="header"] -|==== -|Element -|Description - -|*Network latency* tab -|Run network latency checkup. - -|*Storage* tab -|Run storage checkup. -|==== -===== diff --git a/virt/install/preparing-cluster-for-virt.adoc b/virt/install/preparing-cluster-for-virt.adoc index 75e0ca7c7817..e433b283904f 100644 --- a/virt/install/preparing-cluster-for-virt.adoc +++ b/virt/install/preparing-cluster-for-virt.adoc @@ -28,7 +28,7 @@ endif::openshift-rosa,openshift-dedicated[] ifndef::openshift-rosa,openshift-dedicated[] .FIPS mode -If you install your cluster in xref:../../installing/installing-fips.adoc#installing-fips-mode_installing-fips[FIPS mode], no additional setup is required for {VirtProductName}. 
+If you install your cluster in xref:../../installing/overview/installing-fips.adoc#installing-fips-mode_installing-fips[FIPS mode], no additional setup is required for {VirtProductName}. endif::openshift-rosa,openshift-dedicated[] [id="supported-platforms_preparing-cluster-for-virt"] @@ -146,7 +146,7 @@ You must ensure that there is enough memory request capacity in the cluster to s Product of (Maximum number of nodes that can drain in parallel) and (Highest total VM memory request allocations across nodes) ---- -The default xref:../../virt/live_migration/virt-configuring-live-migration#virt-configuring-live-migration-limits_virt-configuring-live-migration[number of migrations that can run in parallel] in the cluster is 5. +The default xref:../../virt/live_migration/virt-configuring-live-migration.adoc#virt-configuring-live-migration[number of migrations that can run in parallel] in the cluster is 5. ==== * If the virtual machine uses a host model CPU, the nodes must support the virtual machine's host model CPU. diff --git a/virt/live_migration/virt-about-live-migration.adoc b/virt/live_migration/virt-about-live-migration.adoc index dc2c840c72d2..7329f5cc30a9 100644 --- a/virt/live_migration/virt-about-live-migration.adoc +++ b/virt/live_migration/virt-about-live-migration.adoc @@ -35,15 +35,10 @@ The default number of migrations that can run in parallel in the cluster is 5. 
You can perform the following live migration tasks: -* Configure live migration settings: - -** xref:../../virt/live_migration/virt-configuring-live-migration.adoc#virt-configuring-live-migration-limits_virt-configuring-live-migration[Limits and timeouts] -** xref:../../virt/getting_started/virt-web-console-overview.adoc#overview-settings-cluster_virt-web-console-overview[Maximum number of migrations per node or cluster] -** xref:../../virt/getting_started/virt-web-console-overview.adoc#overview-settings-cluster_virt-web-console-overview[Select a dedicated live migration network from existing networks] - +* xref:../../virt/live_migration/virt-configuring-live-migration.adoc#virt-configuring-live-migration[Configure live migration settings] * xref:../../virt/live_migration/virt-initiating-live-migration.adoc#virt-initiating-live-migration[Initiate and cancel live migration] -* xref:../../virt/getting_started/virt-web-console-overview.adoc#overview-migrations_virt-web-console-overview[Monitor the progress of all live migrations] -* xref:../../virt/getting_started/virt-web-console-overview.adoc#virtualmachine-details-metrics_virt-web-console-overview[View VM migration metrics] +* Monitor the progress of all live migrations in the *Migration* tab of the {virtproductname} web console. +* View VM migration metrics in the *Metrics* tab of the web console. [id="additional-resources_virt-about-live-migration"] diff --git a/virt/live_migration/virt-configuring-live-migration.adoc b/virt/live_migration/virt-configuring-live-migration.adoc index 0099dca53c68..d3eeebf9b052 100644 --- a/virt/live_migration/virt-configuring-live-migration.adoc +++ b/virt/live_migration/virt-configuring-live-migration.adoc @@ -10,15 +10,7 @@ You can configure live migration settings to ensure that the migration processes You can configure live migration policies to apply different migration configurations to groups of virtual machines (VMs). 
-[id="live-migration-settings"] -== Live migration settings - -You can configure the following live migration settings: - -* xref:../../virt/live_migration/virt-configuring-live-migration.adoc#virt-configuring-live-migration-limits_virt-configuring-live-migration[Limits and timeouts] -* xref:../../virt/getting_started/virt-web-console-overview.adoc#overview-settings-cluster_virt-web-console-overview[Maximum number of migrations per node or cluster] - -include::modules/virt-configuring-live-migration-limits.adoc[leveloffset=+2] +include::modules/virt-configuring-live-migration-limits.adoc[leveloffset=+1] [id="live-migration-policies"] == Live migration policies @@ -27,7 +19,7 @@ You can create live migration policies to apply different migration configuratio [TIP] ==== -You can create live migration policies by using the xref:../../virt/getting_started/virt-web-console-overview.adoc#migrationpolicies-page_virt-web-console-overview[web console]. +You can create live migration policies by using the {VirtProductName} web console. 
==== include::modules/virt-configuring-a-live-migration-policy.adoc[leveloffset=+2] diff --git a/virt/live_migration/virt-initiating-live-migration.adoc b/virt/live_migration/virt-initiating-live-migration.adoc index 26022525d792..2ff3c842663a 100644 --- a/virt/live_migration/virt-initiating-live-migration.adoc +++ b/virt/live_migration/virt-initiating-live-migration.adoc @@ -27,10 +27,4 @@ include::modules/virt-initiating-vm-migration-cli.adoc[leveloffset=+2] include::modules/virt-canceling-vm-migration-web.adoc[leveloffset=+2] -include::modules/virt-canceling-vm-migration-cli.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="additional-resources_virt-initiating-live-migration"] -== Additional resources -* xref:../../virt/getting_started/virt-web-console-overview.adoc#overview-migrations_virt-web-console-overview[Monitoring the progress of all live migrations by using the web console] -* xref:../../virt/getting_started/virt-web-console-overview.adoc#virtualmachine-details-metrics_virt-web-console-overview[Viewing VM migration metrics by using the web console] \ No newline at end of file +include::modules/virt-canceling-vm-migration-cli.adoc[leveloffset=+2] \ No newline at end of file diff --git a/virt/monitoring/virt-monitoring-overview.adoc b/virt/monitoring/virt-monitoring-overview.adoc index 81242cc7d7ff..73d927904f72 100644 --- a/virt/monitoring/virt-monitoring-overview.adoc +++ b/virt/monitoring/virt-monitoring-overview.adoc @@ -8,8 +8,8 @@ toc::[] You can monitor the health of your cluster and virtual machines (VMs) with the following tools: -Monitoring OpenShift Virtualization VMs health status:: -View the overall health of your OpenShift Virtualization environment in the web console by navigating to the *Home* -> *Overview* page in the {product-title} web console. The *Status* card displays the overall health of OpenShift Virtualization based on the alerts and conditions. 
+Monitoring {VirtProductName} VM health status:: +View the overall health of your {VirtProductName} environment in the web console by navigating to the *Home* -> *Overview* page in the {product-title} web console. The *Status* card displays the overall health of {VirtProductName} based on the alerts and conditions. ifndef::openshift-rosa,openshift-dedicated[] xref:../../virt/monitoring/virt-running-cluster-checkups.adoc#virt-running-cluster-checkups[{product-title} cluster checkup framework]:: @@ -17,7 +17,7 @@ Run automated tests on your cluster with the {product-title} cluster checkup fra * Network connectivity and latency between two VMs attached to a secondary network interface * VM running a Data Plane Development Kit (DPDK) workload with zero packet loss * Cluster storage is optimally configured for {VirtProductName} -* The {product-title} cluster can run real-time virtualized workloads. +//* The {product-title} cluster can run real-time virtualized workloads. endif::openshift-rosa,openshift-dedicated[] //:FeatureName: The {product-title} cluster checkup framework diff --git a/virt/monitoring/virt-runbooks.adoc b/virt/monitoring/virt-runbooks.adoc index 6e87571ca3d0..d4bf1bced9cf 100644 --- a/virt/monitoring/virt-runbooks.adoc +++ b/virt/monitoring/virt-runbooks.adoc @@ -6,119 +6,301 @@ include::_attributes/common-attributes.adoc[] toc::[] - :!virt-runbooks: -You can use the procedures in these runbooks to diagnose and resolve issues that trigger {VirtProductName} xref:../../observability/monitoring/managing-alerts.adoc#managing-alerts[alerts]. +To diagnose and resolve issues that trigger {VirtProductName} xref:../../observability/monitoring/managing-alerts.adoc#managing-alerts[alerts], follow the procedures in the runbooks for the {VirtProductName} Operator. Triggered {VirtProductName} alerts can be viewed in the main *Observe* -> *Alerts* tab in the web console, and also in the *Virtualization* -> *Overview* tab. 
+ +Runbooks for the {VirtProductName} Operator are maintained in the link:https://github.com/openshift/runbooks/tree/master/alerts/openshift-virtualization-operator[openshift/runbooks] Git repository, and you can view them on GitHub. + +// IMPORTANT: these IDs are meant to exactly mirror the original module IDs // +// changing them will break links in the actual alerts // + +[id="virt-runbook-cdidataimportcronoutdated_{context}"] +== CDIDataImportCronOutdated + +* link:https://github.com/openshift/runbooks/blob/master/alerts/openshift-virtualization-operator/CDIDataImportCronOutdated.md[View the runbook] for the `CDIDataImportCronOutdated` alert. + +[id="virt-runbook-cdidatavolumeunusualrestartcount_{context}"] +== CDIDataVolumeUnusualRestartCount + +* link:https://github.com/openshift/runbooks/blob/master/alerts/openshift-virtualization-operator/CDIDataVolumeUnusualRestartCount.md[View the runbook] for the `CDIDataVolumeUnusualRestartCount` alert. + +[id="virt-runbook-cdidefaultstorageclassdegraded_{context}"] +== CDIDefaultStorageClassDegraded + +* link:https://github.com/openshift/runbooks/blob/master/alerts/openshift-virtualization-operator/CDIDefaultStorageClassDegraded.md[View the runbook] for the `CDIDefaultStorageClassDegraded` alert. + +[id="virt-runbook-cdimultipledefaultvirtstorageclasses_{context}"] +== CDIMultipleDefaultVirtStorageClasses + +* link:https://github.com/openshift/runbooks/blob/master/alerts/openshift-virtualization-operator/CDIMultipleDefaultVirtStorageClasses.md[View the runbook] for the `CDIMultipleDefaultVirtStorageClasses` alert. + +[id="virt-runbook-cdinodefaultstorageclass_{context}"] +== CDINoDefaultStorageClass + +* link:https://github.com/openshift/runbooks/blob/master/alerts/openshift-virtualization-operator/CDINoDefaultStorageClass.md[View the runbook] for the `CDINoDefaultStorageClass` alert. 
+ +[id="virt-runbook-cdinotready_{context}"] +== CDINotReady + +* link:https://github.com/openshift/runbooks/blob/master/alerts/openshift-virtualization-operator/CDINotReady.md[View the runbook] for the `CDINotReady` alert. + +[id="virt-runbook-cdioperatordown_{context}"] +== CDIOperatorDown + +* link:https://github.com/openshift/runbooks/blob/master/alerts/openshift-virtualization-operator/CDIOperatorDown.md[View the runbook] for the `CDIOperatorDown` alert. + +[id="virt-runbook-cdistorageprofilesincomplete_{context}"] +== CDIStorageProfilesIncomplete + +* link:https://github.com/openshift/runbooks/blob/master/alerts/openshift-virtualization-operator/CDIStorageProfilesIncomplete.md[View the runbook] for the `CDIStorageProfilesIncomplete` alert. + +[id="virt-runbook-cnaodown_{context}"] +== CnaoDown + +* link:https://github.com/openshift/runbooks/blob/master/alerts/openshift-virtualization-operator/CnaoDown.md[View the runbook] for the `CnaoDown` alert. + +[id="virt-runbook-cnaonmstatemigration_{context}"] +== CnaoNMstateMigration + +* link:https://github.com/openshift/runbooks/blob/master/alerts/openshift-virtualization-operator/CnaoNmstateMigration.md[View the runbook] for the `CnaoNMstateMigration` alert. + +[id="virt-runbook-hcoinstallationincomplete_{context}"] +== HCOInstallationIncomplete + +* link:https://github.com/openshift/runbooks/blob/master/alerts/openshift-virtualization-operator/HCOInstallationIncomplete.md[View the runbook] for the `HCOInstallationIncomplete` alert. + +[id="virt-runbook-hppnotready_{context}"] +== HPPNotReady + +* link:https://github.com/openshift/runbooks/blob/master/alerts/openshift-virtualization-operator/HPPNotReady.md[View the runbook] for the `HPPNotReady` alert. + +[id="virt-runbook-hppoperatordown_{context}"] +== HPPOperatorDown + +* link:https://github.com/openshift/runbooks/blob/master/alerts/openshift-virtualization-operator/HPPOperatorDown.md[View the runbook] for the `HPPOperatorDown` alert. 
+
+[id="virt-runbook-hppsharingpoolpathwithos_{context}"]
+== HPPSharingPoolPathWithOS
+
+* link:https://github.com/openshift/runbooks/blob/master/alerts/openshift-virtualization-operator/HPPSharingPoolPathWithOS.md[View the runbook] for the `HPPSharingPoolPathWithOS` alert.
+
+[id="virt-runbook-kubemacpooldown_{context}"]
+== KubemacpoolDown
+
+* link:https://github.com/openshift/runbooks/blob/master/alerts/openshift-virtualization-operator/KubemacpoolDown.md[View the runbook] for the `KubemacpoolDown` alert.
+
+[id="virt-runbook-kubemacpoolduplicatemacsfound_{context}"]
+== KubeMacPoolDuplicateMacsFound
+
+* link:https://github.com/openshift/runbooks/blob/master/alerts/openshift-virtualization-operator/KubeMacPoolDuplicateMacsFound.md[View the runbook] for the `KubeMacPoolDuplicateMacsFound` alert.
+
+[id="virt-runbook-kubevirtcomponentexceedsrequestedcpu_{context}"]
+== KubeVirtComponentExceedsRequestedCPU
+
+* The `KubeVirtComponentExceedsRequestedCPU` alert is link:https://github.com/openshift/runbooks/blob/master/alerts/openshift-virtualization-operator/deprecated/KubeVirtComponentExceedsRequestedCPU.md[deprecated].
+
+[id="virt-runbook-kubevirtcomponentexceedsrequestedmemory_{context}"]
+== KubeVirtComponentExceedsRequestedMemory
+
+* The `KubeVirtComponentExceedsRequestedMemory` alert is link:https://github.com/openshift/runbooks/blob/master/alerts/openshift-virtualization-operator/deprecated/KubeVirtComponentExceedsRequestedMemory.md[deprecated].
+
+[id="virt-runbook-kubevirtcrmodified_{context}"]
+== KubeVirtCRModified
+
+* link:https://github.com/openshift/runbooks/blob/master/alerts/openshift-virtualization-operator/KubeVirtCRModified.md[View the runbook] for the `KubeVirtCRModified` alert.
+ +[id="virt-runbook-kubevirtdeprecatedapirequested_{context}"] +== KubeVirtDeprecatedAPIRequested + +* link:https://github.com/openshift/runbooks/blob/master/alerts/openshift-virtualization-operator/KubeVirtDeprecatedAPIRequested.md[View the runbook] for the `KubeVirtDeprecatedAPIRequested` alert. + +[id="virt-runbook-kubevirtnoavailablenodestorunvms_{context}"] +== KubeVirtNoAvailableNodesToRunVMs + +* link:https://github.com/openshift/runbooks/blob/master/alerts/openshift-virtualization-operator/KubeVirtNoAvailableNodesToRunVMs.md[View the runbook] for the `KubeVirtNoAvailableNodesToRunVMs` alert. + +[id="virt-runbook-kubevirtvmhighmemoryusage_{context}"] +== KubevirtVmHighMemoryUsage + +* link:https://github.com/openshift/runbooks/blob/master/alerts/openshift-virtualization-operator/KubevirtVmHighMemoryUsage.md[View the runbook] for the `KubevirtVmHighMemoryUsage` alert. + +[id="virt-runbook-kubevirtvmiexcessivemigrations_{context}"] +== KubeVirtVMIExcessiveMigrations + +* link:https://github.com/openshift/runbooks/blob/master/alerts/openshift-virtualization-operator/KubeVirtVMIExcessiveMigrations.md[View the runbook] for the `KubeVirtVMIExcessiveMigrations` alert. + +[id="virt-runbook-lowkvmnodescount_{context}"] +== LowKVMNodesCount + +* link:https://github.com/openshift/runbooks/blob/master/alerts/openshift-virtualization-operator/LowKVMNodesCount.md[View the runbook] for the `LowKVMNodesCount` alert. + +[id="virt-runbook-lowreadyvirtcontrollerscount_{context}"] +== LowReadyVirtControllersCount + +* link:https://github.com/openshift/runbooks/blob/master/alerts/openshift-virtualization-operator/LowReadyVirtControllersCount.md[View the runbook] for the `LowReadyVirtControllersCount` alert. 
+ +[id="virt-runbook-lowreadyvirtoperatorscount_{context}"] +== LowReadyVirtOperatorsCount + +* link:https://github.com/openshift/runbooks/blob/master/alerts/openshift-virtualization-operator/LowReadyVirtOperatorsCount.md[View the runbook] for the `LowReadyVirtOperatorsCount` alert. + +[id="virt-runbook-lowvirtapicount_{context}"] +== LowVirtAPICount + +* link:https://github.com/openshift/runbooks/blob/master/alerts/openshift-virtualization-operator/LowVirtAPICount.md[View the runbook] for the `LowVirtAPICount` alert. + +[id="virt-runbook-lowvirtcontrollerscount_{context}"] +== LowVirtControllersCount + +* link:https://github.com/openshift/runbooks/blob/master/alerts/openshift-virtualization-operator/LowVirtControllersCount.md[View the runbook] for the `LowVirtControllersCount` alert. + +[id="virt-runbook-lowvirtoperatorcount_{context}"] +== LowVirtOperatorCount + +* link:https://github.com/openshift/runbooks/blob/master/alerts/openshift-virtualization-operator/LowVirtOperatorCount.md[View the runbook] for the `LowVirtOperatorCount` alert. + +[id="virt-runbook-networkaddonsconfignotready_{context}"] +== NetworkAddonsConfigNotReady -{VirtProductName} alerts are displayed on the *Virtualization* -> *Overview* -> xref:../../virt/getting_started/virt-web-console-overview.adoc#overview-overview_virt-web-console-overview[*Overview* tab] in the web console. +* link:https://github.com/openshift/runbooks/blob/master/alerts/openshift-virtualization-operator/NetworkAddonsConfigNotReady.md[View the runbook] for the `NetworkAddonsConfigNotReady` alert. +[id="virt-runbook-noleadingvirtoperator_{context}"] +== NoLeadingVirtOperator -include::modules/virt-runbook-cdidataimportcronoutdated.adoc[leveloffset=+1] +* link:https://github.com/openshift/runbooks/blob/master/alerts/openshift-virtualization-operator/NoLeadingVirtOperator.md[View the runbook] for the `NoLeadingVirtOperator` alert. 
-
-include::modules/virt-runbook-cdidatavolumeunusualrestartcount.adoc[leveloffset=+1]
+[id="virt-runbook-noreadyvirtcontroller_{context}"]
+== NoReadyVirtController
 
-include::modules/virt-runbook-cdidefaultstorageclassdegraded.adoc[leveloffset=+1]
+* link:https://github.com/openshift/runbooks/blob/master/alerts/openshift-virtualization-operator/NoReadyVirtController.md[View the runbook] for the `NoReadyVirtController` alert.
 
-include::modules/virt-runbook-cdimultipledefaultvirtstorageclasses.adoc[leveloffset=+1]
+[id="virt-runbook-noreadyvirtoperator_{context}"]
+== NoReadyVirtOperator
 
-include::modules/virt-runbook-cdinodefaultstorageclass.adoc[leveloffset=+1]
+* link:https://github.com/openshift/runbooks/blob/master/alerts/openshift-virtualization-operator/NoReadyVirtOperator.md[View the runbook] for the `NoReadyVirtOperator` alert.
 
-include::modules/virt-runbook-cdinotready.adoc[leveloffset=+1]
+[id="virt-runbook-orphanedvirtualmachineinstances_{context}"]
+== OrphanedVirtualMachineInstances
 
-include::modules/virt-runbook-cdioperatordown.adoc[leveloffset=+1]
+* link:https://github.com/openshift/runbooks/blob/master/alerts/openshift-virtualization-operator/OrphanedVirtualMachineInstances.md[View the runbook] for the `OrphanedVirtualMachineInstances` alert.
 
-include::modules/virt-runbook-cdistorageprofilesincomplete.adoc[leveloffset=+1]
+[id="virt-runbook-outdatedvirtualmachineinstanceworkloads_{context}"]
+== OutdatedVirtualMachineInstanceWorkloads
 
-include::modules/virt-runbook-cnaodown.adoc[leveloffset=+1]
+* link:https://github.com/openshift/runbooks/blob/master/alerts/openshift-virtualization-operator/OutdatedVirtualMachineInstanceWorkloads.md[View the runbook] for the `OutdatedVirtualMachineInstanceWorkloads` alert.
-include::modules/virt-runbook-hcoinstallationincomplete.adoc[leveloffset=+1] +[id="virt-runbook-singlestackipv6unsupported_{context}"] +== SingleStackIPv6Unsupported -include::modules/virt-runbook-hppnotready.adoc[leveloffset=+1] +* link:https://github.com/openshift/runbooks/blob/master/alerts/openshift-virtualization-operator/SingleStackIPv6Unsupported.md[View the runbook] for the `SingleStackIPv6Unsupported` alert. -include::modules/virt-runbook-hppoperatordown.adoc[leveloffset=+1] +[id="virt-runbook-sspcommontemplatesmodificationreverted_{context}"] +== SSPCommonTemplatesModificationReverted -include::modules/virt-runbook-hppsharingpoolpathwithos.adoc[leveloffset=+1] +* link:https://github.com/openshift/runbooks/blob/master/alerts/openshift-virtualization-operator/SSPCommonTemplatesModificationReverted.md[View the runbook] for the `SSPCommonTemplatesModificationReverted` alert. -include::modules/virt-runbook-kubemacpooldown.adoc[leveloffset=+1] +[id="virt-runbook-sspdown_{context}"] +== SSPDown -include::modules/virt-runbook-kubemacpoolduplicatemacsfound.adoc[leveloffset=+1] +* link:https://github.com/openshift/runbooks/blob/master/alerts/openshift-virtualization-operator/SSPDown.md[View the runbook] for the `SSPDown` alert. -include::modules/virt-runbook-kubevirtcomponentexceedsrequestedcpu.adoc[leveloffset=+1] +[id="virt-runbook-sspfailingtoreconcile_{context}"] +== SSPFailingToReconcile -include::modules/virt-runbook-kubevirtcomponentexceedsrequestedmemory.adoc[leveloffset=+1] +* link:https://github.com/openshift/runbooks/blob/master/alerts/openshift-virtualization-operator/SSPFailingToReconcile.md[View the runbook] for the `SSPFailingToReconcile` alert. 
-include::modules/virt-runbook-kubevirtcrmodified.adoc[leveloffset=+1] +[id="virt-runbook-ssphighraterejectedvms_{context}"] +== SSPHighRateRejectedVms -include::modules/virt-runbook-kubevirtdeprecatedapirequested.adoc[leveloffset=+1] +* link:https://github.com/openshift/runbooks/blob/master/alerts/openshift-virtualization-operator/SSPHighRateRejectedVms.md[View the runbook] for the `SSPHighRateRejectedVms` alert. -include::modules/virt-runbook-kubevirtnoavailablenodestorunvms.adoc[leveloffset=+1] +[id="virt-runbook-ssptemplatevalidatordown_{context}"] +== SSPTemplateValidatorDown -include::modules/virt-runbook-kubevirtvmhighmemoryusage.adoc[leveloffset=+1] +* link:https://github.com/openshift/runbooks/blob/master/alerts/openshift-virtualization-operator/SSPTemplateValidatorDown.md[View the runbook] for the `SSPTemplateValidatorDown` alert. -include::modules/virt-runbook-kubevirtvmiexcessivemigrations.adoc[leveloffset=+1] +[id="virt-runbook-sspoperatordown_{context}"] +== SSPOperatorDown -include::modules/virt-runbook-lowkvmnodescount.adoc[leveloffset=+1] +* link:https://github.com/openshift/runbooks/blob/master/alerts/openshift-virtualization-operator/SSPOperatorDown.md[View the runbook] for the `SSPOperatorDown` alert. -include::modules/virt-runbook-lowreadyvirtcontrollerscount.adoc[leveloffset=+1] +[id="virt-runbook-unsupportedhcomodification_{context}"] +== UnsupportedHCOModification -include::modules/virt-runbook-lowreadyvirtoperatorscount.adoc[leveloffset=+1] +* link:https://github.com/openshift/runbooks/blob/master/alerts/openshift-virtualization-operator/UnsupportedHCOModification.md[View the runbook] for the `UnsupportedHCOModification` alert. 
-include::modules/virt-runbook-lowvirtapicount.adoc[leveloffset=+1] +[id="virt-runbook-virtapidown_{context}"] +== VirtAPIDown -include::modules/virt-runbook-lowvirtcontrollerscount.adoc[leveloffset=+1] +* link:https://github.com/openshift/runbooks/blob/master/alerts/openshift-virtualization-operator/VirtAPIDown.md[View the runbook] for the `VirtAPIDown` alert. -include::modules/virt-runbook-lowvirtoperatorcount.adoc[leveloffset=+1] +[id="virt-runbook-virtapiresterrorsburst_{context}"] +== VirtApiRESTErrorsBurst -include::modules/virt-runbook-networkaddonsconfignotready.adoc[leveloffset=+1] +* link:https://github.com/openshift/runbooks/blob/master/alerts/openshift-virtualization-operator/VirtApiRESTErrorsBurst.md[View the runbook] for the `VirtApiRESTErrorsBurst` alert. -include::modules/virt-runbook-noleadingvirtoperator.adoc[leveloffset=+1] +[id="virt-runbook-virtapiresterrorshigh_{context}"] +== VirtApiRESTErrorsHigh -include::modules/virt-runbook-noreadyvirtcontroller.adoc[leveloffset=+1] +* link:https://github.com/openshift/runbooks/blob/master/alerts/openshift-virtualization-operator/VirtApiRESTErrorsHigh.md[View the runbook] for the `VirtApiRESTErrorsHigh` alert. -include::modules/virt-runbook-noreadyvirtoperator.adoc[leveloffset=+1] +[id="virt-runbook-virtcontrollerdown_{context}"] +== VirtControllerDown -include::modules/virt-runbook-orphanedvirtualmachineinstances.adoc[leveloffset=+1] +* link:https://github.com/openshift/runbooks/blob/master/alerts/openshift-virtualization-operator/VirtControllerDown.md[View the runbook] for the `VirtControllerDown` alert. 
-include::modules/virt-runbook-outdatedvirtualmachineinstanceworkloads.adoc[leveloffset=+1] +[id="virt-runbook-virtcontrollerresterrorsburst_{context}"] +== VirtControllerRESTErrorsBurst -include::modules/virt-runbook-singlestackipv6unsupported.adoc[leveloffset=+1] +* link:https://github.com/openshift/runbooks/blob/master/alerts/openshift-virtualization-operator/VirtControllerRESTErrorsBurst.md[View the runbook] for the `VirtControllerRESTErrorsBurst` alert. -include::modules/virt-runbook-sspcommontemplatesmodificationreverted.adoc[leveloffset=+1] +[id="virt-runbook-virtcontrollerresterrorshigh_{context}"] +== VirtControllerRESTErrorsHigh -include::modules/virt-runbook-sspdown.adoc[leveloffset=+1] +* link:https://github.com/openshift/runbooks/blob/master/alerts/openshift-virtualization-operator/VirtControllerRESTErrorsHigh.md[View the runbook] for the `VirtControllerRESTErrorsHigh` alert. -include::modules/virt-runbook-sspfailingtoreconcile.adoc[leveloffset=+1] +[id="virt-runbook-virthandlerdaemonsetrolloutfailing_{context}"] +== VirtHandlerDaemonSetRolloutFailing -include::modules/virt-runbook-ssphighraterejectedvms.adoc[leveloffset=+1] +* link:https://github.com/openshift/runbooks/blob/master/alerts/openshift-virtualization-operator/VirtHandlerDaemonSetRolloutFailing.md[View the runbook] for the `VirtHandlerDaemonSetRolloutFailing` alert. -include::modules/virt-runbook-ssptemplatevalidatordown.adoc[leveloffset=+1] +[id="virt-runbook-virthandlerresterrorsburst_{context}"] +== VirtHandlerRESTErrorsBurst -include::modules/virt-runbook-unsupportedhcomodification.adoc[leveloffset=+1] +* link:https://github.com/openshift/runbooks/blob/master/alerts/openshift-virtualization-operator/VirtHandlerRESTErrorsBurst.md[View the runbook] for the `VirtHandlerRESTErrorsBurst` alert. 
-include::modules/virt-runbook-virtapidown.adoc[leveloffset=+1] +[id="virt-runbook-virthandlerresterrorshigh_{context}"] +== VirtHandlerRESTErrorsHigh -include::modules/virt-runbook-virtapiresterrorsburst.adoc[leveloffset=+1] +* link:https://github.com/openshift/runbooks/blob/master/alerts/openshift-virtualization-operator/VirtHandlerRESTErrorsHigh.md[View the runbook] for the `VirtHandlerRESTErrorsHigh` alert. -include::modules/virt-runbook-virtapiresterrorshigh.adoc[leveloffset=+1] +[id="virt-runbook-virtoperatordown_{context}"] +== VirtOperatorDown -include::modules/virt-runbook-virtcontrollerdown.adoc[leveloffset=+1] +* link:https://github.com/openshift/runbooks/blob/master/alerts/openshift-virtualization-operator/VirtOperatorDown.md[View the runbook] for the `VirtOperatorDown` alert. -include::modules/virt-runbook-virtcontrollerresterrorsburst.adoc[leveloffset=+1] +[id="virt-runbook-virtoperatorresterrorsburst_{context}"] +== VirtOperatorRESTErrorsBurst -include::modules/virt-runbook-virtcontrollerresterrorshigh.adoc[leveloffset=+1] +* link:https://github.com/openshift/runbooks/blob/master/alerts/openshift-virtualization-operator/VirtOperatorRESTErrorsBurst.md[View the runbook] for the `VirtOperatorRESTErrorsBurst` alert. -include::modules/virt-runbook-virthandlerdaemonsetrolloutfailing.adoc[leveloffset=+1] +[id="virt-runbook-virtoperatorresterrorshigh_{context}"] +== VirtOperatorRESTErrorsHigh -include::modules/virt-runbook-virthandlerresterrorsburst.adoc[leveloffset=+1] +* link:https://github.com/openshift/runbooks/blob/master/alerts/openshift-virtualization-operator/VirtOperatorRESTErrorsHigh.md[View the runbook] for the `VirtOperatorRESTErrorsHigh` alert. 
-include::modules/virt-runbook-virthandlerresterrorshigh.adoc[leveloffset=+1] +[id="virt-runbook-virtualmachinecrcerrors_{context}"] +== VirtualMachineCRCErrors -include::modules/virt-runbook-virtoperatordown.adoc[leveloffset=+1] +* The runbook for the `VirtualMachineCRCErrors` alert is deprecated because the alert was renamed to `VMStorageClassWarning`. +** link:https://github.com/openshift/runbooks/blob/master/alerts/openshift-virtualization-operator/VMStorageClassWarning.md[View the runbook] for the `VMStorageClassWarning` alert. -include::modules/virt-runbook-virtoperatorresterrorsburst.adoc[leveloffset=+1] +[id="virt-runbook-vmcannotbeevicted_{context}"] +== VMCannotBeEvicted -include::modules/virt-runbook-virtoperatorresterrorshigh.adoc[leveloffset=+1] +* link:https://github.com/openshift/runbooks/blob/master/alerts/openshift-virtualization-operator/VMCannotBeEvicted.md[View the runbook] for the `VMCannotBeEvicted` alert. -include::modules/virt-runbook-vmcannotbeevicted.adoc[leveloffset=+1] +[id="virt-runbook-vmstorageclasswarning_{context}"] +== VMStorageClassWarning -include::modules/virt-runbook-vmstorageclasswarning.adoc[leveloffset=+1] +* link:https://github.com/openshift/runbooks/blob/master/alerts/openshift-virtualization-operator/VMStorageClassWarning.md[View the runbook] for the `VMStorageClassWarning` alert. 
diff --git a/virt/monitoring/virt-running-cluster-checkups.adoc b/virt/monitoring/virt-running-cluster-checkups.adoc index 05e2fbf35f1a..f30b9aa5f257 100644 --- a/virt/monitoring/virt-running-cluster-checkups.adoc +++ b/virt/monitoring/virt-running-cluster-checkups.adoc @@ -8,43 +8,48 @@ toc::[] {VirtProductName} includes the following predefined checkups that can be used for cluster maintenance and troubleshooting: -xref:../../virt/monitoring/virt-running-cluster-checkups.adoc#virt-measuring-latency-vm-secondary-network_virt-running-cluster-checkups[Latency checkup]:: -Verifies network connectivity and measures latency between two virtual machines (VMs) that are attached to a secondary network interface. -xref:../../virt/monitoring/virt-running-cluster-checkups.adoc#virt-checking-cluster-dpdk-readiness_virt-running-cluster-checkups[DPDK checkup]:: -Verifies that a node can run a VM with a Data Plane Development Kit (DPDK) workload with zero packet loss. -xref:../../virt/monitoring/virt-running-cluster-checkups.adoc#virt-checking-storage-configuration_virt-running-cluster-checkups[Storage checkup]:: -Verifies if the cluster storage is optimally configured for {VirtProductName}. -xref:../../virt/monitoring/virt-running-cluster-checkups.adoc#virt-running-real-time-checkup_virt-running-cluster-checkups[Real-time checkup]:: -Verifies that your {product-title} cluster can run virtualized real-time workloads. +* Latency checkup, which verifies network connectivity and measures latency between two virtual machines (VMs) that are attached to a secondary network interface. ++ +[IMPORTANT] +===== +Before you run a latency checkup, you must first xref:../../virt/vm_networking/virt-connecting-vm-to-linux-bridge.adoc#virt-connecting-vm-to-linux-bridge[create a bridge interface] on the cluster nodes to connect the VM's secondary interface to any interface on the node. If you do not create a bridge interface, the VMs do not start and the job fails. 
+===== + +* Storage checkup, which verifies if the cluster storage is optimally configured for {VirtProductName}. +* DPDK checkup, which verifies that a node can run a VM with a Data Plane Development Kit (DPDK) workload with zero packet loss. +//xref:../../virt/monitoring/virt-running-cluster-checkups.adoc#virt-running-real-time-checkup_virt-running-cluster-checkups[Real-time checkup]:: +//Verifies that your {product-title} cluster can run virtualized real-time workloads. :FeatureName: The {VirtProductName} cluster checkup framework include::snippets/technology-preview.adoc[] include::modules/virt-about-cluster-checkup-framework.adoc[leveloffset=+1] -== Running cluster checkups in the web console - -Use the web console to run a latency or storage checkup on a cluster. +[id="virt-running-cluster-checkups-console_{context}"] +== Running checkups by using the web console -Use the following procedures the first time you run a latency checkup and storage checkup in the web console. For additional checkups, click *Run checkup* on either checkup tab, and select the appropriate checkup from the drop down menu. - -[IMPORTANT] -==== -Before you run a latency checkup, you must first xref:../../virt/vm_networking/virt-connecting-vm-to-linux-bridge.adoc#virt-connecting-vm-to-linux-bridge[create a bridge interface] on the cluster nodes to connect the VM's secondary interface to any interface on the node. If you do not create a bridge interface, the VMs will not start and the job will fail. -==== +Use the following procedures the first time you run checkups by using the web console. For additional checkups, click *Run checkup* on either checkup tab, and select the appropriate checkup from the drop-down menu. 
include::modules/virt-latency-checkup-web-console.adoc[leveloffset=+2] include::modules/virt-storage-checkup-web-console.adoc[leveloffset=+2] -include::modules/virt-measuring-latency-vm-secondary-network.adoc[leveloffset=+1] +[id="virt-running-cluster-checkups-cli_{context}"] +== Running checkups by using the command line -include::modules/virt-checking-cluster-dpdk-readiness.adoc[leveloffset=+1] +Use the following procedures the first time you run checkups by using the command line. -include::modules/virt-dpdk-config-map-parameters.adoc[leveloffset=+2] +include::modules/virt-measuring-latency-vm-secondary-network.adoc[leveloffset=+2] -include::modules/virt-building-vm-containerdisk-image.adoc[leveloffset=+2] +include::modules/virt-checking-storage-configuration.adoc[leveloffset=+2] +include::modules/virt-checking-cluster-dpdk-readiness.adoc[leveloffset=+2] + +include::modules/virt-dpdk-config-map-parameters.adoc[leveloffset=+3] + +include::modules/virt-building-vm-containerdisk-image.adoc[leveloffset=+3] + +//// include::modules/virt-running-real-time-checkup.adoc[leveloffset=+1] [role="_additional-resources"] @@ -55,7 +60,8 @@ include::modules/virt-real-time-config-map-parameters.adoc[leveloffset=+2] include::modules/virt-building-real-time-container-disk-image.adoc[leveloffset=+2] -include::modules/virt-checking-storage-configuration.adoc[leveloffset=+1] +//// + ifndef::openshift-rosa,openshift-dedicated[] [role="_additional-resources"] diff --git a/virt/nodes/virt-node-maintenance.adoc b/virt/nodes/virt-node-maintenance.adoc index f9563fdb578e..916d3e42d668 100644 --- a/virt/nodes/virt-node-maintenance.adoc +++ b/virt/nodes/virt-node-maintenance.adoc @@ -38,7 +38,7 @@ VM eviction strategy:: The VM `LiveMigrate` eviction strategy ensures that a virtual machine instance (VMI) is not interrupted if the node is placed into maintenance or drained. VMIs with this eviction strategy will be live migrated to another node. 
+ -You can configure eviction strategies for virtual machines (VMs) by using the xref:../../virt/getting_started/virt-web-console-overview.adoc#virtualmachine-details-scheduling_virt-web-console-overview[web console] or the xref:../../virt/live_migration/virt-configuring-live-migration.adoc#virt-configuring-a-live-migration-policy_virt-configuring-live-migration[command line]. +You can configure eviction strategies for virtual machines (VMs) by using the {VirtProductName} web console or the xref:../../virt/live_migration/virt-configuring-live-migration.adoc#virt-configuring-a-live-migration-policy_virt-configuring-live-migration[command line]. + [IMPORTANT] ==== diff --git a/virt/post_installation_configuration/virt-configuring-higher-vm-workload-density.adoc b/virt/post_installation_configuration/virt-configuring-higher-vm-workload-density.adoc new file mode 100644 index 000000000000..ce9c6000055a --- /dev/null +++ b/virt/post_installation_configuration/virt-configuring-higher-vm-workload-density.adoc @@ -0,0 +1,28 @@ +:_mod-docs-content-type: ASSEMBLY +[id="virt-configuring-higher-vm-workload-density"] += Configuring higher VM workload density +include::_attributes/common-attributes.adoc[] +:context: virt-configuring-higher-vm-workload-density + +toc::[] + +To increase the number of virtual machines (VMs), you can configure a higher VM workload density in your cluster by overcommitting the amount of memory (RAM). + +:FeatureName: Configuring higher workload density +include::snippets/technology-preview.adoc[] + +The following workloads are especially suited for higher workload density: + +* Many similar workloads +* Underused workloads + +[NOTE] +==== +While overcommitted memory can lead to a higher workload density, it can also lower workload performance of a highly utilized system. 
+==== + +include::modules/virt-using-wasp-agent-to-configure-higher-vm-workload-density.adoc[leveloffset=+1] + + + + diff --git a/virt/post_installation_configuration/virt-node-placement-virt-components.adoc b/virt/post_installation_configuration/virt-node-placement-virt-components.adoc index 6bda7e09f31a..c64842845aac 100644 --- a/virt/post_installation_configuration/virt-node-placement-virt-components.adoc +++ b/virt/post_installation_configuration/virt-node-placement-virt-components.adoc @@ -25,4 +25,6 @@ include::modules/virt-node-placement-rule-examples.adoc[leveloffset=+1] * xref:../../virt/virtual_machines/advanced_vm_management/virt-specifying-nodes-for-vms.adoc#virt-specifying-nodes-for-vms[Specifying nodes for virtual machines] * xref:../../nodes/scheduling/nodes-scheduler-node-selectors.adoc#nodes-scheduler-node-selectors[Placing pods on specific nodes using node selectors] * xref:../../nodes/scheduling/nodes-scheduler-node-affinity.adoc#nodes-scheduler-node-affinity[Controlling pod placement on nodes using node affinity rules] -* xref:../../nodes/scheduling/nodes-scheduler-taints-tolerations.adoc#nodes-scheduler-taints-tolerations[Controlling pod placement using node taints] \ No newline at end of file +ifndef::openshift-dedicated,openshift-rosa[] +* xref:../../nodes/scheduling/nodes-scheduler-taints-tolerations.adoc#nodes-scheduler-taints-tolerations[Controlling pod placement using node taints] +endif::openshift-dedicated,openshift-rosa[] diff --git a/virt/post_installation_configuration/virt-post-install-storage-config.adoc b/virt/post_installation_configuration/virt-post-install-storage-config.adoc index ddecc99cc4d3..c43932bb8d9f 100644 --- a/virt/post_installation_configuration/virt-post-install-storage-config.adoc +++ b/virt/post_installation_configuration/virt-post-install-storage-config.adoc @@ -9,7 +9,7 @@ toc::[] The following storage configuration tasks are mandatory: ifndef::openshift-rosa,openshift-dedicated[] -* You must configure a 
xref:../../post_installation_configuration/storage-configuration.adoc#defining-storage-classes_post-install-storage-configuration[default storage class] for your cluster. Otherwise, the cluster cannot receive automated boot source updates. +* You must configure a xref:../../storage/dynamic-provisioning.adoc#defining-storage-classes_dynamic-provisioning[default storage class] for your cluster. Otherwise, the cluster cannot receive automated boot source updates. endif::openshift-rosa,openshift-dedicated[] * You must configure xref:../../virt/storage/virt-configuring-storage-profile.adoc#virt-configuring-storage-profile[storage profiles] if your storage provider is not recognized by CDI. A storage profile provides recommended storage settings based on the associated storage class. diff --git a/virt/release_notes/virt-4-17-release-notes.adoc b/virt/release_notes/virt-4-17-release-notes.adoc new file mode 100644 index 000000000000..682a4dac1943 --- /dev/null +++ b/virt/release_notes/virt-4-17-release-notes.adoc @@ -0,0 +1,12 @@ +:_mod-docs-content-type: ASSEMBLY +[id="virt-4-17-release-notes"] += {VirtProductName} release notes +include::_attributes/common-attributes.adoc[] +:context: virt-4-17-release-notes + +toc::[] + +Do not add or edit release notes here. Edit release notes directly in the branch +that they are relevant for. + +This file is here to allow builds to work. \ No newline at end of file diff --git a/virt/storage/virt-automatic-bootsource-updates.adoc b/virt/storage/virt-automatic-bootsource-updates.adoc index 5ff37917f53d..9ffba454afd5 100644 --- a/virt/storage/virt-automatic-bootsource-updates.adoc +++ b/virt/storage/virt-automatic-bootsource-updates.adoc @@ -31,13 +31,13 @@ _Custom_ boot sources that are not provided by {VirtProductName} are not control ifndef::openshift-rosa,openshift-dedicated[] [IMPORTANT] ==== -You must configure a storage class. Otherwise, the cluster cannot receive automated updates for custom boot sources. 
See xref:../../post_installation_configuration/storage-configuration.adoc#defining-storage-classes_post-install-storage-configuration[Defining a storage class] for details. +You must configure a storage class. Otherwise, the cluster cannot receive automated updates for custom boot sources. See xref:../../storage/dynamic-provisioning.adoc#defining-storage-classes_dynamic-provisioning[Defining a storage class] for details. ==== endif::openshift-rosa,openshift-dedicated[] ifdef::openshift-rosa,openshift-dedicated[] [IMPORTANT] ==== -You must configure a storage profile. Otherwise, the cluster cannot receive automated updates for custom boot sources. See xref:../../virt/storage/virt-configuring-storage-profile.adoc#virt-configuring-storage-profile[Configure storage profiles] for details. +You must configure a storage profile. Otherwise, the cluster cannot receive automated updates for custom boot sources. See xref:../../virt/storage/virt-configuring-storage-profile.adoc#virt-configuring-storage-profile[Configure storage profiles] for details. ==== endif::openshift-rosa,openshift-dedicated[] diff --git a/virt/storage/virt-configuring-storage-profile.adoc b/virt/storage/virt-configuring-storage-profile.adoc index a9ffc305bc9d..5dc544adcb14 100644 --- a/virt/storage/virt-configuring-storage-profile.adoc +++ b/virt/storage/virt-configuring-storage-profile.adoc @@ -8,9 +8,9 @@ toc::[] A storage profile provides recommended storage settings based on the associated storage class. A storage profile is allocated for each storage class. -If the Containerized Data Importer (CDI) does not recognize your storage provider, you must configure storage profiles. +The Containerized Data Importer (CDI) recognizes a storage provider if it has been configured to identify and interact with the storage provider's capabilities. -For recognized storage types, CDI provides values that optimize the creation of PVCs. 
However, you can configure automatic settings for a storage class if you customize the storage profile. +For recognized storage types, the CDI provides values that optimize the creation of PVCs. You can also configure automatic settings for the storage class by customizing the storage profile. If the CDI does not recognize your storage provider, you must configure storage profiles. ifndef::openshift-rosa,openshift-dedicated[] [IMPORTANT] diff --git a/virt/storage/virt-storage-config-overview.adoc b/virt/storage/virt-storage-config-overview.adoc index 57fe626a97ab..b937496a21b3 100644 --- a/virt/storage/virt-storage-config-overview.adoc +++ b/virt/storage/virt-storage-config-overview.adoc @@ -14,7 +14,7 @@ You can configure a default storage class, storage profiles, Containerized Data The following storage configuration tasks are mandatory: ifndef::openshift-rosa,openshift-dedicated[] -xref:../../post_installation_configuration/storage-configuration.adoc#defining-storage-classes_post-install-storage-configuration[Configure a default storage class]:: +xref:../../storage/dynamic-provisioning.adoc#defining-storage-classes_dynamic-provisioning[Configure a default storage class]:: You must configure a default storage class for your cluster. Otherwise, the cluster cannot receive automated boot source updates. endif::openshift-rosa,openshift-dedicated[] @@ -68,4 +68,4 @@ You can perform the following boot source update configuration task: xref:../../virt/storage/virt-automatic-bootsource-updates.adoc#virt-automatic-bootsource-updates[Manage automatic boot source updates]:: -Boot sources can make virtual machine (VM) creation more accessible and efficient for users. If automatic boot source updates are enabled, CDI imports, polls, and updates the images so that they are ready to be cloned for new VMs. By default, CDI automatically updates Red Hat boot sources. You can enable automatic updates for custom boot sources. 
\ No newline at end of file +Boot sources can make virtual machine (VM) creation more accessible and efficient for users. If automatic boot source updates are enabled, CDI imports, polls, and updates the images so that they are ready to be cloned for new VMs. By default, CDI automatically updates Red Hat boot sources. You can enable automatic updates for custom boot sources. diff --git a/virt/support/virt-support-overview.adoc b/virt/support/virt-support-overview.adoc index decc827b586c..41ab9f5b9317 100644 --- a/virt/support/virt-support-overview.adoc +++ b/virt/support/virt-support-overview.adoc @@ -21,22 +21,22 @@ The {product-title} web console displays resource usage, alerts, events, and tre |*Overview* page |Cluster details, status, alerts, inventory, and resource usage -|*Virtualization* -> xref:../../virt/getting_started/virt-web-console-overview.adoc#overview-overview_virt-web-console-overview[*Overview* tab] +|*Virtualization* -> *Overview* tab |{VirtProductName} resources, usage, alerts, and status -|*Virtualization* -> xref:../../virt/getting_started/virt-web-console-overview.adoc#overview-top-consumers_virt-web-console-overview[*Top consumers* tab] +|*Virtualization* -> *Top consumers* tab |Top consumers of CPU, memory, and storage -|*Virtualization* -> xref:../../virt/getting_started/virt-web-console-overview.adoc#overview-migrations_virt-web-console-overview[*Migrations* tab] +|*Virtualization* -> *Migrations* tab |Progress of live migrations -|*VirtualMachines* -> *VirtualMachine* -> *VirtualMachine details* -> xref:../../virt/getting_started/virt-web-console-overview.adoc#virtualmachine-details-metrics_virt-web-console-overview[*Metrics* tab] +|*VirtualMachines* -> *VirtualMachine* -> *VirtualMachine details* -> *Metrics* tab |VM resource usage, storage, network, and migration -|*VirtualMachines* -> *VirtualMachine* -> *VirtualMachine details* -> 
xref:../../virt/getting_started/virt-web-console-overview.adoc#virtualmachine-details-events_virt-web-console-overview[*Events* tab] +|*VirtualMachines* -> *VirtualMachine* -> *VirtualMachine details* -> *Events* tab |List of VM events -|*VirtualMachines* -> *VirtualMachine* -> *VirtualMachine details* -> xref:../../virt/getting_started/virt-web-console-overview.adoc#virtualmachine-details-diagnostics_virt-web-console-overview[*Diagnostics tab*] +|*VirtualMachines* -> *VirtualMachine* -> *VirtualMachine details* -> *Diagnostics* tab |VM status conditions and volume snapshot status |==== diff --git a/virt/support/virt-troubleshooting.adoc b/virt/support/virt-troubleshooting.adoc index 264b5ede11f4..78c93c7fe675 100644 --- a/virt/support/virt-troubleshooting.adoc +++ b/virt/support/virt-troubleshooting.adoc @@ -9,7 +9,7 @@ include::_attributes/common-attributes.adoc[] toc::[] -You can troubleshoot {VirtProductName} components by using the xref:../../virt/support/virt-support-overview.adoc#virt-web-console_virt-support-overview[tools provided in the web console] or by using the `oc` CLI tool. +You can troubleshoot {VirtProductName} components by using the tools provided in the web console or by using the `oc` CLI tool. // This section is in an assembly so that we can use xrefs. [id="events_{context}"] @@ -17,7 +17,7 @@ You can troubleshoot {VirtProductName} components by using the xref:../../virt/s xref:../../nodes/clusters/nodes-containers-events.adoc#nodes-containers-events[{product-title} events] are records of important life-cycle information and are useful for monitoring and troubleshooting virtual machine, namespace, and resource issues. -* VM events: Navigate to the xref:../../virt/getting_started/virt-web-console-overview.adoc#virtualmachine-details-events_virt-web-console-overview[*Events* tab] of the *VirtualMachine details* page in the web console. +* VM events: Navigate to the *Events* tab of the *VirtualMachine details* page in the web console. 
Namespace events:: You can view namespace events by running the following command: @@ -51,7 +51,7 @@ include::modules/virt-viewing-logs-cli.adoc[leveloffset=+2] [id="guest-system-logs_{context}"] == Guest system logs -Viewing the boot logs of VM guests can help diagnose issues. You can configure access to guests' logs and view them by using either the {product-title} web console or the `oc` CLI. +Viewing the boot logs of VM guests can help diagnose issues. You can configure access to guests' logs and view them by using either the {product-title} web console or the `oc` CLI. This feature is disabled by default. If a VM does not explicitly have this setting enabled or disabled, it inherits the cluster-wide default setting. diff --git a/virt/updating/upgrading-virt.adoc b/virt/updating/upgrading-virt.adoc index d525f69e1221..6ece5f09fa49 100644 --- a/virt/updating/upgrading-virt.adoc +++ b/virt/updating/upgrading-virt.adoc @@ -52,4 +52,4 @@ endif::openshift-rosa,openshift-dedicated,openshift-origin[] * xref:../../operators/understanding/olm/olm-understanding-olm.adoc#olm-csv_olm-understanding-olm[Cluster service versions (CSVs)] * xref:../../virt/live_migration/virt-about-live-migration.adoc#virt-about-live-migration[About live migration] * xref:../../virt/nodes/virt-node-maintenance.adoc#eviction-strategies[Configuring eviction strategies] -* xref:../../virt/live_migration/virt-configuring-live-migration.adoc#virt-configuring-live-migration-limits_virt-configuring-live-migration[Configuring live migration limits and timeouts] +* xref:../../virt/live_migration/virt-configuring-live-migration.adoc#virt-configuring-live-migration[Configuring live migration limits and timeouts] diff --git a/virt/virtual_machines/advanced_vm_management/virt-high-availability-for-vms.adoc b/virt/virtual_machines/advanced_vm_management/virt-high-availability-for-vms.adoc index 8876d0c79f5c..8a7ab5adccf9 100644 --- 
a/virt/virtual_machines/advanced_vm_management/virt-high-availability-for-vms.adoc +++ b/virt/virtual_machines/advanced_vm_management/virt-high-availability-for-vms.adoc @@ -24,4 +24,4 @@ endif::openshift-rosa,openshift-dedicated[] You can configure remediating nodes by installing the Self Node Remediation Operator from the OperatorHub and enabling machine health checks or node remediation checks. -For more information on remediation, fencing, and maintaining nodes, see the link:https://access.redhat.com/documentation/en-us/workload_availability_for_red_hat_openshift/23.2/html-single/remediation_fencing_and_maintenance/index#about-remediation-fencing-maintenance[Workload Availability for Red Hat OpenShift] documentation. +For more information on remediation, fencing, and maintaining nodes, see the link:https://access.redhat.com/documentation/en-us/workload_availability_for_red_hat_openshift[Workload Availability for Red Hat OpenShift] documentation. diff --git a/virt/virtual_machines/advanced_vm_management/virt-specifying-nodes-for-vms.adoc b/virt/virtual_machines/advanced_vm_management/virt-specifying-nodes-for-vms.adoc index 433fa67e5854..114680dab98a 100644 --- a/virt/virtual_machines/advanced_vm_management/virt-specifying-nodes-for-vms.adoc +++ b/virt/virtual_machines/advanced_vm_management/virt-specifying-nodes-for-vms.adoc @@ -27,4 +27,6 @@ include::modules/virt-example-vm-node-placement-tolerations.adoc[leveloffset=+2] * xref:../../../virt/post_installation_configuration/virt-node-placement-virt-components.adoc#virt-node-placement-virt-components[Specifying nodes for virtualization components] * xref:../../../nodes/scheduling/nodes-scheduler-node-selectors.adoc#nodes-scheduler-node-selectors[Placing pods on specific nodes using node selectors] * xref:../../../nodes/scheduling/nodes-scheduler-node-affinity.adoc#nodes-scheduler-node-affinity[Controlling pod placement on nodes using node affinity rules] +ifndef::openshift-dedicated,openshift-rosa[] * 
xref:../../../nodes/scheduling/nodes-scheduler-taints-tolerations.adoc#nodes-scheduler-taints-tolerations[Controlling pod placement using node taints] +endif::openshift-dedicated,openshift-rosa[] diff --git a/virt/virtual_machines/creating_vms_rh/virt-creating-vms-from-rh-images-overview.adoc b/virt/virtual_machines/creating_vms_rh/virt-creating-vms-from-rh-images-overview.adoc index 542957f9aadb..5c5fb2a17ff4 100644 --- a/virt/virtual_machines/creating_vms_rh/virt-creating-vms-from-rh-images-overview.adoc +++ b/virt/virtual_machines/creating_vms_rh/virt-creating-vms-from-rh-images-overview.adoc @@ -10,7 +10,7 @@ Red Hat images are xref:../../../virt/virtual_machines/creating_vms_rh/virt-crea Red Hat images are automatically updated. You can disable and re-enable automatic updates for these images. See xref:../../../virt/storage/virt-automatic-bootsource-updates.adoc#managing-rh-boot-source-updates_virt-automatic-bootsource-updates[Managing Red Hat boot source updates]. -Cluster administrators can enable automatic subscription for {op-system-base-full} virtual machines in the {VirtProductName} xref:../../../virt/getting_started/virt-web-console-overview.adoc#overview-settings-cluster_virt-web-console-overview[web console]. +Cluster administrators can enable automatic subscription for {op-system-base-full} virtual machines in the {VirtProductName} xref:../../../virt/about_virt/about-virt.adoc#about-virt[web console]. 
You can create virtual machines (VMs) from operating system images provided by Red Hat by using one of the following methods: diff --git a/virt/virtual_machines/creating_vms_rh/virt-creating-vms-from-templates.adoc b/virt/virtual_machines/creating_vms_rh/virt-creating-vms-from-templates.adoc index b3c3233eda5e..8d36ccc6b9f2 100644 --- a/virt/virtual_machines/creating_vms_rh/virt-creating-vms-from-templates.adoc +++ b/virt/virtual_machines/creating_vms_rh/virt-creating-vms-from-templates.adoc @@ -17,11 +17,10 @@ You can expedite VM creation by using templates that have an available boot sour Templates without a boot source are labeled *Boot source required*. See xref:../../../virt/virtual_machines/creating_vms_custom/virt-creating-vms-from-custom-images-overview.adoc#virt-creating-vms-from-custom-images-overview[Creating virtual machines from custom images]. Customization:: -You can customize the disk source and VM parameters before you start the VM: +You can customize the disk source and VM parameters before you start the VM. -* See xref:../../../virt/virtual_machines/creating_vms_rh/virt-creating-vms-from-templates.adoc#virt-vm-storage-volume-types_virt-creating-vms-from-templates[storage volume types] and xref:../../../virt/virtual_machines/creating_vms_rh/virt-creating-vms-from-templates.adoc#virt-storage-wizard-fields-web_virt-creating-vms-from-templates[storage fields] for details about disk source settings. +See xref:../../../virt/virtual_machines/creating_vms_rh/virt-creating-vms-from-templates.adoc#virt-vm-storage-volume-types_virt-creating-vms-from-templates[storage volume types] and xref:../../../virt/virtual_machines/creating_vms_rh/virt-creating-vms-from-templates.adoc#virt-storage-wizard-fields-web_virt-creating-vms-from-templates[storage fields] for details about disk source settings. 
-* See the xref:../../../virt/getting_started/virt-web-console-overview.adoc#virtualmachine-details-overview_virt-web-console-overview[*Overview*], xref:../../../virt/getting_started/virt-web-console-overview.adoc#virtualmachine-details-yaml_virt-web-console-overview[*YAML*], and xref:../../../virt/getting_started/virt-web-console-overview.adoc#virtualmachine-details-configuration_virt-web-console-overview[*Configuration*] tab documentation for details about VM settings. [NOTE] ==== diff --git a/virt/virtual_machines/virt-accessing-vm-ssh.adoc b/virt/virtual_machines/virt-accessing-vm-ssh.adoc index 9ac0f8b57da3..0def0dc4fc1e 100644 --- a/virt/virtual_machines/virt-accessing-vm-ssh.adoc +++ b/virt/virtual_machines/virt-accessing-vm-ssh.adoc @@ -43,9 +43,11 @@ include::modules/virt-about-static-and-dynamic-ssh-keys.adoc[leveloffset=+2] You can add a statically managed public SSH key when you create a virtual machine (VM) by using the {product-title} web console or the command line. The key is added as a cloud-init data source when the VM boots for the first time. -[TIP] +You can also add a public SSH key to a project when you create a VM by using the web console. The key is saved as a secret and is added automatically to all VMs that you create. + +[NOTE] ==== -You can also add the key to a project by using the {product-title} web console. Afterwards, this key is added automatically to VMs that you create in the project. +If you add a secret to a project and then delete the VM, the secret is retained because it is a namespace resource. You must delete the secret manually. ==== :context: static-key @@ -88,7 +90,7 @@ include::modules/virt-using-virtctl-ssh-command.adoc[leveloffset=+2] [TIP] ==== -You can copy the `virtctl ssh` command in the web console by selecting *Copy SSH command* from the options {kebab} menu beside a VM on the xref:../../virt/getting_started/virt-web-console-overview.adoc#virtualmachines-page_virt-web-console-overview[*VirtualMachines* page]. 
+You can copy the `virtctl ssh` command in the web console by selecting *Copy SSH command* from the options {kebab} menu beside a VM on the *VirtualMachines* page. ==== include::modules/virt-using-virtctl-port-forward-command.adoc[leveloffset=+1] diff --git a/virt/virtual_machines/virt-edit-vms.adoc b/virt/virtual_machines/virt-edit-vms.adoc index dacfcb2a6d59..d780ff54d67e 100644 --- a/virt/virtual_machines/virt-edit-vms.adoc +++ b/virt/virtual_machines/virt-edit-vms.adoc @@ -6,7 +6,7 @@ include::_attributes/common-attributes.adoc[] toc::[] -You can update a virtual machine (VM) configuration by using the {product-title} web console. You can update the xref:../../virt/getting_started/virt-web-console-overview.adoc#virtualmachine-details-yaml_virt-web-console-overview[YAML file] or the xref:../../virt/getting_started/virt-web-console-overview.adoc#virtualmachine-details-page_virt-web-console-overview[*VirtualMachine details* page]. +You can update a virtual machine (VM) configuration by using the {product-title} web console. You can update the YAML file or the *VirtualMachine details* page. You can also edit a VM by using the command line. 
@@ -19,7 +19,7 @@ include::modules/virt-editing-vm-cli.adoc[leveloffset=+1] include::modules/virt-add-disk-to-vm.adoc[leveloffset=+1] include::modules/virt-storage-wizard-fields-web.adoc[leveloffset=+2] - +include::modules/virt-mounting-windows-driver-disk-on-vm.adoc[leveloffset=+1] include::modules/virt-adding-secret-configmap-service-account-to-vm.adoc[leveloffset=+1] [discrete] diff --git a/virt/virtual_machines/virt-managing-vms-openshift-pipelines.adoc b/virt/virtual_machines/virt-managing-vms-openshift-pipelines.adoc index b2780d089b03..385df23fa4d5 100644 --- a/virt/virtual_machines/virt-managing-vms-openshift-pipelines.adoc +++ b/virt/virtual_machines/virt-managing-vms-openshift-pipelines.adoc @@ -14,10 +14,6 @@ The Scheduling, Scale, and Performance (SSP) Operator integrates {VirtProductNam * Run commands in VMs * Manipulate disk images with `libguestfs` tools -:FeatureName: Managing virtual machines with {pipelines-title} -include::snippets/technology-preview.adoc[] - - [id="prerequisites_virt-managing-vms-openshift-pipelines"] == Prerequisites @@ -25,28 +21,18 @@ include::snippets/technology-preview.adoc[] * You have installed the OpenShift CLI (`oc`). * You have link:https://docs.openshift.com/pipelines/latest/install_config/installing-pipelines.html[installed {pipelines-shortname}]. - -include::modules/virt-deploying-ssp.adoc[leveloffset=+1] - include::modules/virt-supported-ssp-tasks.adoc[leveloffset=+1] +[id="windows-efi-installer-pipeline_{context}"] +== Windows EFI installer pipeline -[id="example-pipelines_virt-managing-vms-openshift-pipelines"] -== Example pipelines - -The SSP Operator includes the following example `Pipeline` manifests. You can run the example pipelines by using the web console or CLI. - -You might have to run more than one installer pipeline if you need multiple versions of Windows. If you run more than one installer pipeline, each one requires unique parameters, such as the `autounattend` config map and base image name. 
For example, if you need Windows 10 and Windows 11 or Windows Server 2022 images, you have to run both the Windows efi installer pipeline and the Windows bios installer pipeline. However, if you need Windows 11 and Windows Server 2022 images, you have to run only the Windows efi installer pipeline. - -Windows EFI installer pipeline:: This pipeline installs Windows 11 or Windows Server 2022 into a new data volume from a Windows installation image (ISO file). A custom answer file is used to run the installation process. - -Windows BIOS installer pipeline:: This pipeline installs Windows 10 into a new data volume from a Windows installation image, also called an ISO file. A custom answer file is used to run the installation process. +You can run the link:https://artifacthub.io/packages/tekton-pipeline/redhat-pipelines/windows-efi-installer[Windows EFI installer pipeline] by using the web console or CLI. -Windows customize pipeline:: This pipeline clones the data volume of a basic Windows 10, 11, or Windows Server 2022 installation, customizes it by installing Microsoft SQL Server Express or Microsoft Visual Studio Code, and then creates a new image and template. +The Windows EFI installer pipeline installs Windows 10, Windows 11, or Windows Server 2022 into a new data volume from a Windows installation image (ISO file). A custom answer file is used to run the installation process. [NOTE] ==== -The example pipelines use a config map file with `sysprep` predefined by {product-title} and suitable for Microsoft ISO files. For ISO files pertaining to different Windows editions, it may be necessary to create a new config map file with a system-specific sysprep definition. +The Windows EFI installer pipeline uses a config map file with `sysprep` predefined by {product-title} and suitable for Microsoft ISO files. For ISO files pertaining to different Windows editions, it may be necessary to create a new config map file with a system-specific `sysprep` definition. 
==== include::modules/virt-running-ssp-pipeline-web.adoc[leveloffset=+2] diff --git a/virt/virtual_machines/virtual_disks/virt-configuring-shared-volumes-for-vms.adoc b/virt/virtual_machines/virtual_disks/virt-configuring-shared-volumes-for-vms.adoc index 7fafc5f13f30..0f15528fb244 100644 --- a/virt/virtual_machines/virtual_disks/virt-configuring-shared-volumes-for-vms.adoc +++ b/virt/virtual_machines/virtual_disks/virt-configuring-shared-volumes-for-vms.adoc @@ -11,7 +11,7 @@ You can configure shared disks to allow multiple virtual machines (VMs) to share You configure disk sharing by exposing the storage as either of these types: * An ordinary VM disk -* A logical unit number (LUN) disk with an iSCSi connection and raw device mapping, as required for Windows Failover Clustering for shared volumes +* A logical unit number (LUN) disk with a SCSI connection and raw device mapping, as required for Windows Failover Clustering for shared volumes In addition to configuring disk sharing, you can also set an error policy for each ordinary VM disk or LUN disk. The error policy controls how the hypervisor behaves when an input/output error occurs on a disk Read or Write. 
diff --git a/virt/vm_networking/virt-connecting-vm-to-ovn-secondary-network.adoc b/virt/vm_networking/virt-connecting-vm-to-ovn-secondary-network.adoc index 6f28886a1f6a..df1d80aeb348 100644 --- a/virt/vm_networking/virt-connecting-vm-to-ovn-secondary-network.adoc +++ b/virt/vm_networking/virt-connecting-vm-to-ovn-secondary-network.adoc @@ -15,7 +15,7 @@ You can connect a virtual machine (VM) to an Open Virtual Network (OVN)-Kubernet ifndef::openshift-rosa,openshift-dedicated[] [NOTE] ==== -An OVN-Kubernetes secondary network is compatible with the xref:../../networking/multiple_networks/configuring-additional-network.adoc#compatibility-with-multi-network-policy_configuring-additional-network[multi-network policy API] which provides the `MultiNetworkPolicy` custom resource definition (CRD) to control traffic flow to and from VMs. You can use the `ipBlock` attribute to define network policy ingress and egress rules for specific CIDR blocks. +An OVN-Kubernetes secondary network is compatible with the xref:../../networking/multiple_networks/configuring-additional-network.adoc#compatibility-with-multi-network-policy_configuring-additional-network[multi-network policy API] which provides the `MultiNetworkPolicy` custom resource definition (CRD) to control traffic flow to and from VMs. ==== endif::openshift-rosa,openshift-dedicated[] @@ -46,14 +46,17 @@ include::modules/virt-creating-layer2-nad-cli.adoc[leveloffset=+2] include::modules/virt-creating-localnet-nad-cli.adoc[leveloffset=+2] +include::modules/virt-creating-nad-l2-overlay-console.adoc[leveloffset=+2] + +include::modules/virt-creating-nad-localnet-console.adoc[leveloffset=+2] + [id="attaching-vm-to-ovn-secondary-nw"] == Attaching a virtual machine to the OVN-Kubernetes secondary network You can attach a virtual machine (VM) to the OVN-Kubernetes secondary network interface by using the {product-title} web console or the CLI. 
include::modules/virt-attaching-vm-to-ovn-secondary-nw-cli.adoc[leveloffset=+2] -include::modules/virt-creating-nad-l2-overlay-console.adoc[leveloffset=+2] -include::modules/virt-creating-nad-localnet-console.adoc[leveloffset=+2] + ifndef::openshift-rosa,openshift-dedicated[] [role="_additional-resources"] diff --git a/virt/vm_networking/virt-dedicated-network-live-migration.adoc b/virt/vm_networking/virt-dedicated-network-live-migration.adoc index 3156ef061258..c3f26618c29f 100644 --- a/virt/vm_networking/virt-dedicated-network-live-migration.adoc +++ b/virt/vm_networking/virt-dedicated-network-live-migration.adoc @@ -20,4 +20,4 @@ include::modules/virt-selecting-migration-network-ui.adoc[leveloffset=+1] [role="_additional-resources"] [id="additional-resources_virt-migrating-vm-on-secondary-network"] == Additional resources -* xref:../../virt/live_migration/virt-configuring-live-migration#virt-configuring-live-migration-limits_virt-configuring-live-migration[Configuring live migration limits and timeouts] +* xref:../../virt/live_migration/virt-configuring-live-migration.adoc#virt-configuring-live-migration[Configuring live migration limits and timeouts] diff --git a/virt/vm_networking/virt-networking-overview.adoc b/virt/vm_networking/virt-networking-overview.adoc index 0b503fceed21..829350b77eaf 100644 --- a/virt/vm_networking/virt-networking-overview.adoc +++ b/virt/vm_networking/virt-networking-overview.adoc @@ -8,6 +8,32 @@ toc::[] {VirtProductName} provides advanced networking functionality by using custom resources and plugins. Virtual machines (VMs) are integrated with {product-title} networking and its ecosystem. +[NOTE] +==== +You cannot run {VirtProductName} on a single-stack IPv6 cluster. +==== + +The following figure illustrates the typical network setup of {VirtProductName}. Other configurations are also possible. 
+ +.{VirtProductName} networking overview +image::318_OpenShift_Virtualization_Networking_0423.png[{VirtProductName} networking architecture] + +image:darkcircle-1.png[20,20] Pods and VMs run on the same network infrastructure which allows you to easily connect your containerized and virtualized workloads. + +image:darkcircle-2.png[20,20] You can connect VMs to the default pod network and to any number of secondary networks. + +image:darkcircle-3.png[20,20] The default pod network provides connectivity between all its members, service abstraction, IP management, micro segmentation, and other functionality. + +image:darkcircle-4.png[20,20] Multus is a "meta" CNI plugin that enables a pod or virtual machine to connect to additional network interfaces by using other compatible CNI plugins. + +image:darkcircle-5.png[20,20] The default pod network is overlay-based, tunneled through the underlying machine network. + +image:darkcircle-6.png[20,20] The machine network can be defined over a selected set of network interface controllers (NICs). + +image:darkcircle-7.png[20,20] Secondary VM networks are typically bridged directly to a physical network, with or without VLAN encapsulation. + +image:darkcircle-8.png[20,20] Secondary VM networks can be defined on a dedicated set of NICs, as shown in Figure 1, or they can use the machine network. + include::modules/virt-networking-glossary.adoc[leveloffset=+1] @@ -32,6 +58,8 @@ endif::openshift-rosa,openshift-dedicated[] [id="secondary-network-config"] == Configuring VM secondary network interfaces +You can connect a virtual machine to a secondary network by using Linux bridge, SR-IOV, and OVN-Kubernetes CNI plugins. You can list multiple secondary networks and interfaces in the VM specification. It is not required to specify the primary pod network in the VM specification when connecting to a secondary network interface. 
+ // Hiding from ROSA/OSD as Linux Bridge not supported ifndef::openshift-rosa,openshift-dedicated[] xref:../../virt/vm_networking/virt-connecting-vm-to-linux-bridge.adoc#virt-connecting-vm-to-linux-bridge[Connecting a virtual machine to a Linux bridge network]:: @@ -85,6 +113,43 @@ endif::openshift-rosa,openshift-dedicated[] . xref:../../virt/vm_networking/virt-connecting-vm-to-ovn-secondary-network.adoc#attaching-vm-to-ovn-secondary-nw[Connect the VM to the OVN-Kubernetes secondary network] by adding the network details to the VM specification. +// Hiding from ROSA/OSD as Linux Bridge is not supported +ifndef::openshift-rosa,openshift-dedicated[] +[id="comparing-bridge-localnet"] +=== Comparing Linux bridge CNI and OVN-Kubernetes localnet topology + +The following table provides a comparison of features available when using the Linux bridge CNI versus the localnet topology for an OVN-Kubernetes plugin: + +.Linux bridge CNI compared to an OVN-Kubernetes localnet topology +[cols="1,1,1",options="header"] +|=== +|Feature +|Available on Linux bridge CNI +|Available on OVN-Kubernetes localnet + +|Layer 2 access to the underlay native network +|Only on secondary network interface controllers (NICs) +|Yes + +|Layer 2 access to underlay VLANs +|Yes +|Yes + +|Network policies +|No +|Yes + +|Managed IP pools +|No +|Yes + +|MAC spoof filtering +|Yes +|Yes + +|=== +endif::openshift-rosa,openshift-dedicated[] + // Hiding in ROSA/OSD as not supported ifndef::openshift-rosa,openshift-dedicated[] xref:../../virt/vm_networking/virt-hot-plugging-network-interfaces.adoc#virt-hot-plugging-network-interfaces[Hot plugging secondary network interfaces]:: diff --git a/welcome/index.adoc b/welcome/index.adoc index 249027e46e8b..b3d8a93ec05d 100644 --- a/welcome/index.adoc +++ b/welcome/index.adoc @@ -40,14 +40,14 @@ endif::[] ifdef::openshift-telco[] [.lead] -This documentation describes the telco core and telco RAN DU reference design specifications (RDS). 
[IMPORTANT] ==== -This content is shared with Red Hat telco partners only. +The telco core and telco RAN DU reference design specifications (RDS) are no longer published at this location. + +For the latest version of the telco RDS, see link:https://docs.openshift.com/container-platform/{product-version}/scalability_and_performance/telco_ref_design_specs/telco-ref-design-specs-overview.html[Telco core and RAN DU reference design specifications]. ==== -For documentation that is not telco-specific, see the link:https://docs.openshift.com/container-platform/latest/welcome/index.html[OpenShift Container Platform documentation]. endif::[] ifdef::openshift-dedicated[] @@ -70,7 +70,7 @@ ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] == Cluster installer activities Explore the following {product-title} installation tasks: -- **xref:../installing/index.adoc#ocp-installation-overview[{product-title} installation overview]**: Depending on the platform, you can install {product-title} on installer-provisioned or user-provisioned infrastructure. The {product-title} installation program provides the flexibility to deploy {product-title} on a range of different platforms. +- **xref:../installing/overview/index.adoc#ocp-installation-overview[{product-title} installation overview]**: Depending on the platform, you can install {product-title} on installer-provisioned or user-provisioned infrastructure. The {product-title} installation program provides the flexibility to deploy {product-title} on a range of different platforms. // PR open https://github.com/openshift/openshift-docs/pull/77474 //- **xref:../installing/installing_alibaba/installing-alibaba-assisted-installer[Installing a cluster on {alibaba} by using the Assisted Installer]**: On {alibaba}, you can install {product-title} by using the Assisted Installer. This is currently a Technology Preview feature only. 
@@ -136,19 +136,19 @@ endif::openshift-origin[] - **Install a cluster in an existing network**: If you use an existing Virtual Private Cloud (VPC) in xref:../installing/installing_aws/ipi/installing-aws-vpc.adoc#installing-aws-vpc[{aws-first}] or xref:../installing/installing_gcp/installing-gcp-vpc.adoc#installing-gcp-vpc[{gcp-short}] or an existing -xref:../installing/installing_azure/installing-azure-vnet.adoc#installing-azure-vnet[VNet] +xref:../installing/installing_azure/ipi/installing-azure-vnet.adoc#installing-azure-vnet[VNet] on Microsoft Azure, you can install a cluster. Also consider xref:../installing/installing_gcp/installing-gcp-shared-vpc.adoc#installation-gcp-shared-vpc-prerequisites_installing-gcp-shared-vpc[Installing a cluster on {gcp-short} into a shared VPC] - **Install a private cluster**: If your cluster does not require external internet access, you can install a private cluster on xref:../installing/installing_aws/ipi/installing-aws-private.adoc#installing-aws-private[{aws-first}], -xref:../installing/installing_azure/installing-azure-private.adoc#installing-aws-private[{azure-full}], +xref:../installing/installing_azure/ipi/installing-azure-private.adoc#installing-azure-private[{azure-full}], xref:../installing/installing_gcp/installing-gcp-private.adoc#installing-gcp-private[{gcp-short}], or xref:../installing/installing_ibm_cloud_public/preparing-to-install-on-ibm-cloud.adoc#preparing-to-install-on-ibm-cloud[{ibm-cloud-name}]. Internet access is still required to access the cloud APIs and installation media. - **xref:../installing/installing_bare_metal/installing-bare-metal.adoc#rhcos-install-iscsi-manual_installing-bare-metal[Installing RHCOS manually on an iSCSI boot device] and xref:../installing/installing_bare_metal/installing-bare-metal.adoc#rhcos-install-iscsi-ibft_installing-bare-metal[Installing RHCOS on an iSCSI boot device using iBFT]**: You can target iSCSI devices as the root disk for installation of {op-system}. 
Multipathing is also supported. -- **xref:../installing/installing-troubleshooting.adoc#installing-troubleshooting[Check installation logs]**: Access installation logs to evaluate issues that occur during {product-title} installation. +- **xref:../installing/validation_and_troubleshooting/installing-troubleshooting.adoc#installing-troubleshooting[Check installation logs]**: Access installation logs to evaluate issues that occur during {product-title} installation. - **xref:../web_console/web-console.adoc#web-console[Access {product-title}]**: Use credentials output at the end of the installation process to log in to the {product-title} cluster from the command line or web console. diff --git a/welcome/learn_more_about_openshift.adoc b/welcome/learn_more_about_openshift.adoc index cd0d2a02d12a..4c1c18f62bbf 100644 --- a/welcome/learn_more_about_openshift.adoc +++ b/welcome/learn_more_about_openshift.adoc @@ -41,7 +41,7 @@ Use the following sections to find content to help you learn about and use {prod |Learn about {product-title} |Deploy {product-title} |Manage {product-title} |Additional resources | link:https://www.openshift.com/blog/enterprise-kubernetes-with-openshift-part-one?extIdCarryOver=true&sc_cid=701f2000001Css5AAC[Enterprise Kubernetes with OpenShift] -| xref:../installing/installing-preparing.adoc#installing-preparing[Installing {product-title}] +| xref:../installing/overview/installing-preparing.adoc#installing-preparing[Installing {product-title}] | xref:../support/remote_health_monitoring/using-insights-to-identify-issues-with-your-cluster.adoc#using-insights-to-identify-issues-with-your-cluster[Using Insights to identify issues with your cluster] | xref:../support/getting-support.adoc#getting-support[Getting Support] diff --git a/welcome/oke_about.adoc b/welcome/oke_about.adoc index d9136b3d5c82..de2606d72380 100644 --- a/welcome/oke_about.adoc +++ b/welcome/oke_about.adoc @@ -215,8 +215,8 @@ derived from the istio.io open source project, is not 
supported in {oke}. Also, the Kourier Ingress Controller found in OpenShift Serverless is not supported on {oke}. -=== {sandboxed-containers-first} -{oke} does not include {sandboxed-containers-first}. Use {product-title} for this support. +=== {osc} +{oke} does not include {osc}. Use {product-title} for this support. === Developer experience With {oke}, the following capabilities are not supported: diff --git a/windows_containers/enabling-windows-container-workloads.adoc b/windows_containers/enabling-windows-container-workloads.adoc index 6688e173c5a8..27a20a7f08b3 100644 --- a/windows_containers/enabling-windows-container-workloads.adoc +++ b/windows_containers/enabling-windows-container-workloads.adoc @@ -28,7 +28,7 @@ Dual NIC is not supported on WMCO-managed Windows instances. [NOTE] ==== -Windows instances deployed by the WMCO are configured with the containerd container runtime. Because WMCO installs and manages the runtime, it is recommanded that you do not manually install containerd on nodes. +Windows instances deployed by the WMCO are configured with the containerd container runtime. Because WMCO installs and manages the runtime, it is recommended that you do not manually install containerd on nodes. ==== [role="_additional-resources"] @@ -40,6 +40,11 @@ Windows instances deployed by the WMCO are configured with the containerd contai You can install the Windows Machine Config Operator using either the web console or OpenShift CLI (`oc`). +[NOTE] +==== +Due to a limitation within the Windows operating system, `clusterNetwork` CIDR addresses of class E, such as `240.0.0.0`, are not compatible with Windows nodes. +==== + include::modules/installing-wmco-using-web-console.adoc[leveloffset=+2] include::modules/installing-wmco-using-cli.adoc[leveloffset=+2] @@ -52,9 +57,18 @@ include::modules/wmco-cluster-wide-proxy.adoc[leveloffset=+1] * xref:../networking/enable-cluster-wide-proxy.adoc#enable-cluster-wide-proxy[Configuring the cluster-wide proxy]. 
+include::modules/wmco-disconnected-cluster.adoc[leveloffset=+1] + +.Additional resources + +* xref:../installing/disconnected_install/index.adoc#installing-mirroring-disconnected-about[About disconnected installation mirroring] + +include::modules/images-configuration-registry-mirror.adoc[leveloffset=+2] + +include::modules/images-configuration-registry-mirror-configuring.adoc[leveloffset=+2] [role="_additional-resources"] == Additional resources -* xref:../installing/installing_azure/installing-azure-default.adoc#ssh-agent-using_installing-azure-default[Generating a key pair for cluster node SSH access] +* xref:../installing/installing_azure/ipi/installing-azure-default.adoc#ssh-agent-using_installing-azure-default[Generating a key pair for cluster node SSH access] * xref:../operators/admin/olm-adding-operators-to-cluster.adoc#olm-adding-operators-to-a-cluster[Adding Operators to a cluster]. diff --git a/windows_containers/scheduling-windows-workloads.adoc b/windows_containers/scheduling-windows-workloads.adoc index 3a4e9ee5a7a0..6b1f9002845a 100644 --- a/windows_containers/scheduling-windows-workloads.adoc +++ b/windows_containers/scheduling-windows-workloads.adoc @@ -22,11 +22,15 @@ include::modules/windows-pod-placement.adoc[leveloffset=+1] === Additional resources * xref:../nodes/scheduling/nodes-scheduler-about.adoc#nodes-scheduler-about[Controlling pod placement using the scheduler] +ifndef::openshift-dedicated,openshift-rosa[] * xref:../nodes/scheduling/nodes-scheduler-taints-tolerations.adoc#nodes-scheduler-taints-tolerations[Controlling pod placement using node taints] +endif::openshift-dedicated,openshift-rosa[] * xref:../nodes/scheduling/nodes-scheduler-node-selectors.adoc#nodes-scheduler-node-selectors[Placing pods on specific nodes using node selectors] include::modules/creating-runtimeclass.adoc[leveloffset=+1] include::modules/sample-windows-workload-deployment.adoc[leveloffset=+1] +include::modules/wmco-supported-csi-drivers.adoc[leveloffset=+1] 
+ include::modules/machineset-manually-scaling.adoc[leveloffset=+1]