diff --git a/.github/workflows/ansible-lint.yml b/.github/workflows/ansible-lint.yml deleted file mode 100644 index f511cd20..00000000 --- a/.github/workflows/ansible-lint.yml +++ /dev/null @@ -1,23 +0,0 @@ ---- -name: Ansible lint -permissions: - contents: read - -"on": - push: - branches: - - main - paths: - - 'playbooks/**' - pull_request: - paths: - - 'playbooks/**' - -jobs: - build: - name: Ansible Lint - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - name: Run ansible-lint - uses: ansible/ansible-lint@v24 diff --git a/.zuul.yaml b/.zuul.yaml deleted file mode 100644 index 6b5140c8..00000000 --- a/.zuul.yaml +++ /dev/null @@ -1,72 +0,0 @@ ---- -- job: - name: openstack-e2e-abstract - abstract: true - parent: openstack-access-base - description: | - An abstract job for e2e testing of the cluster stacks project. - This job is not intended to be run directly; instead, - other jobs must inherit from it. - pre-run: playbooks/dependencies.yaml - run: playbooks/openstack/e2e.yaml - cleanup-run: playbooks/openstack/cleanup.yaml # also executed when the job is canceled - vars: - wait_for_cluster_stack_resource: 120 # 2min - wait_for_clusteraddons: 120 # 2min - wait_for_cluster_stack: 1440 # 24min - wait_for_cluster: 600 # 10min - sonobouy: - enabled: false - scs_compliance: - enabled: false - -- job: - name: e2e-openstack-conformance - parent: openstack-e2e-abstract - description: | - Run e2e tests of the cluster-stacks project using - [sonobuoy](https://sonobuoy.io/) with mode `certified-conformance` and - SCS compliance checks, meaning it tests whether the Kubernetes - cluster conforms to CNCF and SCS requirements. - timeout: 10800 # 3h - vars: - wait_for_cluster: 1200 # 20min - sonobouy: - enabled: true - mode: certified-conformance - scs_compliance: - enabled: true - -- job: - name: e2e-openstack-quick - parent: openstack-e2e-abstract - description: | - Run e2e tests of the cluster-stacks project using - [sonobuoy](https://sonobuoy.io/) with mode `quick` and - SCS compliance checks. - timeout: 7200 # 2h - vars: - wait_for_cluster: 1200 # 20min - sonobouy: - enabled: true - mode: quick - scs_compliance: - enabled: true - - -- project: - name: SovereignCloudStack/cluster-stacks - default-branch: main - merge-mode: "squash-merge" - e2e-test: - jobs: - - e2e-openstack-conformance - unlabel-on-update-e2e-test: - jobs: - - noop - e2e-quick-test: - jobs: - - e2e-openstack-quick - unlabel-on-update-e2e-quick-test: - jobs: - - noop diff --git a/docs/continuous-integration.md b/docs/continuous-integration.md deleted file mode 100644 index 7cc53285..00000000 --- a/docs/continuous-integration.md +++ /dev/null @@ -1,159 +0,0 @@ -# Continuous integration - -The `cluster-stacks` project uses the [SCS Zuul](https://zuul.scs.community) CI platform to -drive its continuous integration tests. The project is registered under the [SCS tenant](https://zuul.scs.community/t/SCS/projects) -and is therefore able to use a set of pre-defined pipelines, jobs, and Ansible roles that the -SCS Zuul instance defines and imports. If you want to explore the currently available SCS pipelines, -visit the [SCS zuul-config](https://github.com/SovereignCloudStack/zuul-config) project. -If you want to see the full list of available jobs, visit the [SCS Zuul UI](https://zuul.scs.community/t/SCS/jobs). -And if you are looking for a handy Ansible role that SCS Zuul imports, visit the [source](https://opendev.org/zuul/zuul-jobs/src/branch/master/roles).
- -Refer to the SCS [Zuul user guide](https://github.com/SovereignCloudStack/docs/blob/main/contributor-docs/operations/operations/zuul-ci-cd-quickstart-user-guide.md) and/or the -[Zuul docs](https://zuul-ci.org/docs/) for further details on how to define and use Zuul -CI/CD pipelines and jobs. - -> [!NOTE] -> If you are interested in the Zuul CI platform and want to deploy your own development instance of it, -> then read the official [quick-start](https://zuul-ci.org/docs/zuul/latest/tutorials/quick-start.html) manual -> or visit [this](https://github.com/matofederorg/zuul-config?tab=readme-ov-file#zuul-ci) tutorial, which shows how to connect -> the Zuul CI platform with a GitHub organization. - -## Configuration - -SCS Zuul automatically recognizes the `.zuul.yaml` configuration file located in the -cluster-stacks repository root. This file informs Zuul about the project's [default-branch](https://zuul-ci.org/docs/zuul/latest/config/project.html#attr-project.default-branch) and -preferred [merge-mode](https://zuul-ci.org/docs/zuul/latest/config/project.html#attr-project.merge-mode). -It also references the [SCS Zuul pipelines](https://github.com/SovereignCloudStack/zuul-config) and -their jobs used by the cluster-stacks project. The jobs then link Ansible playbooks that contain -the tasks for the actual CI testing. - -See the relevant CI configuration files: - -```text -├── .zuul.yaml -├── playbooks -│ ├── dependencies.yaml -│ ├── openstack -│ │ ├── e2e.yaml -│ │ ├── templates -│ │ │ ├── mgmt-cluster-config.yaml.j2 -│ │ │ ├── cluster.yaml.j2 -│ │ │ └── cluster-stack-template.yaml.j2 -``` - -## Pipelines - -This section describes the [SCS Zuul pipelines](https://github.com/SovereignCloudStack/zuul-config/blob/main/zuul.d/gh_pipelines.yaml) that are used by the cluster-stacks project. - -- `e2e-test` - - It is triggered by applying the `e2e-test` label to an open PR - - It executes the `e2e-openstack-conformance` job - - It applies the PR label `successful-e2e-test` and leaves an informative PR comment when the `e2e-openstack-conformance` job succeeds - - It applies the PR label `failed-e2e-test` and leaves an informative PR comment when the `e2e-openstack-conformance` job fails - - It applies the PR label `cancelled-e2e-test` and leaves an informative PR comment when the `e2e-openstack-conformance` job is canceled - -- `unlabel-on-update-e2e-test` - - It is triggered by a PR update, but only when the PR contains the `successful-e2e-test` label - - It ensures that any PR update invalidates a previous successful e2e test - - It removes the `successful-e2e-test` label from the PR - -- `e2e-quick-test` - - It is triggered by applying the `e2e-quick-test` label to an open PR - - It executes the `e2e-openstack-quick` job - - It applies the PR label `successful-e2e-quick-test` and leaves an informative PR comment when the `e2e-openstack-quick` job succeeds - - It applies the PR label `failed-e2e-quick-test` and leaves an informative PR comment when the `e2e-openstack-quick` job fails - - It applies the PR label `cancelled-e2e-quick-test` and leaves an informative PR comment when the `e2e-openstack-quick` job is canceled - -- `unlabel-on-update-e2e-quick-test` - - It is triggered by a PR update, but only when the PR contains the `successful-e2e-quick-test` label - - It ensures that any PR update invalidates a previous successful e2e test - - It removes the `successful-e2e-quick-test` label from the PR - -## Jobs - -This section describes the Zuul jobs defined within the cluster-stacks project and linked in the above pipelines.
- -- `e2e-openstack-conformance` - - It runs a sonobuoy conformance test against a Kubernetes cluster spawned by a specific cluster-stack - - This job is a child job of `openstack-access-base`, which makes OpenStack credentials - available on the Zuul worker node. The parent job also defines a Zuul semaphore, `semaphore-openstack-access`, - which ensures that a maximum of three `openstack-access-base` jobs (or their children) can run at a time - - The high-level `e2e-openstack-conformance` job steps are: - - Pre-run playbook `dependencies.yaml` installs project prerequisites, e.g. clusterctl, KinD, csctl, etc. - - Main playbook `e2e.yaml` spawns a k8s workload cluster using a specific cluster-stack in OpenStack, runs the sonobuoy conformance test and the SCS compliance test, and cleans up the created k8s workload cluster - -- `e2e-openstack-quick` - - It runs a sonobuoy quick test against a Kubernetes cluster spawned by a specific cluster-stack - - This job is a child job of `openstack-access-base`, which makes OpenStack credentials - available on the Zuul worker node. The parent job also defines a Zuul semaphore, `semaphore-openstack-access`, - which ensures that a maximum of three `openstack-access-base` jobs (or their children) can run at a time - - The high-level `e2e-openstack-quick` job steps are: - - Pre-run playbook `dependencies.yaml` installs project prerequisites, e.g. clusterctl, KinD, csctl, etc. - - Main playbook `e2e.yaml` spawns a k8s workload cluster using a specific cluster-stack in OpenStack, runs the sonobuoy quick test and the SCS compliance test, and cleans up the created k8s workload cluster - -### Secrets - -The parent job `openstack-access-base`, from which the e2e jobs inherit, defines the secret variable `openstack-application-credential`. -This secret is stored directly in the [SCS/zuul-config repository](https://github.com/SovereignCloudStack/zuul-config/blob/main/zuul.d/secrets.yaml) in an encrypted form. It contains OpenStack application credentials to access the OpenStack project dedicated to CI testing. - -This secret is encrypted with the SCS/zuul-config repository RSA key that has been generated by the SCS Zuul instance, -so only the SCS Zuul instance is able to decrypt it (read the [docs](https://zuul-ci.org/docs/zuul/latest/project-config.html#encryption)). - -If you want to re-generate the mentioned secret or add another one using the SCS/zuul-config repository RSA key, follow the instructions below: - -- Install zuul-client - -```bash -pip install zuul-client -``` - -- Encrypt the string "super-secret" with the SCS/zuul-config repository public key from SCS Zuul - -```bash -echo -n "super-secret" | \ - zuul-client --zuul-url https://zuul.scs.community encrypt \ - --tenant SCS \ - --project github.com/SovereignCloudStack/zuul-config -``` - -### Job customization - -In a pull request (PR), you may want to run the end-to-end (e2e) test against the specific cluster-stack you are changing or adding, without modifying the `cluster_stack` variable in the `e2e.yaml` file in the repository. - -To achieve this, include the following text in the body of the PR: - -```text - ```ZUUL_CONFIG - cluster_stack = "openstack-alpha-1-29" - ``` -``` - -> [!NOTE] -> Please note that only cluster-stacks for OpenStack are currently supported. - -### FAQ - -#### How should developers/reviewers proceed if they want to CI test this project? - -A developer initiates a PR as usual. If a reviewer deems that the PR requires e2e testing, they can apply a specific label to the PR.
Currently, the following labels can be applied: - -- `e2e-test` (for comprehensive end-to-end (e2e) testing, including Kubernetes (k8s) workload cluster creation, execution of Sonobuoy conformance and SCS compliance tests, and cluster deletion.) -- `e2e-quick-test` (for a quicker end-to-end (e2e) test, including Kubernetes (k8s) workload cluster creation, execution of Sonobuoy quick and SCS compliance tests, and cluster deletion.) - -After the e2e test has completed, the reviewer can examine the test results and respond accordingly, such as approving the PR if everything appears to be in order or requesting changes. Sonobuoy test results, along with a link to the e2e logs, are conveyed back to the PR via a comment. Additionally, the PR is labeled appropriately based on the overall e2e test results, using labels like -`successful-e2e-test`, `successful-e2e-quick-test`, `failed-e2e-test`, or `failed-e2e-quick-test`. - -#### Why do we use a PR `label` as an e2e pipeline trigger instead of e.g. a PR `comment`? - -We consider PR labels to be a more secure pipeline trigger compared to, for example, PR comments. -PR labels can only be applied by developers with [triage](https://docs.github.com/en/organizations/managing-user-access-to-your-organizations-repositories/managing-repository-roles/repository-roles-for-an-organization#permissions-for-each-role) repository access or higher. -In contrast, PR comments can be added by anyone with a GitHub account. - -Members of the SCS GitHub organization are automatically granted 'write' access to SCS repositories. Consequently, the PR label mechanism ensures that only SCS organization members can trigger e2e pipelines. - -#### How do we ensure that any PR update invalidates a previous successful e2e test? - -In fact, two mechanisms ensure the invalidation of a previously successful test when a PR is updated. - -Firstly, the `unlabel-on-update-*` pipelines remove the `successful-*` label -from the PR when it's updated after a successful e2e test has finished. Secondly, if an e2e test is in progress and the PR is updated, the currently running e2e test is canceled, the `successful-*` label is removed (if it exists), and the `cancelled-*` label is applied along with an informative PR comment to inform the reviewer about the situation.
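-A simplified, illustrative sketch of how such a label-triggered pipeline can be declared with Zuul's GitHub driver is shown below. This is only a sketch for orientation; the authoritative pipeline definitions live in the [SCS zuul-config](https://github.com/SovereignCloudStack/zuul-config/blob/main/zuul.d/gh_pipelines.yaml) repository, and the trigger/reporter details there may differ:
-
-```yaml
-- pipeline:
-    name: e2e-test
-    manager: independent
-    trigger:
-      github:
-        # fire only when a reviewer applies the e2e-test label to the PR
-        - event: pull_request
-          action: labeled
-          label: e2e-test
-    success:
-      github:
-        comment: true             # report results back to the PR via a comment
-        label:
-          - successful-e2e-test   # mark the PR as successfully tested
-    failure:
-      github:
-        comment: true
-        label:
-          - failed-e2e-test
-```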
diff --git a/playbooks/dependencies.yaml b/playbooks/dependencies.yaml deleted file mode 100644 index a4c5dd8c..00000000 --- a/playbooks/dependencies.yaml +++ /dev/null @@ -1,61 +0,0 @@ ---- -- name: Ensure cluster stacks dependencies - hosts: all - vars: - kind_version: "0.22.0" - kubectl_version: "1.29.3" - clusterctl_version: "1.7.2" - helm_version: "3.14.4" - yq_version: "4.44.1" - envsubst_version: "1.4.2" - install_dir: "{{ ansible_user_dir }}/.local/bin" - roles: # https://opendev.org/zuul/zuul-jobs - - role: ensure-docker - - role: ensure-go - vars: - go_version: 1.21.6 - environment: - PATH: "{{ install_dir }}:{{ ansible_env.PATH }}" - tasks: - - name: Make sure installation directory exists - ansible.builtin.file: - path: "{{ install_dir }}" - state: directory - mode: 0755 - - name: Install clusterctl - ansible.builtin.get_url: - url: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v{{ clusterctl_version }}/clusterctl-linux-amd64" - dest: "{{ install_dir }}/clusterctl" - mode: "+x" - - name: Install envsubst - ansible.builtin.get_url: - url: "https://github.com/a8m/envsubst/releases/download/v{{ envsubst_version }}/envsubst-Linux-x86_64" - dest: "{{ install_dir }}/envsubst" - mode: "+x" - - name: Install yq - ansible.builtin.get_url: - url: "https://github.com/mikefarah/yq/releases/download/v{{ yq_version }}/yq_linux_amd64" - dest: "{{ install_dir }}/yq" - mode: "+x" - - name: Install KinD - ansible.builtin.get_url: - url: "https://kind.sigs.k8s.io/dl/v{{ kind_version }}/kind-linux-amd64" - dest: "{{ install_dir }}/kind" - mode: "+x" - - name: Install kubectl - ansible.builtin.get_url: - url: "https://dl.k8s.io/release/v{{ kubectl_version }}/bin/linux/amd64/kubectl" - dest: "{{ install_dir }}/kubectl" - mode: "+x" - # TODO: Install csctl and csctl-openstack from the release once it is available - - name: Install csctl and csctl-openstack - ansible.builtin.import_tasks: tasks/csctl.yaml - - name: Install helm - ansible.builtin.unarchive: - src: "https://get.helm.sh/helm-v{{ helm_version }}-linux-amd64.tar.gz" - dest: "{{ install_dir }}" - extra_opts: "--strip-components=1" - mode: "+x" - remote_src: true - args: - creates: "{{ install_dir }}/helm" diff --git a/playbooks/openstack/cleanup.yaml b/playbooks/openstack/cleanup.yaml deleted file mode 100644 index 8f508ffe..00000000 --- a/playbooks/openstack/cleanup.yaml +++ /dev/null @@ -1,48 +0,0 @@ ---- -- name: Cleanup - hosts: all - vars: - cloud_name: "{{ cloud }}" # inherited from the parent job - environment: - PATH: "{{ ansible_user_dir }}/.local/bin:{{ ansible_env.PATH }}" - tasks: - - name: Delete server groups - when: scs_compliance.enabled - block: - - name: List existing server groups - ansible.builtin.command: "openstack server group list -f value -c Name -c ID" - register: server_groups - environment: - OS_CLOUD: "{{ cloud_name }}" - changed_when: true - - name: Parse test-cluster-controller srvgrp and assign ID to srvgrp_controller - ansible.builtin.set_fact: - srvgrp_controller: "{{ item.split(' ')[0] }}" - loop: "{{ server_groups.stdout_lines }}" - when: "server_groups is defined and server_groups.stdout_lines | length > 0 and 'test-cluster-controller' in item.split(' ')" - - name: Parse test-cluster-worker srvgrp and assign ID to srvgrp_worker - ansible.builtin.set_fact: - srvgrp_worker: "{{ item.split(' ')[0] }}" - loop: "{{ server_groups.stdout_lines }}" - when: "server_groups is defined and server_groups.stdout_lines | length > 0 and 'test-cluster-worker' in item.split(' ')" - - name: Delete
Server Group for worker nodes - ansible.builtin.command: "openstack server group delete {{ srvgrp_worker }}" - environment: - OS_CLOUD: "{{ cloud_name }}" - when: srvgrp_worker is defined - changed_when: true - - name: Delete Server Group for control-plane nodes - ansible.builtin.command: "openstack server group delete {{ srvgrp_controller }}" - environment: - OS_CLOUD: "{{ cloud_name }}" - when: srvgrp_controller is defined - changed_when: true - - name: Check if test-cluster exists - ansible.builtin.command: "kubectl get cluster test-cluster" - register: cluster_check - ignore_errors: true - changed_when: true - - name: Cleanup workload cluster - ansible.builtin.command: "kubectl delete -f {{ ansible_user_dir }}/cluster.yaml" - when: cluster_check.rc == 0 - changed_when: true diff --git a/playbooks/openstack/e2e.yaml b/playbooks/openstack/e2e.yaml deleted file mode 100644 index f71f6088..00000000 --- a/playbooks/openstack/e2e.yaml +++ /dev/null @@ -1,246 +0,0 @@ ---- -- name: Cluster stack OpenStack E2E test - hosts: all - vars: - cluster_stack_path: "providers/openstack/scs" - project_dir: "{{ ansible_user_dir }}/{{ zuul.project.src_dir }}" - cluster_stack_release_dir: "{{ ansible_user_dir }}/.release" - cluster_manifest_dir: "{{ ansible_user_dir }}/cluster_manifest" - cluster_stack_release_container_dir: "/.release" - openstack_csp_helper_chart_version: v0.6.0 - openstack_csp_helper_chart_url: "https://github.com/SovereignCloudStack/openstack-csp-helper/releases/download/{{ openstack_csp_helper_chart_version }}/openstack-csp-helper.tgz" - capo_version: "v0.10.3" - openstackclient_version: "6.6.0" - - k8s_management_name: "management" - k8s_management_version: "v1.29.2@sha256:51a1434a5397193442f0be2a297b488b6c919ce8a3931be0ce822606ea5ca245" - k8s_management_cluster_wait_for: "180" - - environment: - PATH: "{{ ansible_user_dir }}/.local/bin:{{ ansible_env.PATH }}" - tasks: - - name: Determine cluster stack directory - block: - - name: Get PR details - ansible.builtin.uri: - url: "https://api.github.com/repos/{{ zuul.project.name }}/pulls/{{ zuul.change }}" - body_format: json - headers: - Accept: application/vnd.github+json - X-GitHub-Api-Version: 2022-11-28 - register: pull_request - when: zuul.change is defined # execute when the e2e pipeline is initiated on a PR - - name: Set facts when the e2e pipeline is initiated on a PR - ansible.builtin.set_fact: - git_branch_name: "{{ pull_request.json.head.ref }}" - git_repository_url: "{{ pull_request.json.head.repo.clone_url }}" - when: zuul.change is defined # execute when the e2e pipeline is initiated on a PR - - name: Check out the PR branch - ansible.builtin.git: - repo: "{{ git_repository_url }}" - clone: false - dest: "{{ project_dir }}" - version: "{{ git_branch_name }}" - when: zuul.change is defined # execute when the e2e pipeline is initiated on a PR - - name: Make sure directory structure exists - ansible.builtin.file: - path: "{{ item }}" - state: directory - mode: 0755 - loop: - - "{{ cluster_manifest_dir }}" - - "{{ cluster_stack_release_dir }}" - - name: Extract Zuul config - ansible.builtin.set_fact: - zuul_config: "{{ zuul.change_message | regex_search('(?s)```ZUUL_CONFIG(.+?)```', '\\1', multiline=true) }}" - when: zuul.change_message is defined - - name: Trim Zuul config - ansible.builtin.set_fact: - zuul_config: "{{ zuul_config | first | split('\n') | map('trim') | join('\n') }}" - when: zuul_config is defined and zuul_config is not none and zuul_config != '' - - name: Extract cluster_stack_folder from Zuul config -
ansible.builtin.set_fact: - cluster_stack_folder: "{{ zuul_config | regex_search('cluster_stack_folder\\s*=\\s*\"([^\"]+)\"', '\\1') | first }}" - when: - - zuul_config is defined and zuul_config is not none and zuul_config != '' - - zuul_config | regex_search('cluster_stack_folder\\s*=\\s*\"([^\"]+)\"') is not none - - name: Override cluster_stack_path if cluster_stack_folder was extracted - ansible.builtin.set_fact: - cluster_stack_path: "providers/openstack/{{ cluster_stack_folder }}" - when: cluster_stack_folder is defined - - name: Create cluster stack - ansible.builtin.command: "csctl create {{ project_dir }}/{{ cluster_stack_path }} --output {{ cluster_stack_release_dir }} --mode hash" - args: - chdir: "{{ project_dir }}" - changed_when: true - - name: Ensure management cluster - block: - - name: Create management cluster config file - ansible.builtin.template: - src: "mgmt-cluster-config.yaml.j2" - dest: "{{ ansible_user_dir }}/mgmt-cluster-config.yaml" - mode: "0644" - - name: Create management cluster - ansible.builtin.command: "kind create cluster --config {{ ansible_user_dir }}/mgmt-cluster-config.yaml" - changed_when: true - - name: Wait for all system pods in the management cluster to become ready - ansible.builtin.command: "kubectl wait -n kube-system --for=condition=Ready --timeout={{ k8s_management_cluster_wait_for }}s pod --all" - changed_when: true - - name: Install CAPI and CAPO - ansible.builtin.command: "clusterctl init --infrastructure openstack:{{ capo_version }}" - changed_when: true - environment: - CLUSTER_TOPOLOGY: "true" - EXP_CLUSTER_RESOURCE_SET: "true" - - name: Install CSO and mount cluster stack release - ansible.builtin.import_tasks: ../tasks/cso.yaml - vars: - release_dir: "{{ cluster_stack_release_container_dir }}" - - name: Install CSPO and mount cluster stack release - ansible.builtin.import_tasks: ../tasks/cspo.yaml - vars: - release_dir: "{{ cluster_stack_release_container_dir }}" - - name: Read Zuul's clouds.yaml content, base64 encoded - ansible.builtin.slurp: - src: /etc/openstack/clouds.yaml - register: clouds_yaml_b64 - - name: Read Zuul's secure.yaml content, base64 encoded - ansible.builtin.slurp: - src: /etc/openstack/secure.yaml - register: secure_yaml_b64 - - name: Combine clouds_yaml_b64 and secure_yaml_b64 to produce full clouds.yaml - ansible.builtin.set_fact: - clouds_yaml_full: "{{ clouds_yaml_b64.content | b64decode | from_yaml | ansible.builtin.combine(secure_yaml_b64.content | b64decode | from_yaml, recursive=true) }}" - no_log: true - - name: Write clouds.yaml file - ansible.builtin.copy: - content: "{{ clouds_yaml_full | to_yaml }}" - dest: "{{ ansible_user_dir }}/clouds.yaml" - mode: "0644" - - name: Create secrets and ClusterResourceSet for the clusterstacks approach - ansible.builtin.shell: - cmd: | - set -o pipefail - helm upgrade -i clusterstacks-credentials {{ openstack_csp_helper_chart_url }} -f {{ ansible_user_dir }}/clouds.yaml - executable: /bin/bash - changed_when: true - - name: Find the directory containing metadata.yaml - ansible.builtin.find: - paths: "{{ cluster_stack_release_dir }}" - patterns: "metadata.yaml" - recurse: true - register: found_files - - name: Read metadata.yaml - ansible.builtin.slurp: - src: "{{ found_files.files[0].path }}" - register: metadata_content - when: found_files.matched > 0 - - name: Get cluster-stack and k8s version - ansible.builtin.set_fact: - cluster_stack_version: "{{ (metadata_content['content'] | b64decode | from_yaml)['versions']['clusterStack'] }}" - k8s_version: "{{
(metadata_content['content'] | b64decode | from_yaml)['versions']['kubernetes'] }}" - when: metadata_content is defined - - name: Parse k8s version to major.minor - ansible.builtin.set_fact: - k8s_version_major_minor: "{{ k8s_version | regex_replace('^v?([0-9]+\\.[0-9]+)\\..*', '\\1') }}" - when: k8s_version is defined - - name: Read the csctl.yaml file - ansible.builtin.slurp: - src: "{{ project_dir }}/{{ cluster_stack_path }}/csctl.yaml" - register: csctl_file_content - - name: Parse the csctl.yaml content - ansible.builtin.set_fact: - csctl_data: "{{ csctl_file_content.content | b64decode | from_yaml }}" - - name: Register cluster_stack_version_name - ansible.builtin.set_fact: - cluster_stack_version_name: "{{ csctl_data.config.clusterStackName }}" - - name: Format the kubernetesVersion for cluster_stack_name - ansible.builtin.set_fact: - k8s_version_formatted: "{{ k8s_version_major_minor | regex_replace('\\.', '-') }}" - - name: Create the cluster_stack_name - ansible.builtin.set_fact: - cluster_stack_name: "{{ csctl_data.config.provider.type }}-{{ csctl_data.config.clusterStackName }}-{{ k8s_version_formatted }}" - - name: Extract cloud name from clouds_yaml_full - ansible.builtin.set_fact: - cloud_name: "{{ clouds_yaml_full.clouds.keys() | first }}" - when: clouds_yaml_full.clouds is defined and clouds_yaml_full.clouds | dict2items | length == 1 - - name: Generate clusterstack YAML - ansible.builtin.template: - src: "cluster-stack-template.yaml.j2" - dest: "{{ ansible_user_dir }}/clusterstack.yaml" - mode: "0644" - - name: Apply cluster-stack template - ansible.builtin.command: "kubectl apply -f {{ ansible_user_dir }}/clusterstack.yaml" - changed_when: true - - name: Pause to allow the clusterstack resource to be created (default is 2 minutes) - ansible.builtin.pause: - seconds: "{{ wait_for_cluster_stack_resource }}" - - name: Wait for cluster-stack to be ready - ansible.builtin.command: "kubectl wait clusterstack/clusterstack --for=condition=Ready --timeout={{ wait_for_cluster_stack }}s" - changed_when: true - - name: Create k8s workload cluster and execute checks - block: - - name: Create Server Groups for nodes when scs_compliance tests are enabled - ansible.builtin.import_tasks: ../tasks/create_server_groups.yaml - when: scs_compliance.enabled - - name: Generate cluster YAML - ansible.builtin.template: - src: "cluster.yaml.j2" - dest: "{{ ansible_user_dir }}/cluster.yaml" - mode: "0644" - vars: - worker_server_group_id: "{{ srvgrp_worker.stdout | default('') }}" - worker_server_group_id_value: "{% if worker_server_group_id == '' %}\"\"{% else %}{{ worker_server_group_id }}{% endif %}" - controller_server_group_id: "{{ srvgrp_controller.stdout | default('') }}" - controller_server_group_id_value: "{% if controller_server_group_id == '' %}\"\"{% else %}{{ controller_server_group_id }}{% endif %}" - - name: Apply cluster template - ansible.builtin.command: "kubectl apply -f {{ ansible_user_dir }}/cluster.yaml" - changed_when: true - - name: Get kubeadmcontrolplane name - ansible.builtin.command: "kubectl get kubeadmcontrolplane -o=jsonpath='{.items[0].metadata.name}'" - retries: 6 - delay: 10 - until: kcp_name.rc == 0 - register: kcp_name - changed_when: true - - name: Wait for the kubeadmcontrolplane to become Available - ansible.builtin.command: "kubectl wait kubeadmcontrolplane/{{ kcp_name.stdout }} --for=condition=Available --timeout={{ wait_for_cluster }}s" - changed_when: true - - name: Wait for control-plane machines to be ready - ansible.builtin.command: "kubectl wait machines
--for=condition=Ready -l cluster.x-k8s.io/control-plane,cluster.x-k8s.io/cluster-name=test-cluster --timeout={{ wait_for_cluster }}s" - changed_when: true - - name: Get kubeconfig of the workload k8s cluster - ansible.builtin.shell: "clusterctl get kubeconfig test-cluster > {{ cluster_manifest_dir }}/kubeconfig-test-cluster" - changed_when: true - - name: Wait for clusteraddons resource to become ready - ansible.builtin.command: "kubectl wait clusteraddons/cluster-addon-test-cluster --for=condition=Ready --timeout={{ wait_for_clusteraddons }}s" - changed_when: true - - name: Wait for all system pods in the workload k8s cluster to become ready - ansible.builtin.command: "kubectl wait -n kube-system --for=condition=Ready --timeout={{ wait_for_cluster }}s pod --all" - environment: - KUBECONFIG: "{{ cluster_manifest_dir }}/kubeconfig-test-cluster" - changed_when: true - - name: Import sonobouy tasks - ansible.builtin.import_tasks: ../tasks/sonobouy.yaml - when: sonobouy.enabled - - name: Import scs_compliance pre-tasks - ansible.builtin.import_tasks: ../tasks/label_nodes.yaml - vars: - os_cloud: "{{ cloud_name }}" - kubeconfig_path: "{{ cluster_manifest_dir }}/kubeconfig-test-cluster" - when: scs_compliance.enabled - - name: Import scs_compliance tasks - ansible.builtin.import_tasks: ../tasks/scs_compliance.yaml - vars: - kubeconfig_path: "{{ cluster_manifest_dir }}/kubeconfig-test-cluster" - when: scs_compliance.enabled - always: - - name: Delete Server Groups - ansible.builtin.command: "openstack server group delete {{ srvgrp_worker.stdout }} {{ srvgrp_controller.stdout }}" - environment: - OS_CLOUD: "{{ cloud_name }}" - changed_when: true - when: scs_compliance.enabled - - name: Cleanup workload cluster - ansible.builtin.command: "kubectl delete -f {{ ansible_user_dir }}/cluster.yaml" - changed_when: true diff --git a/playbooks/openstack/files/patch_csx_deployment.sh b/playbooks/openstack/files/patch_csx_deployment.sh deleted file mode 100755 index fbc31ddb..00000000 --- a/playbooks/openstack/files/patch_csx_deployment.sh +++ /dev/null @@ -1,56 +0,0 @@ -#!/usr/bin/env bash -# ./patch_csx_deployment.sh csx_manifest.yaml HOST_PATH_DIR -# -# The script adjusts a CSO or CSPO manifest to use local mode. -# It injects the cluster stack release assets into the CSO or CSPO containers via a hostPath volume and mount of the given HOST_PATH_DIR, and -# enables local mode for them. - -if test -z "$1"; then echo "ERROR: Need CSO or CSPO manifest file arg" 1>&2; exit 1; fi -if test -z "$2"; then echo "ERROR: Need HOST_PATH_DIR arg" 1>&2; exit 1; fi - -# Test whether the --local=true argument is already present in the CSX manager container args -local_mode_exist=$(yq 'select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "manager").args[] | select(.
== "--local=true")' "$1") - -if test -z "$local_mode_exist"; then - echo "Enabling local mode for the CSX manager container" - yq 'select(.kind == "Deployment").spec.template.spec.containers[] |= select(.name == "manager").args += ["--local=true"]' -i "$1" -else - echo "Local mode is already enabled in the CSX manager container" -fi - -export HOST_PATH_DIR=$2 -export VOLUME_SNIPPET=volume_snippet.yaml -export VOLUME_MOUNT_SNIPPET=volume_mount_snippet.yaml - -yq --null-input ' - { - "name": "cluster-stacks-volume", - "hostPath": - { - "path": env(HOST_PATH_DIR), - "type": "Directory" - } - }' > $VOLUME_SNIPPET - -yq --null-input ' - { - "name": "cluster-stacks-volume", - "mountPath": "/tmp/downloads/cluster-stacks", - "readOnly": true - }' > $VOLUME_MOUNT_SNIPPET - -# Test whether the mountPath: /tmp/downloads/cluster-stacks is already present in CSX manager container mounts -mount_exist=$(yq 'select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "manager").volumeMounts[] | select(.mountPath == "/tmp/downloads/cluster-stacks")' "$1") - -if test -z "$mount_exist"; then - echo "Injecting volume and volume mount to the CSX manager container" - yq 'select(.kind == "Deployment").spec.template.spec.containers[] |= select(.name == "manager").volumeMounts += [load(env(VOLUME_MOUNT_SNIPPET))]' -i "$1" - yq 'select(.kind == "Deployment").spec.template.spec.volumes += [load(env(VOLUME_SNIPPET))]' -i "$1" -else - echo "Mount path /tmp/downloads/cluster-stacks is already present in the CSX manager container" -fi - -rm $VOLUME_SNIPPET -rm $VOLUME_MOUNT_SNIPPET - -exit 0 diff --git a/playbooks/openstack/templates/cluster-stack-template.yaml.j2 b/playbooks/openstack/templates/cluster-stack-template.yaml.j2 deleted file mode 100644 index 0c451d9b..00000000 --- a/playbooks/openstack/templates/cluster-stack-template.yaml.j2 +++ /dev/null @@ -1,27 +0,0 @@ -apiVersion: clusterstack.x-k8s.io/v1alpha1 -kind: ClusterStack -metadata: - name: clusterstack -spec: - provider: openstack - name: {{ cluster_stack_version_name }} - kubernetesVersion: "{{ k8s_version_major_minor }}" - channel: custom - autoSubscribe: false - providerRef: - apiVersion: infrastructure.clusterstack.x-k8s.io/v1alpha1 - kind: OpenStackClusterStackReleaseTemplate - name: cspotemplate - versions: - - "{{ cluster_stack_version }}" ---- -apiVersion: infrastructure.clusterstack.x-k8s.io/v1alpha1 -kind: OpenStackClusterStackReleaseTemplate -metadata: - name: cspotemplate -spec: - template: - spec: - identityRef: - kind: Secret - name: {{ cloud_name }} diff --git a/playbooks/openstack/templates/cluster.yaml.j2 b/playbooks/openstack/templates/cluster.yaml.j2 deleted file mode 100644 index 9510da7f..00000000 --- a/playbooks/openstack/templates/cluster.yaml.j2 +++ /dev/null @@ -1,41 +0,0 @@ -apiVersion: cluster.x-k8s.io/v1beta1 -kind: Cluster -metadata: - name: test-cluster - labels: - managed-secret: cloud-config -spec: - clusterNetwork: - pods: - cidrBlocks: - - 192.168.0.0/16 - serviceDomain: cluster.local - services: - cidrBlocks: - - 10.96.0.0/12 - topology: - variables: - - name: controller_flavor - value: "SCS-2V-4-50" - - name: worker_flavor - value: "SCS-2V-4-50" - - name: external_id - value: "ebfe5546-f09f-4f42-ab54-094e457d42ec" # gx-scs - - name: cloud_name - value: {{ cloud_name }} - - name: secret_name - value: {{ cloud_name }} - - name: controller_server_group_id - value: {{ controller_server_group_id_value }} - - name: worker_server_group_id - value: {{ worker_server_group_id_value }} - class: {{ 
cluster_stack_name }}-{{ cluster_stack_version }} - controlPlane: - replicas: 3 - version: {{ k8s_version }} - workers: - machineDeployments: - - class: default-worker - failureDomain: nova - name: {{ cluster_stack_name }} - replicas: 3 diff --git a/playbooks/openstack/templates/mgmt-cluster-config.yaml.j2 b/playbooks/openstack/templates/mgmt-cluster-config.yaml.j2 deleted file mode 100644 index 71d9f3dc..00000000 --- a/playbooks/openstack/templates/mgmt-cluster-config.yaml.j2 +++ /dev/null @@ -1,12 +0,0 @@ ---- -kind: Cluster -apiVersion: kind.x-k8s.io/v1alpha4 -name: "{{ k8s_management_name }}" -nodes: -- role: control-plane - image: "kindest/node:{{ k8s_management_version }}" -- role: worker - image: "kindest/node:{{ k8s_management_version }}" - extraMounts: - - hostPath: "{{ cluster_stack_release_dir }}" - containerPath: "{{ cluster_stack_release_container_dir }}" diff --git a/playbooks/tasks/create_server_groups.yaml b/playbooks/tasks/create_server_groups.yaml deleted file mode 100644 index 5621aabf..00000000 --- a/playbooks/tasks/create_server_groups.yaml +++ /dev/null @@ -1,25 +0,0 @@ ---- -- name: Create Server Groups for nodes - block: - - name: Ensure pip is installed - ansible.builtin.package: - name: python3-pip - state: present - become: true - - name: Install openstack cli - ansible.builtin.pip: - name: - - "python-openstackclient=={{ openstackclient_version }}" - extra_args: --user - - name: Create Server Group for control-plane nodes - ansible.builtin.command: "openstack server group create --policy anti-affinity -f value -c id test-cluster-controller" - register: srvgrp_controller - environment: - OS_CLOUD: "{{ cloud_name }}" - changed_when: true - - name: Create Server Group for worker nodes - ansible.builtin.command: "openstack server group create --policy soft-anti-affinity -f value -c id test-cluster-worker" - register: srvgrp_worker - environment: - OS_CLOUD: "{{ cloud_name }}" - changed_when: true diff --git a/playbooks/tasks/csctl.yaml b/playbooks/tasks/csctl.yaml deleted file mode 100644 index 41471b07..00000000 --- a/playbooks/tasks/csctl.yaml +++ /dev/null @@ -1,41 +0,0 @@ ---- -- name: Install csctl and csctl-openstack - vars: - csctl_dir: "{{ ansible_user_dir }}/csctl" - csctl_openstack_dir: "{{ ansible_user_dir }}/csctl-openstack" - csctl_version: "0.0.3" - csctl_openstack_version: "0.0.1" - install_dir: "{{ ansible_user_dir }}/.local/bin" - block: - - name: Make sure csctl directory exists - ansible.builtin.file: - path: "{{ csctl_dir }}" - state: directory - mode: 0755 - - name: Make sure csctl-openstack directory exists - ansible.builtin.file: - path: "{{ csctl_openstack_dir }}" - state: directory - mode: 0755 - - name: Get csctl release - ansible.builtin.unarchive: - src: "https://github.com/SovereignCloudStack/csctl/releases/download/v{{ csctl_version }}/csctl_{{ csctl_version }}_linux_amd64.tar.gz" - dest: "{{ csctl_dir }}" - remote_src: true - - name: Get csctl-openstack release - ansible.builtin.unarchive: - src: "https://github.com/SovereignCloudStack/csctl-plugin-openstack/releases/download/v{{ csctl_openstack_version }}/csctl-plugin-openstack_{{ csctl_openstack_version }}_linux_amd64.tar.gz" - dest: "{{ csctl_openstack_dir }}" - remote_src: true - - name: Install csctl - ansible.builtin.copy: - src: "{{ csctl_dir }}/csctl" - dest: "{{ install_dir }}/csctl" - mode: "+x" - remote_src: true - - name: Install csctl-openstack - ansible.builtin.copy: - src: "{{ csctl_openstack_dir }}/csctl-openstack" - dest: "{{ install_dir }}/csctl-openstack" - mode: 
"+x" - remote_src: true diff --git a/playbooks/tasks/cso.yaml b/playbooks/tasks/cso.yaml deleted file mode 100644 index 8a93f430..00000000 --- a/playbooks/tasks/cso.yaml +++ /dev/null @@ -1,39 +0,0 @@ ---- -- name: Install CSO - vars: - cso_version: "0.1.0-alpha.5" - cso_dir: "{{ ansible_user_dir }}/cso" - cso_wait_for_pods: 240 - install_dir: "{{ ansible_user_dir }}/.local/bin" - block: - - name: Make sure CSO directory exists - ansible.builtin.file: - path: "{{ cso_dir }}" - state: directory - mode: 0755 - - name: Get CSO manifest - ansible.builtin.get_url: - url: "https://github.com/sovereignCloudStack/cluster-stack-operator/releases/download/v{{ cso_version }}/cso-infrastructure-components.yaml" - dest: "{{ cso_dir }}/cso-infrastructure-components.yaml" - mode: "+w" - - name: Patch the CSO deployment - enable the local mode and mount the cluster stack release - ansible.builtin.script: - cmd: "../files/patch_csx_deployment.sh {{ cso_dir }}/cso-infrastructure-components.yaml {{ release_dir }}" - executable: /bin/bash - changed_when: true - - name: Apply CSO manifest - ansible.builtin.shell: - cmd: | - set -o pipefail - cat {{ cso_dir }}/cso-infrastructure-components.yaml | {{ install_dir }}/envsubst | kubectl apply -f - - executable: /bin/bash - changed_when: true - environment: - GIT_PROVIDER_B64: Z2l0aHVi # github - GIT_ORG_NAME_B64: U292ZXJlaWduQ2xvdWRTdGFjaw== # SovereignCloudStack - GIT_REPOSITORY_NAME_B64: Y2x1c3Rlci1zdGFja3M= # cluster-stacks - # FIXME: It should be fetched from the zuul secret - # GIT_ACCESS_TOKEN_B64: - - name: Wait for all CSO pods to become ready - ansible.builtin.command: "kubectl wait -n cso-system --for=condition=Ready --timeout={{ cso_wait_for_pods }}s pod --all" - changed_when: true diff --git a/playbooks/tasks/cspo.yaml b/playbooks/tasks/cspo.yaml deleted file mode 100644 index 04811bd2..00000000 --- a/playbooks/tasks/cspo.yaml +++ /dev/null @@ -1,39 +0,0 @@ ---- -- name: Install CSPO - vars: - cspo_version: "0.1.0-alpha.3" - cspo_dir: "{{ ansible_user_dir }}/cspo" - cspo_wait_for_pods: 240 - install_dir: "{{ ansible_user_dir }}/.local/bin" - block: - - name: Make sure CSPO directory exists - ansible.builtin.file: - path: "{{ cspo_dir }}" - state: directory - mode: 0755 - - name: Get CSPO manifest - ansible.builtin.get_url: - url: "https://github.com/sovereignCloudStack/cluster-stack-provider-openstack/releases/download/v{{ cspo_version }}/cspo-infrastructure-components.yaml" - dest: "{{ cspo_dir }}/cspo-infrastructure-components.yaml" - mode: "+w" - - name: Patch the CSPO deployment - enable the local mode and mount the cluster stack release - ansible.builtin.script: - cmd: "../files/patch_csx_deployment.sh {{ cspo_dir }}/cspo-infrastructure-components.yaml {{ release_dir }}" - executable: /bin/bash - changed_when: true - - name: Apply CSPO manifest - ansible.builtin.shell: - cmd: | - set -o pipefail - cat {{ cspo_dir }}/cspo-infrastructure-components.yaml | {{ install_dir }}/envsubst | kubectl apply -f - - executable: /bin/bash - changed_when: true - environment: - GIT_PROVIDER_B64: Z2l0aHVi # github - GIT_ORG_NAME_B64: U292ZXJlaWduQ2xvdWRTdGFjaw== # SovereignCloudStack - GIT_REPOSITORY_NAME_B64: Y2x1c3Rlci1zdGFja3M= # cluster-stacks - # FIXME: It should be fetched from the zuul secret - # GIT_ACCESS_TOKEN_B64: - - name: Wait for all CSPO pods to become ready - ansible.builtin.command: "kubectl wait -n cspo-system --for=condition=Ready --timeout={{ cspo_wait_for_pods }}s pod --all" - changed_when: true diff --git a/playbooks/tasks/label_nodes.yaml 
b/playbooks/tasks/label_nodes.yaml deleted file mode 100644 index d67c71ab..00000000 --- a/playbooks/tasks/label_nodes.yaml +++ /dev/null @@ -1,51 +0,0 @@ ---- -- name: Label k8s nodes based on OpenStack host IDs - vars: - # Note (@mfeder): The following label key serves as a temporary label until upstream - # proposes and implements an alternative label key/solution for indicating a physical machine - # within the Kubernetes cluster. - # refer to: https://github.com/SovereignCloudStack/issues/issues/540 - label_key: "topology.scs.community/host-id" - jq_version: "1.7.1" - install_dir: "{{ ansible_user_dir }}/.local/bin" - block: - - name: Check if `os_cloud` variable is defined - ansible.builtin.fail: - msg: "os_cloud is not defined or empty" - when: os_cloud is not defined or os_cloud == '' - - name: Check if `kubeconfig_path` variable is defined - ansible.builtin.fail: - msg: "kubeconfig_path is not defined or empty" - when: kubeconfig_path is not defined or kubeconfig_path == '' - - name: Install jq - ansible.builtin.get_url: - url: "https://github.com/jqlang/jq/releases/download/jq-{{ jq_version }}/jq-linux64" - dest: "{{ install_dir }}/jq" - mode: "+x" - # TODO: use `checksum` attr here to verify the digest of the destination file, if available - - name: Get list of OpenStack server details - ansible.builtin.shell: - cmd: | - set -o pipefail - openstack server list -f json | jq -r '.[].ID' | while read id; do openstack server show $id -f json; done | jq -s '.' - executable: /bin/bash - register: openstack_server_list - changed_when: false - environment: - OS_CLOUD: "{{ os_cloud }}" - - name: Populate openstack_hosts dict with hostname=host_id pairs - ansible.builtin.set_fact: - openstack_hosts: "{{ openstack_hosts | default({}) | combine({item.name: item.hostId}) }}" - with_items: "{{ openstack_server_list.stdout | from_json }}" - - name: Get a list of nodes - ansible.builtin.command: kubectl get nodes -o json - register: kubernetes_node_list - changed_when: false - environment: - KUBECONFIG: "{{ kubeconfig_path }}" - - name: Add node label - ansible.builtin.command: "kubectl label nodes {{ item.metadata.name }} {{ label_key }}={{ openstack_hosts[item.metadata.name] }}" - with_items: "{{ (kubernetes_node_list.stdout | from_json)['items'] }}" - changed_when: false - environment: - KUBECONFIG: "{{ kubeconfig_path }}" diff --git a/playbooks/tasks/scs_compliance.yaml b/playbooks/tasks/scs_compliance.yaml deleted file mode 100644 index 843efc7e..00000000 --- a/playbooks/tasks/scs_compliance.yaml +++ /dev/null @@ -1,50 +0,0 @@ ---- -- name: Download, install, configure, and execute SCS KaaS compliance check - vars: - check_dir: "{{ ansible_user_dir }}/scs-compliance" - python_venv_dir: "{{ ansible_user_dir }}/scs-compliance/venv" - block: - - name: Check if `kubeconfig_path` variable is defined - ansible.builtin.fail: - msg: "kubeconfig_path is not defined or empty" - when: kubeconfig_path is not defined or kubeconfig_path == '' - - name: Ensure check directory - ansible.builtin.file: - path: "{{ check_dir }}" - state: directory - mode: 0755 - - name: Get SCS KaaS compliance check assets - ansible.builtin.git: - repo: https://github.com/SovereignCloudStack/standards.git - dest: "{{ check_dir }}" - single_branch: true - version: main - - name: Install virtualenv - ansible.builtin.package: - name: virtualenv - become: true - - name: Install check requirements - ansible.builtin.pip: - requirements: "{{ check_dir }}/Tests/requirements.txt" - virtualenv: "{{ python_venv_dir }}" - - name: 
Execute SCS KaaS compliance check - ansible.builtin.shell: - cmd: - ". {{ python_venv_dir }}/bin/activate && - python3 {{ check_dir }}/Tests/scs-compliance-check.py {{ check_dir }}/Tests/scs-compatible-kaas.yaml -v -s KaaS_V2 -a kubeconfig={{ kubeconfig_path }}" - changed_when: false - register: scs_compliance_results - always: - - name: Parse SCS KaaS compliance results # noqa: ignore-errors - ansible.builtin.set_fact: - scs_compliance_results_parsed: "{{ scs_compliance_results.stdout }}" - when: scs_compliance_results is defined - ignore_errors: true - - name: Insert SCS compliance results to the warning message that will be appended to the comment zuul leaves on the PR # noqa: ignore-errors - zuul_return: - data: - zuul: - warnings: - - "
<details>\n <summary>SCS Compliance results</summary>\n{{ scs_compliance_results_parsed }}\n</details>
" - when: scs_compliance_results_parsed is defined and scs_compliance_results_parsed | length > 0 - ignore_errors: true diff --git a/playbooks/tasks/sonobouy.yaml b/playbooks/tasks/sonobouy.yaml deleted file mode 100644 index 8b166b96..00000000 --- a/playbooks/tasks/sonobouy.yaml +++ /dev/null @@ -1,66 +0,0 @@ ---- -- name: Execute sonobouy check mode {{ sonobouy.mode }} - vars: - sonobuoy_version: "0.57.1" - install_dir: "{{ ansible_user_dir }}/.local/bin" - block: - - name: Install Sonobuoy - ansible.builtin.unarchive: - src: "https://github.com/vmware-tanzu/sonobuoy/releases/download/v{{ sonobuoy_version }}/sonobuoy_{{ sonobuoy_version }}_linux_amd64.tar.gz" - dest: "{{ install_dir }}" - mode: '+x' - remote_src: true - args: - creates: "{{ install_dir }}/sonobuoy" - - name: Run Sonobuoy tests - ansible.builtin.command: "sonobuoy run --plugin-env=e2e.E2E_PROVIDER=openstack --mode={{ sonobouy.mode }}" - environment: - KUBECONFIG: "{{ cluster_manifest_dir }}/kubeconfig-test-cluster" - changed_when: true - - name: Wait for Sonobuoy tests to complete - ansible.builtin.shell: - cmd: | - set -o pipefail - until sonobuoy status | grep "has completed" >/dev/null 2>&1; do - sleep 10 - sonobuoy status - done - executable: /bin/bash - environment: - KUBECONFIG: "{{ cluster_manifest_dir }}/kubeconfig-test-cluster" - changed_when: true - - name: Sonobuoy status - ansible.builtin.command: "sonobuoy status" - environment: - KUBECONFIG: "{{ cluster_manifest_dir }}/kubeconfig-test-cluster" - register: sonobuoy_status - changed_when: true - - name: Retrieve Sonobuoy results - ansible.builtin.command: "sonobuoy retrieve" - environment: - KUBECONFIG: "{{ cluster_manifest_dir }}/kubeconfig-test-cluster" - register: sonobuoy_retrieve_output - changed_when: true - - name: Get Sonobuoy results - ansible.builtin.command: "sonobuoy results {{ sonobuoy_retrieve_output.stdout }}" - environment: - KUBECONFIG: "{{ cluster_manifest_dir }}/kubeconfig-test-cluster" - register: sonobouy_results - changed_when: true - - name: Delete k8s resources that were generated by a Sonobuoy run - ansible.builtin.command: "sonobuoy delete --all" - environment: - KUBECONFIG: "{{ cluster_manifest_dir }}/kubeconfig-test-cluster" - changed_when: true - - name: Remove Sonobuoy retrieve file - ansible.builtin.file: - path: "{{ sonobuoy_retrieve_output.stdout }}" - state: absent - - name: Insert sonobouy results to the warning message that will be appended to the comment zuul leaves on the PR # noqa: ignore-errors - zuul_return: - data: - zuul: - warnings: - - "
<details>\n <summary>Sonobuoy results</summary>\n{{ sonobouy_results.stdout }}\n</details>
" - when: sonobouy_results is defined and sonobouy_results | length > 0 - ignore_errors: true